Merge git://git.kernel.org/pub/scm/linux/kernel/git/lethal/sh-2.6
authorLinus Torvalds <torvalds@linux-foundation.org>
Thu, 11 Jun 2009 17:08:33 +0000 (10:08 -0700)
committerLinus Torvalds <torvalds@linux-foundation.org>
Thu, 11 Jun 2009 17:08:33 +0000 (10:08 -0700)
* git://git.kernel.org/pub/scm/linux/kernel/git/lethal/sh-2.6: (266 commits)
  sh: Tie sparseirq in to Kconfig.
  sh: Wire up sys_rt_tgsigqueueinfo.
  sh: Fix sys_pwritev() syscall table entry for sh32.
  sh: Fix sh4a llsc-based cmpxchg()
  sh: sh7724: Add JPU support
  sh: sh7724: INTC setting update
  sh: sh7722 clock framework rewrite
  sh: sh7366 clock framework rewrite
  sh: sh7343 clock framework rewrite
  sh: sh7724 clock framework rewrite V3
  sh: sh7723 clock framework rewrite V2
  sh: add enable()/disable()/set_rate() to div6 code
  sh: add AP325RXA mode pin configuration
  sh: add Migo-R mode pin configuration
  sh: sh7722 mode pin definitions
  sh: sh7724 mode pin comments
  sh: sh7723 mode pin V2
  sh: rework mode pin code
  sh: clock div6 helper code
  sh: clock div4 frequency table offset fix
  ...

280 files changed:
Documentation/Smack.txt
Documentation/kernel-parameters.txt
Documentation/sysctl/kernel.txt
MAINTAINERS
arch/arm/plat-mxc/include/mach/imx-uart.h
arch/frv/Kconfig
arch/frv/include/asm/bitops.h
arch/frv/include/asm/elf.h
arch/frv/include/asm/pci.h
arch/frv/include/asm/ptrace.h
arch/frv/include/asm/syscall.h [new file with mode: 0644]
arch/frv/include/asm/thread_info.h
arch/frv/kernel/entry.S
arch/frv/kernel/ptrace.c
arch/frv/kernel/signal.c
arch/frv/kernel/uaccess.c
arch/frv/mb93090-mb00/pci-dma-nommu.c
arch/frv/mb93090-mb00/pci-dma.c
arch/ia64/include/asm/kvm_host.h
arch/ia64/include/asm/pgtable.h
arch/ia64/kernel/irq_ia64.c
arch/ia64/kvm/Kconfig
arch/ia64/kvm/kvm-ia64.c
arch/ia64/kvm/kvm_fw.c
arch/ia64/kvm/lapic.h
arch/ia64/kvm/optvfault.S
arch/ia64/kvm/process.c
arch/ia64/kvm/vcpu.c
arch/ia64/kvm/vmm.c
arch/ia64/kvm/vmm_ivt.S
arch/ia64/kvm/vtlb.c
arch/mn10300/Kconfig
arch/mn10300/include/asm/elf.h
arch/mn10300/include/asm/processor.h
arch/mn10300/include/asm/ptrace.h
arch/mn10300/kernel/entry.S
arch/mn10300/kernel/ptrace.c
arch/mn10300/kernel/signal.c
arch/mn10300/mm/tlb-mn10300.S
arch/powerpc/kvm/powerpc.c
arch/s390/include/asm/kvm_host.h
arch/s390/kvm/intercept.c
arch/s390/kvm/interrupt.c
arch/s390/kvm/kvm-s390.c
arch/s390/kvm/kvm-s390.h
arch/s390/kvm/priv.c
arch/s390/kvm/sigp.c
arch/x86/include/asm/cpufeature.h
arch/x86/include/asm/kvm.h
arch/x86/include/asm/kvm_host.h
arch/x86/include/asm/kvm_x86_emulate.h
arch/x86/include/asm/svm.h
arch/x86/include/asm/termios.h
arch/x86/include/asm/vmx.h
arch/x86/kernel/cpu/mcheck/mce_64.c
arch/x86/kernel/kvm.c
arch/x86/kernel/smp.c
arch/x86/kvm/Kconfig
arch/x86/kvm/Makefile
arch/x86/kvm/i8254.c
arch/x86/kvm/i8254.h
arch/x86/kvm/irq.c
arch/x86/kvm/kvm_timer.h [new file with mode: 0644]
arch/x86/kvm/lapic.c
arch/x86/kvm/lapic.h
arch/x86/kvm/mmu.c
arch/x86/kvm/mmu.h
arch/x86/kvm/paging_tmpl.h
arch/x86/kvm/svm.c
arch/x86/kvm/timer.c [new file with mode: 0644]
arch/x86/kvm/vmx.c
arch/x86/kvm/x86.c
arch/x86/kvm/x86.h
arch/x86/kvm/x86_emulate.c
block/blk-core.c
drivers/bluetooth/hci_ldisc.c
drivers/char/Kconfig
drivers/char/Makefile
drivers/char/bfin_jtag_comm.c [new file with mode: 0644]
drivers/char/cyclades.c
drivers/char/epca.c
drivers/char/ip2/i2lib.c
drivers/char/ip2/ip2main.c
drivers/char/isicom.c
drivers/char/istallion.c
drivers/char/moxa.c
drivers/char/mxser.c
drivers/char/n_hdlc.c
drivers/char/n_tty.c
drivers/char/pcmcia/synclink_cs.c
drivers/char/pty.c
drivers/char/rocket.c
drivers/char/selection.c
drivers/char/stallion.c
drivers/char/synclink.c
drivers/char/synclink_gt.c
drivers/char/synclinkmp.c
drivers/char/tty_audit.c
drivers/char/tty_io.c
drivers/char/tty_ioctl.c
drivers/char/tty_ldisc.c
drivers/char/tty_port.c
drivers/ide/alim15x3.c
drivers/ide/ide-atapi.c
drivers/ide/ide-cd.c
drivers/ide/ide-cd.h
drivers/ide/ide-disk.c
drivers/ide/ide-dma.c
drivers/ide/ide-floppy.c
drivers/ide/ide-io.c
drivers/ide/ide-ioctls.c
drivers/ide/ide-park.c
drivers/ide/ide-pm.c
drivers/ide/ide-tape.c
drivers/ide/ide-taskfile.c
drivers/parport/parport_pc.c
drivers/serial/8250.c
drivers/serial/8250_pci.c
drivers/serial/Kconfig
drivers/serial/Makefile
drivers/serial/bfin_5xx.c
drivers/serial/bfin_sport_uart.c
drivers/serial/icom.c
drivers/serial/imx.c
drivers/serial/jsm/jsm.h
drivers/serial/jsm/jsm_tty.c
drivers/serial/timbuart.c [new file with mode: 0644]
drivers/serial/timbuart.h [new file with mode: 0644]
drivers/usb/class/cdc-acm.c
drivers/usb/class/cdc-acm.h
drivers/usb/serial/belkin_sa.c
drivers/usb/serial/ch341.c
drivers/usb/serial/console.c
drivers/usb/serial/cp210x.c
drivers/usb/serial/cyberjack.c
drivers/usb/serial/cypress_m8.c
drivers/usb/serial/digi_acceleport.c
drivers/usb/serial/empeg.c
drivers/usb/serial/ftdi_sio.c
drivers/usb/serial/garmin_gps.c
drivers/usb/serial/generic.c
drivers/usb/serial/io_edgeport.c
drivers/usb/serial/io_ti.c
drivers/usb/serial/ipaq.c
drivers/usb/serial/ipw.c
drivers/usb/serial/ir-usb.c
drivers/usb/serial/iuu_phoenix.c
drivers/usb/serial/keyspan.c
drivers/usb/serial/keyspan.h
drivers/usb/serial/keyspan_pda.c
drivers/usb/serial/kl5kusb105.c
drivers/usb/serial/kobil_sct.c
drivers/usb/serial/mct_u232.c
drivers/usb/serial/mos7720.c
drivers/usb/serial/mos7840.c
drivers/usb/serial/navman.c
drivers/usb/serial/omninet.c
drivers/usb/serial/opticon.c
drivers/usb/serial/option.c
drivers/usb/serial/oti6858.c
drivers/usb/serial/pl2303.c
drivers/usb/serial/sierra.c
drivers/usb/serial/spcp8x5.c
drivers/usb/serial/symbolserial.c
drivers/usb/serial/ti_usb_3410_5052.c
drivers/usb/serial/usb-serial.c
drivers/usb/serial/visor.c
drivers/usb/serial/whiteheat.c
fs/buffer.c
fs/cifs/CHANGES
fs/cifs/README
fs/cifs/cifs_spnego.c
fs/cifs/cifsacl.c
fs/cifs/cifsfs.c
fs/cifs/cifsfs.h
fs/cifs/cifsproto.h
fs/cifs/cifssmb.c
fs/cifs/connect.c
fs/cifs/file.c
fs/cifs/inode.c
fs/cifs/netmisc.c
fs/cifs/readdir.c
fs/compat.c
fs/devpts/inode.c
fs/exec.c
fs/ext2/super.c
fs/ext3/super.c
fs/ext4/Makefile
fs/ext4/balloc.c
fs/ext4/block_validity.c [new file with mode: 0644]
fs/ext4/dir.c
fs/ext4/ext4.h
fs/ext4/ext4_i.h [deleted file]
fs/ext4/ext4_sb.h [deleted file]
fs/ext4/extents.c
fs/ext4/group.h [deleted file]
fs/ext4/ialloc.c
fs/ext4/inode.c
fs/ext4/mballoc.c
fs/ext4/mballoc.h
fs/ext4/namei.c
fs/ext4/namei.h [deleted file]
fs/ext4/resize.c
fs/ext4/super.c
fs/hugetlbfs/inode.c
fs/ioctl.c
fs/jbd2/journal.c
fs/mpage.c
fs/namei.c
fs/nfsd/vfs.c
fs/proc/base.c
include/linux/cyclades.h
include/linux/ide.h
include/linux/ima.h
include/linux/init_task.h
include/linux/kvm.h
include/linux/kvm_host.h
include/linux/kvm_types.h
include/linux/lsm_audit.h [new file with mode: 0644]
include/linux/magic.h
include/linux/mm.h
include/linux/pci_ids.h
include/linux/rational.h [new file with mode: 0644]
include/linux/sched.h
include/linux/security.h
include/linux/serial.h
include/linux/serial_core.h
include/linux/tty.h
include/linux/tty_driver.h
include/linux/usb/serial.h
ipc/shm.c
kernel/cred.c
kernel/exit.c
kernel/module.c
kernel/ptrace.c
kernel/signal.c
kernel/sysctl.c
lib/Kconfig
lib/Makefile
lib/rational.c [new file with mode: 0644]
mm/Kconfig
mm/mmap.c
mm/nommu.c
mm/shmem.c
security/Kconfig
security/Makefile
security/commoncap.c
security/inode.c
security/integrity/ima/ima_audit.c
security/integrity/ima/ima_crypto.c
security/integrity/ima/ima_fs.c
security/integrity/ima/ima_iint.c
security/integrity/ima/ima_init.c
security/integrity/ima/ima_main.c
security/integrity/ima/ima_policy.c
security/lsm_audit.c [new file with mode: 0644]
security/root_plug.c
security/security.c
security/selinux/avc.c
security/selinux/hooks.c
security/selinux/include/security.h
security/selinux/nlmsgtab.c
security/selinux/selinuxfs.c
security/selinux/ss/services.c
security/smack/smack.h
security/smack/smack_access.c
security/smack/smack_lsm.c
security/smack/smackfs.c
security/tomoyo/common.c
security/tomoyo/common.h
security/tomoyo/domain.c
security/tomoyo/file.c
security/tomoyo/realpath.c
security/tomoyo/tomoyo.c
security/tomoyo/tomoyo.h
virt/kvm/ioapic.c
virt/kvm/ioapic.h
virt/kvm/iommu.c
virt/kvm/irq_comm.c
virt/kvm/kvm_main.c

index 629c92e99783ecb7934ee00cf42c4a9f8556004f..34614b4c708eba850acad3ac15f7e59add026f3e 100644 (file)
@@ -184,8 +184,9 @@ length. Single character labels using special characters, that being anything
 other than a letter or digit, are reserved for use by the Smack development
 team. Smack labels are unstructured, case sensitive, and the only operation
 ever performed on them is comparison for equality. Smack labels cannot
-contain unprintable characters or the "/" (slash) character. Smack labels
-cannot begin with a '-', which is reserved for special options.
+contain unprintable characters, the "/" (slash), the "\" (backslash), the "'"
+(quote) and '"' (double-quote) characters.
+Smack labels cannot begin with a '-', which is reserved for special options.
 
 There are some predefined labels:
 
@@ -523,3 +524,18 @@ Smack supports some mount options:
 
 These mount options apply to all file system types.
 
+Smack auditing
+
+If you want Smack auditing of security events, you need to set CONFIG_AUDIT
+in your kernel configuration.
+By default, all denied events will be audited. You can change this behavior by
+writing a single character to the /smack/logging file:
+0 : no logging
+1 : log denied (default)
+2 : log accepted
+3 : log denied & accepted
+
+Events are logged as 'key=value' pairs; for each event you will at least get
+the subject, the object, the rights requested, the action, the kernel function
+that triggered the event, plus other pairs depending on the type of event
+audited.
index 4a3c2209a124b9d34f5305e66992cbc350e1df96..72d3bf08d79b769ec909ed8d37977bea41b6679f 100644 (file)
@@ -928,6 +928,12 @@ and is between 256 and 4096 characters. It is defined in the file
                        Format: { "sha1" | "md5" }
                        default: "sha1"
 
+       ima_tcb         [IMA]
+                       Load a policy which meets the needs of the Trusted
+                       Computing Base.  This means IMA will measure all
+                       programs exec'd, files mmap'd for exec, and all files
+                       opened for read by uid=0.
+
        in2000=         [HW,SCSI]
                        See header of drivers/scsi/in2000.c.
 
index f11ca7979fa67b5bbf08339cf473d110ecf92f7d..322a00bb99d97f703130de7f349f2dbc9b6fa24e 100644 (file)
@@ -32,6 +32,7 @@ show up in /proc/sys/kernel:
 - kstack_depth_to_print       [ X86 only ]
 - l2cr                        [ PPC only ]
 - modprobe                    ==> Documentation/debugging-modules.txt
+- modules_disabled
 - msgmax
 - msgmnb
 - msgmni
@@ -184,6 +185,16 @@ kernel stack.
 
 ==============================================================
 
+modules_disabled:
+
+A toggle value indicating if modules are allowed to be loaded
+in an otherwise modular kernel.  This toggle defaults to off
+(0), but can be set true (1).  Once true, modules can be
+neither loaded nor unloaded, and the toggle cannot be set back
+to false.
+
+==============================================================
+
 osrelease, ostype & version:
 
 # cat osrelease
index cf4abddfc8a40dcc161e851c26294480fff3076f..84285b5ba359c26cb53811937ab5f4422e94ff6f 100644 (file)
@@ -71,7 +71,7 @@ P: Person
 M: Mail patches to
 L: Mailing list that is relevant to this area
 W: Web-page with status/info
-T: SCM tree type and location.  Type is one of: git, hg, quilt.
+T: SCM tree type and location.  Type is one of: git, hg, quilt, stgit.
 S: Status, one of the following:
 
        Supported:      Someone is actually paid to look after this.
@@ -159,7 +159,8 @@ F:  drivers/net/r8169.c
 8250/16?50 (AND CLONE UARTS) SERIAL DRIVER
 L:     linux-serial@vger.kernel.org
 W:     http://serial.sourceforge.net
-S:     Orphan
+M:     alan@lxorguk.ukuu.org.uk
+S:     Odd Fixes
 F:     drivers/serial/8250*
 F:     include/linux/serial_8250.h
 
@@ -5629,6 +5630,7 @@ P:        Alan Cox
 M:     alan@lxorguk.ukuu.org.uk
 L:     linux-kernel@vger.kernel.org
 S:     Maintained
+T:     stgit http://zeniv.linux.org.uk/~alan/ttydev/
 
 TULIP NETWORK DRIVERS
 P:     Grant Grundler
index 599217b2e13f94cea800c55653e6497c9d2ff3ad..f9bd17dd8dd71af1fd6d6c05f7c7781b51daea26 100644 (file)
 #define ASMARM_ARCH_UART_H
 
 #define IMXUART_HAVE_RTSCTS (1<<0)
+#define IMXUART_IRDA        (1<<1)
 
 struct imxuart_platform_data {
        int (*init)(struct platform_device *pdev);
        int (*exit)(struct platform_device *pdev);
        unsigned int flags;
+       void (*irda_enable)(int enable);
+       unsigned int irda_inv_rx:1;
+       unsigned int irda_inv_tx:1;
+       unsigned short transceiver_delay;
 };
 
 #endif
index 9d1552a9ee2c88ddb40bc7d70dea316f725d2843..8a5bd7a9c6f533b452b21de81127c476b1826765 100644 (file)
@@ -6,6 +6,7 @@ config FRV
        bool
        default y
        select HAVE_IDE
+       select HAVE_ARCH_TRACEHOOK
 
 config ZONE_DMA
        bool
index 287f6f697ce276d55b498cea168a67e402e6b5bf..50ae91b29674584743e5935aa49e0717fb260b34 100644 (file)
@@ -112,7 +112,7 @@ extern unsigned long atomic_test_and_XOR_mask(unsigned long mask, volatile unsig
 #define atomic_clear_mask(mask, v)     atomic_test_and_ANDNOT_mask((mask), (v))
 #define atomic_set_mask(mask, v)       atomic_test_and_OR_mask((mask), (v))
 
-static inline int test_and_clear_bit(int nr, volatile void *addr)
+static inline int test_and_clear_bit(unsigned long nr, volatile void *addr)
 {
        volatile unsigned long *ptr = addr;
        unsigned long mask = 1UL << (nr & 31);
@@ -120,7 +120,7 @@ static inline int test_and_clear_bit(int nr, volatile void *addr)
        return (atomic_test_and_ANDNOT_mask(mask, ptr) & mask) != 0;
 }
 
-static inline int test_and_set_bit(int nr, volatile void *addr)
+static inline int test_and_set_bit(unsigned long nr, volatile void *addr)
 {
        volatile unsigned long *ptr = addr;
        unsigned long mask = 1UL << (nr & 31);
@@ -128,7 +128,7 @@ static inline int test_and_set_bit(int nr, volatile void *addr)
        return (atomic_test_and_OR_mask(mask, ptr) & mask) != 0;
 }
 
-static inline int test_and_change_bit(int nr, volatile void *addr)
+static inline int test_and_change_bit(unsigned long nr, volatile void *addr)
 {
        volatile unsigned long *ptr = addr;
        unsigned long mask = 1UL << (nr & 31);
@@ -136,22 +136,22 @@ static inline int test_and_change_bit(int nr, volatile void *addr)
        return (atomic_test_and_XOR_mask(mask, ptr) & mask) != 0;
 }
 
-static inline void clear_bit(int nr, volatile void *addr)
+static inline void clear_bit(unsigned long nr, volatile void *addr)
 {
        test_and_clear_bit(nr, addr);
 }
 
-static inline void set_bit(int nr, volatile void *addr)
+static inline void set_bit(unsigned long nr, volatile void *addr)
 {
        test_and_set_bit(nr, addr);
 }
 
-static inline void change_bit(int nr, volatile void * addr)
+static inline void change_bit(unsigned long nr, volatile void *addr)
 {
        test_and_change_bit(nr, addr);
 }
 
-static inline void __clear_bit(int nr, volatile void * addr)
+static inline void __clear_bit(unsigned long nr, volatile void *addr)
 {
        volatile unsigned long *a = addr;
        int mask;
@@ -161,7 +161,7 @@ static inline void __clear_bit(int nr, volatile void * addr)
        *a &= ~mask;
 }
 
-static inline void __set_bit(int nr, volatile void * addr)
+static inline void __set_bit(unsigned long nr, volatile void *addr)
 {
        volatile unsigned long *a = addr;
        int mask;
@@ -171,7 +171,7 @@ static inline void __set_bit(int nr, volatile void * addr)
        *a |= mask;
 }
 
-static inline void __change_bit(int nr, volatile void *addr)
+static inline void __change_bit(unsigned long nr, volatile void *addr)
 {
        volatile unsigned long *a = addr;
        int mask;
@@ -181,7 +181,7 @@ static inline void __change_bit(int nr, volatile void *addr)
        *a ^= mask;
 }
 
-static inline int __test_and_clear_bit(int nr, volatile void * addr)
+static inline int __test_and_clear_bit(unsigned long nr, volatile void *addr)
 {
        volatile unsigned long *a = addr;
        int mask, retval;
@@ -193,7 +193,7 @@ static inline int __test_and_clear_bit(int nr, volatile void * addr)
        return retval;
 }
 
-static inline int __test_and_set_bit(int nr, volatile void * addr)
+static inline int __test_and_set_bit(unsigned long nr, volatile void *addr)
 {
        volatile unsigned long *a = addr;
        int mask, retval;
@@ -205,7 +205,7 @@ static inline int __test_and_set_bit(int nr, volatile void * addr)
        return retval;
 }
 
-static inline int __test_and_change_bit(int nr, volatile void * addr)
+static inline int __test_and_change_bit(unsigned long nr, volatile void *addr)
 {
        volatile unsigned long *a = addr;
        int mask, retval;
@@ -220,12 +220,13 @@ static inline int __test_and_change_bit(int nr, volatile void * addr)
 /*
  * This routine doesn't need to be atomic.
  */
-static inline int __constant_test_bit(int nr, const volatile void * addr)
+static inline int
+__constant_test_bit(unsigned long nr, const volatile void *addr)
 {
        return ((1UL << (nr & 31)) & (((const volatile unsigned int *) addr)[nr >> 5])) != 0;
 }
 
-static inline int __test_bit(int nr, const volatile void * addr)
+static inline int __test_bit(unsigned long nr, const volatile void *addr)
 {
        int     * a = (int *) addr;
        int     mask;
index 7279ec07d62e32007f91a01bfe58a20014283a4d..7bbf6e47f8c8a5cb216c9b8f5e0b8be3b902c5bb 100644 (file)
@@ -116,6 +116,7 @@ do {                                                                                        \
 } while(0)
 
 #define USE_ELF_CORE_DUMP
+#define CORE_DUMP_USE_REGSET
 #define ELF_FDPIC_CORE_EFLAGS  EF_FRV_FDPIC
 #define ELF_EXEC_PAGESIZE      16384
 
index 585d9b49949a7537d9d1f89221f93c047cfedacf..cc685e60b0f9ce92c06372cd695346a3b3ba6b17 100644 (file)
@@ -87,8 +87,7 @@ static inline void pci_dma_sync_single(struct pci_dev *hwdev,
                                       dma_addr_t dma_handle,
                                       size_t size, int direction)
 {
-       if (direction == PCI_DMA_NONE)
-                BUG();
+       BUG_ON(direction == PCI_DMA_NONE);
 
        frv_cache_wback_inv((unsigned long)bus_to_virt(dma_handle),
                            (unsigned long)bus_to_virt(dma_handle) + size);
@@ -105,9 +104,7 @@ static inline void pci_dma_sync_sg(struct pci_dev *hwdev,
                                   int nelems, int direction)
 {
        int i;
-
-       if (direction == PCI_DMA_NONE)
-                BUG();
+       BUG_ON(direction == PCI_DMA_NONE);
 
        for (i = 0; i < nelems; i++)
                frv_cache_wback_inv(sg_dma_address(&sg[i]),
index cf6934012b64dc70736e26073470e143b2ed11ab..a54b535c9e493f2ba18fa0974993d5859e18bf0a 100644 (file)
@@ -65,6 +65,8 @@
 #ifdef __KERNEL__
 #ifndef __ASSEMBLY__
 
+struct task_struct;
+
 /*
  * we dedicate GR28 to keeping a pointer to the current exception frame
  * - gr28 is destroyed on entry to the kernel from userspace
@@ -73,11 +75,18 @@ register struct pt_regs *__frame asm("gr28");
 
 #define user_mode(regs)                        (!((regs)->psr & PSR_S))
 #define instruction_pointer(regs)      ((regs)->pc)
+#define user_stack_pointer(regs)       ((regs)->sp)
 
 extern unsigned long user_stack(const struct pt_regs *);
 extern void show_regs(struct pt_regs *);
 #define profile_pc(regs) ((regs)->pc)
-#endif
+
+#define task_pt_regs(task) ((task)->thread.frame0)
+
+#define arch_has_single_step() (1)
+extern void user_enable_single_step(struct task_struct *);
+extern void user_disable_single_step(struct task_struct *);
 
 #endif /* !__ASSEMBLY__ */
+#endif /* __KERNEL__ */
 #endif /* _ASM_PTRACE_H */
diff --git a/arch/frv/include/asm/syscall.h b/arch/frv/include/asm/syscall.h
new file mode 100644 (file)
index 0000000..70689eb
--- /dev/null
@@ -0,0 +1,123 @@
+/* syscall parameter access functions
+ *
+ * Copyright (C) 2009 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+
+#ifndef _ASM_SYSCALL_H
+#define _ASM_SYSCALL_H
+
+#include <linux/err.h>
+#include <asm/ptrace.h>
+
+/*
+ * Get the system call number or -1
+ */
+static inline long syscall_get_nr(struct task_struct *task,
+                                 struct pt_regs *regs)
+{
+       return regs->syscallno;
+}
+
+/*
+ * Restore the clobbered GR8 register
+ * (1st syscall arg was overwritten with syscall return or error)
+ */
+static inline void syscall_rollback(struct task_struct *task,
+                                   struct pt_regs *regs)
+{
+       regs->gr8 = regs->orig_gr8;
+}
+
+/*
+ * See if the syscall return value is an error, returning it if it is and 0 if
+ * not
+ */
+static inline long syscall_get_error(struct task_struct *task,
+                                    struct pt_regs *regs)
+{
+       return IS_ERR_VALUE(regs->gr8) ? regs->gr8 : 0;
+}
+
+/*
+ * Get the syscall return value
+ */
+static inline long syscall_get_return_value(struct task_struct *task,
+                                           struct pt_regs *regs)
+{
+       return regs->gr8;
+}
+
+/*
+ * Set the syscall return value
+ */
+static inline void syscall_set_return_value(struct task_struct *task,
+                                           struct pt_regs *regs,
+                                           int error, long val)
+{
+       if (error)
+               regs->gr8 = -error;
+       else
+               regs->gr8 = val;
+}
+
+/*
+ * Retrieve the system call arguments
+ */
+static inline void syscall_get_arguments(struct task_struct *task,
+                                        struct pt_regs *regs,
+                                        unsigned int i, unsigned int n,
+                                        unsigned long *args)
+{
+       /*
+        * Do this simply for now. If we need to start supporting
+        * fetching arguments from arbitrary indices, this will need some
+        * extra logic. Presently there are no in-tree users that depend
+        * on this behaviour.
+        */
+       BUG_ON(i);
+
+       /* Argument pattern is: GR8, GR9, GR10, GR11, GR12, GR13 */
+       switch (n) {
+       case 6: args[5] = regs->gr13;
+       case 5: args[4] = regs->gr12;
+       case 4: args[3] = regs->gr11;
+       case 3: args[2] = regs->gr10;
+       case 2: args[1] = regs->gr9;
+       case 1: args[0] = regs->gr8;
+               break;
+       default:
+               BUG();
+       }
+}
+
+/*
+ * Alter the system call arguments
+ */
+static inline void syscall_set_arguments(struct task_struct *task,
+                                        struct pt_regs *regs,
+                                        unsigned int i, unsigned int n,
+                                        const unsigned long *args)
+{
+       /* Same note as above applies */
+       BUG_ON(i);
+
+       switch (n) {
+       case 6: regs->gr13 = args[5];
+       case 5: regs->gr12 = args[4];
+       case 4: regs->gr11 = args[3];
+       case 3: regs->gr10 = args[2];
+       case 2: regs->gr9  = args[1];
+       case 1: regs->gr8  = args[0];
+               break;
+       default:
+               BUG();
+       }
+}
+
+#endif /* _ASM_SYSCALL_H */
index bb53ab753ffbf7b3a62e9e105583b4339d9768ce..e8a5ed7be0212791674996fd16afeb4f38a6b751 100644 (file)
@@ -109,20 +109,20 @@ register struct thread_info *__current_thread_info asm("gr15");
  * - other flags in MSW
  */
 #define TIF_SYSCALL_TRACE      0       /* syscall trace active */
-#define TIF_SIGPENDING         1       /* signal pending */
-#define TIF_NEED_RESCHED       2       /* rescheduling necessary */
-#define TIF_SINGLESTEP         3       /* restore singlestep on return to user mode */
-#define TIF_IRET               4       /* return with iret */
+#define TIF_NOTIFY_RESUME      1       /* callback before returning to user */
+#define TIF_SIGPENDING         2       /* signal pending */
+#define TIF_NEED_RESCHED       3       /* rescheduling necessary */
+#define TIF_SINGLESTEP         4       /* restore singlestep on return to user mode */
 #define TIF_RESTORE_SIGMASK    5       /* restore signal mask in do_signal() */
 #define TIF_POLLING_NRFLAG     16      /* true if poll_idle() is polling TIF_NEED_RESCHED */
 #define TIF_MEMDIE             17      /* OOM killer killed process */
 #define TIF_FREEZE             18      /* freezing for suspend */
 
 #define _TIF_SYSCALL_TRACE     (1 << TIF_SYSCALL_TRACE)
+#define _TIF_NOTIFY_RESUME     (1 << TIF_NOTIFY_RESUME)
 #define _TIF_SIGPENDING                (1 << TIF_SIGPENDING)
 #define _TIF_NEED_RESCHED      (1 << TIF_NEED_RESCHED)
 #define _TIF_SINGLESTEP                (1 << TIF_SINGLESTEP)
-#define _TIF_IRET              (1 << TIF_IRET)
 #define _TIF_RESTORE_SIGMASK   (1 << TIF_RESTORE_SIGMASK)
 #define _TIF_POLLING_NRFLAG    (1 << TIF_POLLING_NRFLAG)
 #define _TIF_FREEZE            (1 << TIF_FREEZE)
index 1da523b3298e03a36c8e27e23bf9d20f10615e67..356e0e327a8923f87d366776f06e5b916852bf56 100644 (file)
@@ -886,7 +886,6 @@ system_call:
        bnc             icc0,#0,__syscall_badsys
 
        ldi             @(gr15,#TI_FLAGS),gr4
-       ori             gr4,#_TIF_SYSCALL_TRACE,gr4
        andicc          gr4,#_TIF_SYSCALL_TRACE,gr0,icc0
        bne             icc0,#0,__syscall_trace_entry
 
@@ -1150,11 +1149,10 @@ __entry_work_notifysig:
        # perform syscall entry tracing
 __syscall_trace_entry:
        LEDS            0x6320
-       setlos.p        #0,gr8
-       call            do_syscall_trace
+       call            syscall_trace_entry
 
-       ldi             @(gr28,#REG_SYSCALLNO),gr7
-       lddi            @(gr28,#REG_GR(8)) ,gr8
+       lddi.p          @(gr28,#REG_GR(8)) ,gr8
+       ori             gr8,#0,gr7              ; syscall_trace_entry() returned new syscallno
        lddi            @(gr28,#REG_GR(10)),gr10
        lddi.p          @(gr28,#REG_GR(12)),gr12
 
@@ -1169,11 +1167,10 @@ __syscall_exit_work:
        beq             icc0,#1,__entry_work_pending
 
        movsg           psr,gr23
-       andi            gr23,#~PSR_PIL,gr23     ; could let do_syscall_trace() call schedule()
+       andi            gr23,#~PSR_PIL,gr23     ; could let syscall_trace_exit() call schedule()
        movgs           gr23,psr
 
-       setlos.p        #1,gr8
-       call            do_syscall_trace
+       call            syscall_trace_exit
        bra             __entry_resume_userspace
 
 __syscall_badsys:
index 5e7d401d21e7d6455883f118213a040e97754fbf..60eeed3694c0764d2d0b9d7d3a0c2cffe14b9a89 100644 (file)
@@ -19,6 +19,9 @@
 #include <linux/user.h>
 #include <linux/security.h>
 #include <linux/signal.h>
+#include <linux/regset.h>
+#include <linux/elf.h>
+#include <linux/tracehook.h>
 
 #include <asm/uaccess.h>
 #include <asm/page.h>
  * in exit.c or in signal.c.
  */
 
+/*
+ * retrieve the contents of FRV userspace general registers
+ */
+static int genregs_get(struct task_struct *target,
+                      const struct user_regset *regset,
+                      unsigned int pos, unsigned int count,
+                      void *kbuf, void __user *ubuf)
+{
+       const struct user_int_regs *iregs = &target->thread.user->i;
+       int ret;
+
+       ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+                                 iregs, 0, sizeof(*iregs));
+       if (ret < 0)
+               return ret;
+
+       return user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
+                                       sizeof(*iregs), -1);
+}
+
+/*
+ * update the contents of the FRV userspace general registers
+ */
+static int genregs_set(struct task_struct *target,
+                      const struct user_regset *regset,
+                      unsigned int pos, unsigned int count,
+                      const void *kbuf, const void __user *ubuf)
+{
+       struct user_int_regs *iregs = &target->thread.user->i;
+       unsigned int offs_gr0, offs_gr1;
+       int ret;
+
+       /* not allowed to set PSR or __status */
+       if (pos < offsetof(struct user_int_regs, psr) + sizeof(long) &&
+           pos + count > offsetof(struct user_int_regs, psr))
+               return -EIO;
+
+       if (pos < offsetof(struct user_int_regs, __status) + sizeof(long) &&
+           pos + count > offsetof(struct user_int_regs, __status))
+               return -EIO;
+
+       /* set the control regs */
+       offs_gr0 = offsetof(struct user_int_regs, gr[0]);
+       offs_gr1 = offsetof(struct user_int_regs, gr[1]);
+       ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+                                iregs, 0, offs_gr0);
+       if (ret < 0)
+               return ret;
+
+       /* skip GR0/TBR */
+       ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
+                                       offs_gr0, offs_gr1);
+       if (ret < 0)
+               return ret;
+
+       /* set the general regs */
+       ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+                                &iregs->gr[1], offs_gr1, sizeof(*iregs));
+       if (ret < 0)
+               return ret;
+
+       return user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
+                                       sizeof(*iregs), -1);
+}
+
+/*
+ * retrieve the contents of FRV userspace FP/Media registers
+ */
+static int fpmregs_get(struct task_struct *target,
+                      const struct user_regset *regset,
+                      unsigned int pos, unsigned int count,
+                      void *kbuf, void __user *ubuf)
+{
+       const struct user_fpmedia_regs *fpregs = &target->thread.user->f;
+       int ret;
+
+       ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+                                 fpregs, 0, sizeof(*fpregs));
+       if (ret < 0)
+               return ret;
+
+       return user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
+                                       sizeof(*fpregs), -1);
+}
+
+/*
+ * update the contents of the FRV userspace FP/Media registers
+ */
+static int fpmregs_set(struct task_struct *target,
+                      const struct user_regset *regset,
+                      unsigned int pos, unsigned int count,
+                      const void *kbuf, const void __user *ubuf)
+{
+       struct user_fpmedia_regs *fpregs = &target->thread.user->f;
+       int ret;
+
+       ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+                                fpregs, 0, sizeof(*fpregs));
+       if (ret < 0)
+               return ret;
+
+       return user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
+                                       sizeof(*fpregs), -1);
+}
+
+/*
+ * determine if the FP/Media registers have actually been used
+ */
+static int fpmregs_active(struct task_struct *target,
+                         const struct user_regset *regset)
+{
+       return tsk_used_math(target) ? regset->n : 0;
+}
+
+/*
+ * Define the register sets available on the FRV under Linux
+ */
+enum frv_regset {
+       REGSET_GENERAL,
+       REGSET_FPMEDIA,
+};
+
+static const struct user_regset frv_regsets[] = {
+       /*
+        * General register format is:
+        *      PSR, ISR, CCR, CCCR, LR, LCR, PC, (STATUS), SYSCALLNO, ORIG_G8
+        *      GNER0-1, IACC0, TBR, GR1-63
+        */
+       [REGSET_GENERAL] = {
+               .core_note_type = NT_PRSTATUS,
+               .n              = ELF_NGREG,
+               .size           = sizeof(long),
+               .align          = sizeof(long),
+               .get            = genregs_get,
+               .set            = genregs_set,
+       },
+       /*
+        * FPU/Media register format is:
+        *      FR0-63, FNER0-1, MSR0-1, ACC0-7, ACCG0-8, FSR
+        */
+       [REGSET_FPMEDIA] = {
+               .core_note_type = NT_PRFPREG,
+               .n              = sizeof(struct user_fpmedia_regs) / sizeof(long),
+               .size           = sizeof(long),
+               .align          = sizeof(long),
+               .get            = fpmregs_get,
+               .set            = fpmregs_set,
+               .active         = fpmregs_active,
+       },
+};
+
+static const struct user_regset_view user_frv_native_view = {
+       .name           = "frv",
+       .e_machine      = EM_FRV,
+       .regsets        = frv_regsets,
+       .n              = ARRAY_SIZE(frv_regsets),
+};
+
+const struct user_regset_view *task_user_regset_view(struct task_struct *task)
+{
+       return &user_frv_native_view;
+}
+
 /*
  * Get contents of register REGNO in task TASK.
  */
@@ -68,41 +234,24 @@ static inline int put_reg(struct task_struct *task, int regno,
        }
 }
 
-/*
- * check that an address falls within the bounds of the target process's memory
- * mappings
- */
-static inline int is_user_addr_valid(struct task_struct *child,
-                                    unsigned long start, unsigned long len)
-{
-#ifdef CONFIG_MMU
-       if (start >= PAGE_OFFSET || len > PAGE_OFFSET - start)
-               return -EIO;
-       return 0;
-#else
-       struct vm_area_struct *vma;
-
-       vma = find_vma(child->mm, start);
-       if (vma && start >= vma->vm_start && start + len <= vma->vm_end)
-               return 0;
-
-       return -EIO;
-#endif
-}
-
 /*
  * Called by kernel/ptrace.c when detaching..
  *
  * Control h/w single stepping
  */
-void ptrace_disable(struct task_struct *child)
+void user_enable_single_step(struct task_struct *child)
+{
+       child->thread.frame0->__status |= REG__STATUS_STEP;
+}
+
+void user_disable_single_step(struct task_struct *child)
 {
        child->thread.frame0->__status &= ~REG__STATUS_STEP;
 }
 
-void ptrace_enable(struct task_struct *child)
+void ptrace_disable(struct task_struct *child)
 {
-       child->thread.frame0->__status |= REG__STATUS_STEP;
+       user_disable_single_step(child);
 }
 
 long arch_ptrace(struct task_struct *child, long request, long addr, long data)
@@ -111,15 +260,6 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
        int ret;
 
        switch (request) {
-               /* when I and D space are separate, these will need to be fixed. */
-       case PTRACE_PEEKTEXT: /* read word at location addr. */
-       case PTRACE_PEEKDATA:
-               ret = -EIO;
-               if (is_user_addr_valid(child, addr, sizeof(tmp)) < 0)
-                       break;
-               ret = generic_ptrace_peekdata(child, addr, data);
-               break;
-
                /* read the word at location addr in the USER area. */
        case PTRACE_PEEKUSR: {
                tmp = 0;
@@ -163,15 +303,6 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
                break;
        }
 
-               /* when I and D space are separate, this will have to be fixed. */
-       case PTRACE_POKETEXT: /* write the word at location addr. */
-       case PTRACE_POKEDATA:
-               ret = -EIO;
-               if (is_user_addr_valid(child, addr, sizeof(tmp)) < 0)
-                       break;
-               ret = generic_ptrace_pokedata(child, addr, data);
-               break;
-
        case PTRACE_POKEUSR: /* write the word at location addr in the USER area */
                ret = -EIO;
                if ((addr & 3) || addr < 0)
@@ -179,7 +310,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
 
                ret = 0;
                switch (addr >> 2) {
-               case 0 ... PT__END-1:
+               case 0 ... PT__END - 1:
                        ret = put_reg(child, addr >> 2, data);
                        break;
 
@@ -189,95 +320,29 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
                }
                break;
 
-       case PTRACE_SYSCALL: /* continue and stop at next (return from) syscall */
-       case PTRACE_CONT: /* restart after signal. */
-               ret = -EIO;
-               if (!valid_signal(data))
-                       break;
-               if (request == PTRACE_SYSCALL)
-                       set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
-               else
-                       clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
-               child->exit_code = data;
-               ptrace_disable(child);
-               wake_up_process(child);
-               ret = 0;
-               break;
-
-               /* make the child exit.  Best I can do is send it a sigkill.
-                * perhaps it should be put in the status that it wants to
-                * exit.
-                */
-       case PTRACE_KILL:
-               ret = 0;
-               if (child->exit_state == EXIT_ZOMBIE)   /* already dead */
-                       break;
-               child->exit_code = SIGKILL;
-               clear_tsk_thread_flag(child, TIF_SINGLESTEP);
-               ptrace_disable(child);
-               wake_up_process(child);
-               break;
-
-       case PTRACE_SINGLESTEP:  /* set the trap flag. */
-               ret = -EIO;
-               if (!valid_signal(data))
-                       break;
-               clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
-               ptrace_enable(child);
-               child->exit_code = data;
-               wake_up_process(child);
-               ret = 0;
-               break;
-
-       case PTRACE_DETACH:     /* detach a process that was attached. */
-               ret = ptrace_detach(child, data);
-               break;
-
-       case PTRACE_GETREGS: { /* Get all integer regs from the child. */
-               int i;
-               for (i = 0; i < PT__GPEND; i++) {
-                       tmp = get_reg(child, i);
-                       if (put_user(tmp, (unsigned long *) data)) {
-                               ret = -EFAULT;
-                               break;
-                       }
-                       data += sizeof(long);
-               }
-               ret = 0;
-               break;
-       }
-
-       case PTRACE_SETREGS: { /* Set all integer regs in the child. */
-               int i;
-               for (i = 0; i < PT__GPEND; i++) {
-                       if (get_user(tmp, (unsigned long *) data)) {
-                               ret = -EFAULT;
-                               break;
-                       }
-                       put_reg(child, i, tmp);
-                       data += sizeof(long);
-               }
-               ret = 0;
-               break;
-       }
-
-       case PTRACE_GETFPREGS: { /* Get the child FP/Media state. */
-               ret = 0;
-               if (copy_to_user((void *) data,
-                                &child->thread.user->f,
-                                sizeof(child->thread.user->f)))
-                       ret = -EFAULT;
-               break;
-       }
-
-       case PTRACE_SETFPREGS: { /* Set the child FP/Media state. */
-               ret = 0;
-               if (copy_from_user(&child->thread.user->f,
-                                  (void *) data,
-                                  sizeof(child->thread.user->f)))
-                       ret = -EFAULT;
-               break;
-       }
+       case PTRACE_GETREGS:    /* Get all integer regs from the child. */
+               return copy_regset_to_user(child, &user_frv_native_view,
+                                          REGSET_GENERAL,
+                                          0, sizeof(child->thread.user->i),
+                                          (void __user *)data);
+
+       case PTRACE_SETREGS:    /* Set all integer regs in the child. */
+               return copy_regset_from_user(child, &user_frv_native_view,
+                                            REGSET_GENERAL,
+                                            0, sizeof(child->thread.user->i),
+                                            (const void __user *)data);
+
+       case PTRACE_GETFPREGS:  /* Get the child FP/Media state. */
+               return copy_regset_to_user(child, &user_frv_native_view,
+                                          REGSET_FPMEDIA,
+                                          0, sizeof(child->thread.user->f),
+                                          (void __user *)data);
+
+       case PTRACE_SETFPREGS:  /* Set the child FP/Media state. */
+               return copy_regset_from_user(child, &user_frv_native_view,
+                                            REGSET_FPMEDIA,
+                                            0, sizeof(child->thread.user->f),
+                                            (const void __user *)data);
 
        case PTRACE_GETFDPIC:
                tmp = 0;
@@ -300,414 +365,36 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
                break;
 
        default:
-               ret = -EIO;
+               ret = ptrace_request(child, request, addr, data);
                break;
        }
        return ret;
 }
 
-int __nongprelbss kstrace;
-
-static const struct {
-       const char      *name;
-       unsigned        argmask;
-} __syscall_name_table[NR_syscalls] = {
-       [0]     = { "restart_syscall"                   },
-       [1]     = { "exit",             0x000001        },
-       [2]     = { "fork",             0xffffff        },
-       [3]     = { "read",             0x000141        },
-       [4]     = { "write",            0x000141        },
-       [5]     = { "open",             0x000235        },
-       [6]     = { "close",            0x000001        },
-       [7]     = { "waitpid",          0x000141        },
-       [8]     = { "creat",            0x000025        },
-       [9]     = { "link",             0x000055        },
-       [10]    = { "unlink",           0x000005        },
-       [11]    = { "execve",           0x000445        },
-       [12]    = { "chdir",            0x000005        },
-       [13]    = { "time",             0x000004        },
-       [14]    = { "mknod",            0x000325        },
-       [15]    = { "chmod",            0x000025        },
-       [16]    = { "lchown",           0x000025        },
-       [17]    = { "break" },
-       [18]    = { "oldstat",          0x000045        },
-       [19]    = { "lseek",            0x000131        },
-       [20]    = { "getpid",           0xffffff        },
-       [21]    = { "mount",            0x043555        },
-       [22]    = { "umount",           0x000005        },
-       [23]    = { "setuid",           0x000001        },
-       [24]    = { "getuid",           0xffffff        },
-       [25]    = { "stime",            0x000004        },
-       [26]    = { "ptrace",           0x004413        },
-       [27]    = { "alarm",            0x000001        },
-       [28]    = { "oldfstat",         0x000041        },
-       [29]    = { "pause",            0xffffff        },
-       [30]    = { "utime",            0x000045        },
-       [31]    = { "stty" },
-       [32]    = { "gtty" },
-       [33]    = { "access",           0x000025        },
-       [34]    = { "nice",             0x000001        },
-       [35]    = { "ftime" },
-       [36]    = { "sync",             0xffffff        },
-       [37]    = { "kill",             0x000011        },
-       [38]    = { "rename",           0x000055        },
-       [39]    = { "mkdir",            0x000025        },
-       [40]    = { "rmdir",            0x000005        },
-       [41]    = { "dup",              0x000001        },
-       [42]    = { "pipe",             0x000004        },
-       [43]    = { "times",            0x000004        },
-       [44]    = { "prof" },
-       [45]    = { "brk",              0x000004        },
-       [46]    = { "setgid",           0x000001        },
-       [47]    = { "getgid",           0xffffff        },
-       [48]    = { "signal",           0x000041        },
-       [49]    = { "geteuid",          0xffffff        },
-       [50]    = { "getegid",          0xffffff        },
-       [51]    = { "acct",             0x000005        },
-       [52]    = { "umount2",          0x000035        },
-       [53]    = { "lock" },
-       [54]    = { "ioctl",            0x000331        },
-       [55]    = { "fcntl",            0x000331        },
-       [56]    = { "mpx" },
-       [57]    = { "setpgid",          0x000011        },
-       [58]    = { "ulimit" },
-       [60]    = { "umask",            0x000002        },
-       [61]    = { "chroot",           0x000005        },
-       [62]    = { "ustat",            0x000043        },
-       [63]    = { "dup2",             0x000011        },
-       [64]    = { "getppid",          0xffffff        },
-       [65]    = { "getpgrp",          0xffffff        },
-       [66]    = { "setsid",           0xffffff        },
-       [67]    = { "sigaction" },
-       [68]    = { "sgetmask" },
-       [69]    = { "ssetmask" },
-       [70]    = { "setreuid" },
-       [71]    = { "setregid" },
-       [72]    = { "sigsuspend" },
-       [73]    = { "sigpending" },
-       [74]    = { "sethostname" },
-       [75]    = { "setrlimit" },
-       [76]    = { "getrlimit" },
-       [77]    = { "getrusage" },
-       [78]    = { "gettimeofday" },
-       [79]    = { "settimeofday" },
-       [80]    = { "getgroups" },
-       [81]    = { "setgroups" },
-       [82]    = { "select" },
-       [83]    = { "symlink" },
-       [84]    = { "oldlstat" },
-       [85]    = { "readlink" },
-       [86]    = { "uselib" },
-       [87]    = { "swapon" },
-       [88]    = { "reboot" },
-       [89]    = { "readdir" },
-       [91]    = { "munmap",           0x000034        },
-       [92]    = { "truncate" },
-       [93]    = { "ftruncate" },
-       [94]    = { "fchmod" },
-       [95]    = { "fchown" },
-       [96]    = { "getpriority" },
-       [97]    = { "setpriority" },
-       [99]    = { "statfs" },
-       [100]   = { "fstatfs" },
-       [102]   = { "socketcall" },
-       [103]   = { "syslog" },
-       [104]   = { "setitimer" },
-       [105]   = { "getitimer" },
-       [106]   = { "stat" },
-       [107]   = { "lstat" },
-       [108]   = { "fstat" },
-       [111]   = { "vhangup" },
-       [114]   = { "wait4" },
-       [115]   = { "swapoff" },
-       [116]   = { "sysinfo" },
-       [117]   = { "ipc" },
-       [118]   = { "fsync" },
-       [119]   = { "sigreturn" },
-       [120]   = { "clone" },
-       [121]   = { "setdomainname" },
-       [122]   = { "uname" },
-       [123]   = { "modify_ldt" },
-       [123]   = { "cacheflush" },
-       [124]   = { "adjtimex" },
-       [125]   = { "mprotect" },
-       [126]   = { "sigprocmask" },
-       [127]   = { "create_module" },
-       [128]   = { "init_module" },
-       [129]   = { "delete_module" },
-       [130]   = { "get_kernel_syms" },
-       [131]   = { "quotactl" },
-       [132]   = { "getpgid" },
-       [133]   = { "fchdir" },
-       [134]   = { "bdflush" },
-       [135]   = { "sysfs" },
-       [136]   = { "personality" },
-       [137]   = { "afs_syscall" },
-       [138]   = { "setfsuid" },
-       [139]   = { "setfsgid" },
-       [140]   = { "_llseek",                  0x014331        },
-       [141]   = { "getdents" },
-       [142]   = { "_newselect",               0x000141        },
-       [143]   = { "flock" },
-       [144]   = { "msync" },
-       [145]   = { "readv" },
-       [146]   = { "writev" },
-       [147]   = { "getsid",                   0x000001        },
-       [148]   = { "fdatasync",                0x000001        },
-       [149]   = { "_sysctl",                  0x000004        },
-       [150]   = { "mlock" },
-       [151]   = { "munlock" },
-       [152]   = { "mlockall" },
-       [153]   = { "munlockall" },
-       [154]   = { "sched_setparam" },
-       [155]   = { "sched_getparam" },
-       [156]   = { "sched_setscheduler" },
-       [157]   = { "sched_getscheduler" },
-       [158]   = { "sched_yield" },
-       [159]   = { "sched_get_priority_max" },
-       [160]   = { "sched_get_priority_min" },
-       [161]   = { "sched_rr_get_interval" },
-       [162]   = { "nanosleep",                0x000044        },
-       [163]   = { "mremap" },
-       [164]   = { "setresuid" },
-       [165]   = { "getresuid" },
-       [166]   = { "vm86" },
-       [167]   = { "query_module" },
-       [168]   = { "poll" },
-       [169]   = { "nfsservctl" },
-       [170]   = { "setresgid" },
-       [171]   = { "getresgid" },
-       [172]   = { "prctl",                    0x333331        },
-       [173]   = { "rt_sigreturn",             0xffffff        },
-       [174]   = { "rt_sigaction",             0x001441        },
-       [175]   = { "rt_sigprocmask",           0x001441        },
-       [176]   = { "rt_sigpending",            0x000014        },
-       [177]   = { "rt_sigtimedwait",          0x001444        },
-       [178]   = { "rt_sigqueueinfo",          0x000411        },
-       [179]   = { "rt_sigsuspend",            0x000014        },
-       [180]   = { "pread",                    0x003341        },
-       [181]   = { "pwrite",                   0x003341        },
-       [182]   = { "chown",                    0x000115        },
-       [183]   = { "getcwd" },
-       [184]   = { "capget" },
-       [185]   = { "capset" },
-       [186]   = { "sigaltstack" },
-       [187]   = { "sendfile" },
-       [188]   = { "getpmsg" },
-       [189]   = { "putpmsg" },
-       [190]   = { "vfork",                    0xffffff        },
-       [191]   = { "ugetrlimit" },
-       [192]   = { "mmap2",                    0x313314        },
-       [193]   = { "truncate64" },
-       [194]   = { "ftruncate64" },
-       [195]   = { "stat64",                   0x000045        },
-       [196]   = { "lstat64",                  0x000045        },
-       [197]   = { "fstat64",                  0x000041        },
-       [198]   = { "lchown32" },
-       [199]   = { "getuid32",                 0xffffff        },
-       [200]   = { "getgid32",                 0xffffff        },
-       [201]   = { "geteuid32",                0xffffff        },
-       [202]   = { "getegid32",                0xffffff        },
-       [203]   = { "setreuid32" },
-       [204]   = { "setregid32" },
-       [205]   = { "getgroups32" },
-       [206]   = { "setgroups32" },
-       [207]   = { "fchown32" },
-       [208]   = { "setresuid32" },
-       [209]   = { "getresuid32" },
-       [210]   = { "setresgid32" },
-       [211]   = { "getresgid32" },
-       [212]   = { "chown32" },
-       [213]   = { "setuid32" },
-       [214]   = { "setgid32" },
-       [215]   = { "setfsuid32" },
-       [216]   = { "setfsgid32" },
-       [217]   = { "pivot_root" },
-       [218]   = { "mincore" },
-       [219]   = { "madvise" },
-       [220]   = { "getdents64" },
-       [221]   = { "fcntl64" },
-       [223]   = { "security" },
-       [224]   = { "gettid" },
-       [225]   = { "readahead" },
-       [226]   = { "setxattr" },
-       [227]   = { "lsetxattr" },
-       [228]   = { "fsetxattr" },
-       [229]   = { "getxattr" },
-       [230]   = { "lgetxattr" },
-       [231]   = { "fgetxattr" },
-       [232]   = { "listxattr" },
-       [233]   = { "llistxattr" },
-       [234]   = { "flistxattr" },
-       [235]   = { "removexattr" },
-       [236]   = { "lremovexattr" },
-       [237]   = { "fremovexattr" },
-       [238]   = { "tkill" },
-       [239]   = { "sendfile64" },
-       [240]   = { "futex" },
-       [241]   = { "sched_setaffinity" },
-       [242]   = { "sched_getaffinity" },
-       [243]   = { "set_thread_area" },
-       [244]   = { "get_thread_area" },
-       [245]   = { "io_setup" },
-       [246]   = { "io_destroy" },
-       [247]   = { "io_getevents" },
-       [248]   = { "io_submit" },
-       [249]   = { "io_cancel" },
-       [250]   = { "fadvise64" },
-       [252]   = { "exit_group",               0x000001        },
-       [253]   = { "lookup_dcookie" },
-       [254]   = { "epoll_create" },
-       [255]   = { "epoll_ctl" },
-       [256]   = { "epoll_wait" },
-       [257]   = { "remap_file_pages" },
-       [258]   = { "set_tid_address" },
-       [259]   = { "timer_create" },
-       [260]   = { "timer_settime" },
-       [261]   = { "timer_gettime" },
-       [262]   = { "timer_getoverrun" },
-       [263]   = { "timer_delete" },
-       [264]   = { "clock_settime" },
-       [265]   = { "clock_gettime" },
-       [266]   = { "clock_getres" },
-       [267]   = { "clock_nanosleep" },
-       [268]   = { "statfs64" },
-       [269]   = { "fstatfs64" },
-       [270]   = { "tgkill" },
-       [271]   = { "utimes" },
-       [272]   = { "fadvise64_64" },
-       [273]   = { "vserver" },
-       [274]   = { "mbind" },
-       [275]   = { "get_mempolicy" },
-       [276]   = { "set_mempolicy" },
-       [277]   = { "mq_open" },
-       [278]   = { "mq_unlink" },
-       [279]   = { "mq_timedsend" },
-       [280]   = { "mq_timedreceive" },
-       [281]   = { "mq_notify" },
-       [282]   = { "mq_getsetattr" },
-       [283]   = { "sys_kexec_load" },
-};
-
-asmlinkage void do_syscall_trace(int leaving)
+/*
+ * handle tracing of system call entry
+ * - return the revised system call number or ULONG_MAX to cause ENOSYS
+ */
+asmlinkage unsigned long syscall_trace_entry(void)
 {
-#if 0
-       unsigned long *argp;
-       const char *name;
-       unsigned argmask;
-       char buffer[16];
-
-       if (!kstrace)
-               return;
-
-       if (!current->mm)
-               return;
-
-       if (__frame->gr7 == __NR_close)
-               return;
-
-#if 0
-       if (__frame->gr7 != __NR_mmap2 &&
-           __frame->gr7 != __NR_vfork &&
-           __frame->gr7 != __NR_execve &&
-           __frame->gr7 != __NR_exit)
-               return;
-#endif
-
-       argmask = 0;
-       name = NULL;
-       if (__frame->gr7 < NR_syscalls) {
-               name = __syscall_name_table[__frame->gr7].name;
-               argmask = __syscall_name_table[__frame->gr7].argmask;
-       }
-       if (!name) {
-               sprintf(buffer, "sys_%lx", __frame->gr7);
-               name = buffer;
-       }
-
-       if (!leaving) {
-               if (!argmask) {
-                       printk(KERN_CRIT "[%d] %s(%lx,%lx,%lx,%lx,%lx,%lx)\n",
-                              current->pid,
-                              name,
-                              __frame->gr8,
-                              __frame->gr9,
-                              __frame->gr10,
-                              __frame->gr11,
-                              __frame->gr12,
-                              __frame->gr13);
-               }
-               else if (argmask == 0xffffff) {
-                       printk(KERN_CRIT "[%d] %s()\n",
-                              current->pid,
-                              name);
-               }
-               else {
-                       printk(KERN_CRIT "[%d] %s(",
-                              current->pid,
-                              name);
-
-                       argp = &__frame->gr8;
-
-                       do {
-                               switch (argmask & 0xf) {
-                               case 1:
-                                       printk("%ld", (long) *argp);
-                                       break;
-                               case 2:
-                                       printk("%lo", *argp);
-                                       break;
-                               case 3:
-                                       printk("%lx", *argp);
-                                       break;
-                               case 4:
-                                       printk("%p", (void *) *argp);
-                                       break;
-                               case 5:
-                                       printk("\"%s\"", (char *) *argp);
-                                       break;
-                               }
-
-                               argp++;
-                               argmask >>= 4;
-                               if (argmask)
-                                       printk(",");
-
-                       } while (argmask);
-
-                       printk(")\n");
-               }
-       }
-       else {
-               if ((int)__frame->gr8 > -4096 && (int)__frame->gr8 < 4096)
-                       printk(KERN_CRIT "[%d] %s() = %ld\n", current->pid, name, __frame->gr8);
-               else
-                       printk(KERN_CRIT "[%d] %s() = %lx\n", current->pid, name, __frame->gr8);
+       __frame->__status |= REG__STATUS_SYSC_ENTRY;
+       if (tracehook_report_syscall_entry(__frame)) {
+               /* tracing decided this syscall should not happen, so
+                * We'll return a bogus call number to get an ENOSYS
+                * error, but leave the original number in
+                * __frame->syscallno
+                */
+               return ULONG_MAX;
        }
-       return;
-#endif
-
-       if (!test_thread_flag(TIF_SYSCALL_TRACE))
-               return;
-
-       if (!(current->ptrace & PT_PTRACED))
-               return;
 
-       /* we need to indicate entry or exit to strace */
-       if (leaving)
-               __frame->__status |= REG__STATUS_SYSC_EXIT;
-       else
-               __frame->__status |= REG__STATUS_SYSC_ENTRY;
-
-       ptrace_notify(SIGTRAP);
+       return __frame->syscallno;
+}
 
-       /*
-        * this isn't the same as continuing with a signal, but it will do
-        * for normal use.  strace only continues with a signal if the
-        * stopping signal is not SIGTRAP.  -brl
-        */
-       if (current->exit_code) {
-               send_sig(current->exit_code, current, 1);
-               current->exit_code = 0;
-       }
+/*
+ * handle tracing of system call exit
+ */
+asmlinkage void syscall_trace_exit(void)
+{
+       __frame->__status |= REG__STATUS_SYSC_EXIT;
+       tracehook_report_syscall_exit(__frame, 0);
 }
index 3bdb368292a8c8a53f9c00b3aa39dc169420b095..4a7a62c6e7833ed91a9087b3762cb3d0ba24df5f 100644 (file)
@@ -21,6 +21,7 @@
 #include <linux/unistd.h>
 #include <linux/personality.h>
 #include <linux/freezer.h>
+#include <linux/tracehook.h>
 #include <asm/ucontext.h>
 #include <asm/uaccess.h>
 #include <asm/cacheflush.h>
@@ -516,6 +517,9 @@ static void do_signal(void)
                         * clear the TIF_RESTORE_SIGMASK flag */
                        if (test_thread_flag(TIF_RESTORE_SIGMASK))
                                clear_thread_flag(TIF_RESTORE_SIGMASK);
+
+                       tracehook_signal_handler(signr, &info, &ka, __frame,
+                                                test_thread_flag(TIF_SINGLESTEP));
                }
 
                return;
@@ -564,4 +568,10 @@ asmlinkage void do_notify_resume(__u32 thread_info_flags)
        if (thread_info_flags & (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK))
                do_signal();
 
+       /* deal with notification on about to resume userspace execution */
+       if (thread_info_flags & _TIF_NOTIFY_RESUME) {
+               clear_thread_flag(TIF_NOTIFY_RESUME);
+               tracehook_notify_resume(__frame);
+       }
+
 } /* end do_notify_resume() */
index 9fb771a20df3cf831f4779aba7859e4c865928de..374f88d6cc00d0c32fffd03f5c7b3a8b1af395ee 100644 (file)
@@ -23,8 +23,7 @@ long strncpy_from_user(char *dst, const char __user *src, long count)
        char *p, ch;
        long err = -EFAULT;
 
-       if (count < 0)
-               BUG();
+       BUG_ON(count < 0);
 
        p = dst;
 
@@ -76,8 +75,7 @@ long strnlen_user(const char __user *src, long count)
        long err = 0;
        char ch;
 
-       if (count < 0)
-               BUG();
+       BUG_ON(count < 0);
 
 #ifndef CONFIG_MMU
        if ((unsigned long) src < memory_start)
index 52ff9aec799d29c84a649aa128ccb985b60cd68c..4e1ba0b15443a86ac4f5fd12bb34b552c392a768 100644 (file)
@@ -116,8 +116,7 @@ EXPORT_SYMBOL(dma_free_coherent);
 dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
                          enum dma_data_direction direction)
 {
-       if (direction == DMA_NONE)
-                BUG();
+       BUG_ON(direction == DMA_NONE);
 
        frv_cache_wback_inv((unsigned long) ptr, (unsigned long) ptr + size);
 
@@ -151,8 +150,7 @@ int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
                frv_cache_wback_inv(sg_dma_address(&sg[i]),
                                    sg_dma_address(&sg[i]) + sg_dma_len(&sg[i]));
 
-       if (direction == DMA_NONE)
-                BUG();
+       BUG_ON(direction == DMA_NONE);
 
        return nents;
 }
index 3ddedebc4eb3db1a1bbc25a7a10dd2e1fd1e66e5..45954f0813dc9e75a5e15f06e5cdb13776170c30 100644 (file)
@@ -48,8 +48,7 @@ EXPORT_SYMBOL(dma_free_coherent);
 dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
                          enum dma_data_direction direction)
 {
-       if (direction == DMA_NONE)
-                BUG();
+       BUG_ON(direction == DMA_NONE);
 
        frv_cache_wback_inv((unsigned long) ptr, (unsigned long) ptr + size);
 
@@ -81,8 +80,7 @@ int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
        void *vaddr;
        int i;
 
-       if (direction == DMA_NONE)
-                BUG();
+       BUG_ON(direction == DMA_NONE);
 
        dampr2 = __get_DAMPR(2);
 
index 4542651e6acb1e82f3f4665ebca0705a9e6161aa..5f43697aed3018f5d821a10d641642de88473c32 100644 (file)
@@ -371,6 +371,7 @@ struct kvm_vcpu_arch {
        int last_run_cpu;
        int vmm_tr_slot;
        int vm_tr_slot;
+       int sn_rtc_tr_slot;
 
 #define KVM_MP_STATE_RUNNABLE          0
 #define KVM_MP_STATE_UNINITIALIZED     1
@@ -465,6 +466,7 @@ struct kvm_arch {
        unsigned long   vmm_init_rr;
 
        int             online_vcpus;
+       int             is_sn2;
 
        struct kvm_ioapic *vioapic;
        struct kvm_vm_stat stat;
@@ -472,6 +474,7 @@ struct kvm_arch {
 
        struct list_head assigned_dev_head;
        struct iommu_domain *iommu_domain;
+       int iommu_flags;
        struct hlist_head irq_ack_notifier_list;
 
        unsigned long irq_sources_bitmap;
@@ -578,6 +581,8 @@ struct kvm_vmm_info{
        kvm_vmm_entry   *vmm_entry;
        kvm_tramp_entry *tramp_entry;
        unsigned long   vmm_ivt;
+       unsigned long   patch_mov_ar;
+       unsigned long   patch_mov_ar_sn2;
 };
 
 int kvm_highest_pending_irq(struct kvm_vcpu *vcpu);
@@ -585,7 +590,6 @@ int kvm_emulate_halt(struct kvm_vcpu *vcpu);
 int kvm_pal_emul(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
 void kvm_sal_emul(struct kvm_vcpu *vcpu);
 
-static inline void kvm_inject_nmi(struct kvm_vcpu *vcpu) {}
 #endif /* __ASSEMBLY__*/
 
 #endif
index 7a9bff47564f10c3165123e378ce226e90d55f2b..0a9cc73d35c78fac88f4733511021849a0b2a248 100644 (file)
 #define PAGE_GATE      __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
 #define PAGE_KERNEL    __pgprot(__DIRTY_BITS  | _PAGE_PL_0 | _PAGE_AR_RWX)
 #define PAGE_KERNELRX  __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
+#define PAGE_KERNEL_UC __pgprot(__DIRTY_BITS  | _PAGE_PL_0 | _PAGE_AR_RWX | \
+                                _PAGE_MA_UC)
 
 # ifndef __ASSEMBLY__
 
index acc4d19ae62ad17fd956f914a0829a6bda3b8589..b448197728be5cc22b91fd92f2c49d2cee0f455f 100644 (file)
@@ -610,6 +610,9 @@ static struct irqaction ipi_irqaction = {
        .name =         "IPI"
 };
 
+/*
+ * KVM uses this interrupt to force a cpu out of guest mode
+ */
 static struct irqaction resched_irqaction = {
        .handler =      dummy_handler,
        .flags =        IRQF_DISABLED,
index 0a2d6b86075a66ce45908ee6fc66f2cd981b4c8c..64d5209378749ff7175f8218fdbb546ee6310315 100644 (file)
@@ -23,7 +23,7 @@ if VIRTUALIZATION
 
 config KVM
        tristate "Kernel-based Virtual Machine (KVM) support"
-       depends on HAVE_KVM && EXPERIMENTAL
+       depends on HAVE_KVM && MODULES && EXPERIMENTAL
        # for device assignment:
        depends on PCI
        select PREEMPT_NOTIFIERS
index d20a5db4c4dde90674debfd2eb370b4434870b89..80c57b0a21c479ee593fb095bc8429089eced2f5 100644 (file)
@@ -41,6 +41,9 @@
 #include <asm/div64.h>
 #include <asm/tlb.h>
 #include <asm/elf.h>
+#include <asm/sn/addrs.h>
+#include <asm/sn/clksupport.h>
+#include <asm/sn/shub_mmr.h>
 
 #include "misc.h"
 #include "vti.h"
@@ -65,6 +68,16 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
        { NULL }
 };
 
+static unsigned long kvm_get_itc(struct kvm_vcpu *vcpu)
+{
+#if defined(CONFIG_IA64_SGI_SN2) || defined(CONFIG_IA64_GENERIC)
+       if (vcpu->kvm->arch.is_sn2)
+               return rtc_time();
+       else
+#endif
+               return ia64_getreg(_IA64_REG_AR_ITC);
+}
+
 static void kvm_flush_icache(unsigned long start, unsigned long len)
 {
        int l;
@@ -119,8 +132,7 @@ void kvm_arch_hardware_enable(void *garbage)
        unsigned long saved_psr;
        int slot;
 
-       pte = pte_val(mk_pte_phys(__pa(kvm_vmm_base),
-                               PAGE_KERNEL));
+       pte = pte_val(mk_pte_phys(__pa(kvm_vmm_base), PAGE_KERNEL));
        local_irq_save(saved_psr);
        slot = ia64_itr_entry(0x3, KVM_VMM_BASE, pte, KVM_VMM_SHIFT);
        local_irq_restore(saved_psr);
@@ -283,6 +295,18 @@ static int handle_sal_call(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 
 }
 
+static int __apic_accept_irq(struct kvm_vcpu *vcpu, uint64_t vector)
+{
+       struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd);
+
+       if (!test_and_set_bit(vector, &vpd->irr[0])) {
+               vcpu->arch.irq_new_pending = 1;
+               kvm_vcpu_kick(vcpu);
+               return 1;
+       }
+       return 0;
+}
+
 /*
  *  offset: address offset to IPI space.
  *  value:  deliver value.
@@ -292,20 +316,20 @@ static void vcpu_deliver_ipi(struct kvm_vcpu *vcpu, uint64_t dm,
 {
        switch (dm) {
        case SAPIC_FIXED:
-               kvm_apic_set_irq(vcpu, vector, 0);
                break;
        case SAPIC_NMI:
-               kvm_apic_set_irq(vcpu, 2, 0);
+               vector = 2;
                break;
        case SAPIC_EXTINT:
-               kvm_apic_set_irq(vcpu, 0, 0);
+               vector = 0;
                break;
        case SAPIC_INIT:
        case SAPIC_PMI:
        default:
                printk(KERN_ERR"kvm: Unimplemented Deliver reserved IPI!\n");
-               break;
+               return;
        }
+       __apic_accept_irq(vcpu, vector);
 }
 
 static struct kvm_vcpu *lid_to_vcpu(struct kvm *kvm, unsigned long id,
@@ -413,6 +437,23 @@ static int handle_switch_rr6(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
        return 1;
 }
 
+static int kvm_sn2_setup_mappings(struct kvm_vcpu *vcpu)
+{
+       unsigned long pte, rtc_phys_addr, map_addr;
+       int slot;
+
+       map_addr = KVM_VMM_BASE + (1UL << KVM_VMM_SHIFT);
+       rtc_phys_addr = LOCAL_MMR_OFFSET | SH_RTC;
+       pte = pte_val(mk_pte_phys(rtc_phys_addr, PAGE_KERNEL_UC));
+       slot = ia64_itr_entry(0x3, map_addr, pte, PAGE_SHIFT);
+       vcpu->arch.sn_rtc_tr_slot = slot;
+       if (slot < 0) {
+               printk(KERN_ERR "Mayday mayday! RTC mapping failed!\n");
+               slot = 0;
+       }
+       return slot;
+}
+
 int kvm_emulate_halt(struct kvm_vcpu *vcpu)
 {
 
@@ -426,7 +467,7 @@ int kvm_emulate_halt(struct kvm_vcpu *vcpu)
 
        if (irqchip_in_kernel(vcpu->kvm)) {
 
-               vcpu_now_itc = ia64_getreg(_IA64_REG_AR_ITC) + vcpu->arch.itc_offset;
+               vcpu_now_itc = kvm_get_itc(vcpu) + vcpu->arch.itc_offset;
 
                if (time_after(vcpu_now_itc, vpd->itm)) {
                        vcpu->arch.timer_check = 1;
@@ -447,10 +488,10 @@ int kvm_emulate_halt(struct kvm_vcpu *vcpu)
                hrtimer_cancel(p_ht);
                vcpu->arch.ht_active = 0;
 
-               if (test_and_clear_bit(KVM_REQ_UNHALT, &vcpu->requests))
+               if (test_and_clear_bit(KVM_REQ_UNHALT, &vcpu->requests) ||
+                               kvm_cpu_has_pending_timer(vcpu))
                        if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED)
-                               vcpu->arch.mp_state =
-                                       KVM_MP_STATE_RUNNABLE;
+                               vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
 
                if (vcpu->arch.mp_state != KVM_MP_STATE_RUNNABLE)
                        return -EINTR;
@@ -551,22 +592,35 @@ static int kvm_insert_vmm_mapping(struct kvm_vcpu *vcpu)
        if (r < 0)
                goto out;
        vcpu->arch.vm_tr_slot = r;
+
+#if defined(CONFIG_IA64_SGI_SN2) || defined(CONFIG_IA64_GENERIC)
+       if (kvm->arch.is_sn2) {
+               r = kvm_sn2_setup_mappings(vcpu);
+               if (r < 0)
+                       goto out;
+       }
+#endif
+
        r = 0;
 out:
        return r;
-
 }
 
 static void kvm_purge_vmm_mapping(struct kvm_vcpu *vcpu)
 {
-
+       struct kvm *kvm = vcpu->kvm;
        ia64_ptr_entry(0x3, vcpu->arch.vmm_tr_slot);
        ia64_ptr_entry(0x3, vcpu->arch.vm_tr_slot);
-
+#if defined(CONFIG_IA64_SGI_SN2) || defined(CONFIG_IA64_GENERIC)
+       if (kvm->arch.is_sn2)
+               ia64_ptr_entry(0x3, vcpu->arch.sn_rtc_tr_slot);
+#endif
 }
 
 static int kvm_vcpu_pre_transition(struct kvm_vcpu *vcpu)
 {
+       unsigned long psr;
+       int r;
        int cpu = smp_processor_id();
 
        if (vcpu->arch.last_run_cpu != cpu ||
@@ -578,36 +632,27 @@ static int kvm_vcpu_pre_transition(struct kvm_vcpu *vcpu)
 
        vcpu->arch.host_rr6 = ia64_get_rr(RR6);
        vti_set_rr6(vcpu->arch.vmm_rr);
-       return kvm_insert_vmm_mapping(vcpu);
+       local_irq_save(psr);
+       r = kvm_insert_vmm_mapping(vcpu);
+       local_irq_restore(psr);
+       return r;
 }
+
 static void kvm_vcpu_post_transition(struct kvm_vcpu *vcpu)
 {
        kvm_purge_vmm_mapping(vcpu);
        vti_set_rr6(vcpu->arch.host_rr6);
 }
 
-static int  vti_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 {
        union context *host_ctx, *guest_ctx;
        int r;
 
-       /*Get host and guest context with guest address space.*/
-       host_ctx = kvm_get_host_context(vcpu);
-       guest_ctx = kvm_get_guest_context(vcpu);
-
-       r = kvm_vcpu_pre_transition(vcpu);
-       if (r < 0)
-               goto out;
-       kvm_vmm_info->tramp_entry(host_ctx, guest_ctx);
-       kvm_vcpu_post_transition(vcpu);
-       r = 0;
-out:
-       return r;
-}
-
-static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
-{
-       int r;
+       /*
+        * down_read() may sleep and return with interrupts enabled
+        */
+       down_read(&vcpu->kvm->slots_lock);
 
 again:
        if (signal_pending(current)) {
@@ -616,26 +661,31 @@ again:
                goto out;
        }
 
-       /*
-        * down_read() may sleep and return with interrupts enabled
-        */
-       down_read(&vcpu->kvm->slots_lock);
-
        preempt_disable();
        local_irq_disable();
 
-       vcpu->guest_mode = 1;
+       /*Get host and guest context with guest address space.*/
+       host_ctx = kvm_get_host_context(vcpu);
+       guest_ctx = kvm_get_guest_context(vcpu);
+
+       clear_bit(KVM_REQ_KICK, &vcpu->requests);
+
+       r = kvm_vcpu_pre_transition(vcpu);
+       if (r < 0)
+               goto vcpu_run_fail;
+
+       up_read(&vcpu->kvm->slots_lock);
        kvm_guest_enter();
-       r = vti_vcpu_run(vcpu, kvm_run);
-       if (r < 0) {
-               local_irq_enable();
-               preempt_enable();
-               kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
-               goto out;
-       }
+
+       /*
+        * Transition to the guest
+        */
+       kvm_vmm_info->tramp_entry(host_ctx, guest_ctx);
+
+       kvm_vcpu_post_transition(vcpu);
 
        vcpu->arch.launched = 1;
-       vcpu->guest_mode = 0;
+       set_bit(KVM_REQ_KICK, &vcpu->requests);
        local_irq_enable();
 
        /*
@@ -646,9 +696,10 @@ again:
         */
        barrier();
        kvm_guest_exit();
-       up_read(&vcpu->kvm->slots_lock);
        preempt_enable();
 
+       down_read(&vcpu->kvm->slots_lock);
+
        r = kvm_handle_exit(kvm_run, vcpu);
 
        if (r > 0) {
@@ -657,12 +708,20 @@ again:
        }
 
 out:
+       up_read(&vcpu->kvm->slots_lock);
        if (r > 0) {
                kvm_resched(vcpu);
+               down_read(&vcpu->kvm->slots_lock);
                goto again;
        }
 
        return r;
+
+vcpu_run_fail:
+       local_irq_enable();
+       preempt_enable();
+       kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
+       goto out;
 }
 
 static void kvm_set_mmio_data(struct kvm_vcpu *vcpu)
@@ -788,6 +847,9 @@ struct  kvm *kvm_arch_create_vm(void)
 
        if (IS_ERR(kvm))
                return ERR_PTR(-ENOMEM);
+
+       kvm->arch.is_sn2 = ia64_platform_is("sn2");
+
        kvm_init_vm(kvm);
 
        kvm->arch.online_vcpus = 0;
@@ -884,7 +946,7 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
        RESTORE_REGS(saved_gp);
 
        vcpu->arch.irq_new_pending = 1;
-       vcpu->arch.itc_offset = regs->saved_itc - ia64_getreg(_IA64_REG_AR_ITC);
+       vcpu->arch.itc_offset = regs->saved_itc - kvm_get_itc(vcpu);
        set_bit(KVM_REQ_RESUME, &vcpu->requests);
 
        vcpu_put(vcpu);
@@ -1043,10 +1105,6 @@ static void kvm_free_vmm_area(void)
        }
 }
 
-static void vti_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
-{
-}
-
 static int vti_init_vpd(struct kvm_vcpu *vcpu)
 {
        int i;
@@ -1165,7 +1223,7 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
                regs->cr_iip = PALE_RESET_ENTRY;
 
                /*Initialize itc offset for vcpus*/
-               itc_offset = 0UL - ia64_getreg(_IA64_REG_AR_ITC);
+               itc_offset = 0UL - kvm_get_itc(vcpu);
                for (i = 0; i < kvm->arch.online_vcpus; i++) {
                        v = (struct kvm_vcpu *)((char *)vcpu +
                                        sizeof(struct kvm_vcpu_data) * i);
@@ -1237,6 +1295,7 @@ static int vti_vcpu_setup(struct kvm_vcpu *vcpu, int id)
 
        local_irq_save(psr);
        r = kvm_insert_vmm_mapping(vcpu);
+       local_irq_restore(psr);
        if (r)
                goto fail;
        r = kvm_vcpu_init(vcpu, vcpu->kvm, id);
@@ -1254,13 +1313,11 @@ static int vti_vcpu_setup(struct kvm_vcpu *vcpu, int id)
                goto uninit;
 
        kvm_purge_vmm_mapping(vcpu);
-       local_irq_restore(psr);
 
        return 0;
 uninit:
        kvm_vcpu_uninit(vcpu);
 fail:
-       local_irq_restore(psr);
        return r;
 }
 
@@ -1291,7 +1348,6 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
        vcpu->kvm = kvm;
 
        cpu = get_cpu();
-       vti_vcpu_load(vcpu, cpu);
        r = vti_vcpu_setup(vcpu, id);
        put_cpu();
 
@@ -1427,7 +1483,7 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
        }
        for (i = 0; i < 4; i++)
                regs->insvc[i] = vcpu->arch.insvc[i];
-       regs->saved_itc = vcpu->arch.itc_offset + ia64_getreg(_IA64_REG_AR_ITC);
+       regs->saved_itc = vcpu->arch.itc_offset + kvm_get_itc(vcpu);
        SAVE_REGS(xtp);
        SAVE_REGS(metaphysical_rr0);
        SAVE_REGS(metaphysical_rr4);
@@ -1574,6 +1630,7 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
 
 void kvm_arch_flush_shadow(struct kvm *kvm)
 {
+       kvm_flush_remote_tlbs(kvm);
 }
 
 long kvm_arch_dev_ioctl(struct file *filp,
@@ -1616,8 +1673,37 @@ out:
        return 0;
 }
 
+
+/*
+ * On SN2, the ITC isn't stable, so copy in fast path code to use the
+ * SN2 RTC, replacing the ITC based default version.
+ */
+static void kvm_patch_vmm(struct kvm_vmm_info *vmm_info,
+                         struct module *module)
+{
+       unsigned long new_ar, new_ar_sn2;
+       unsigned long module_base;
+
+       if (!ia64_platform_is("sn2"))
+               return;
+
+       module_base = (unsigned long)module->module_core;
+
+       new_ar = kvm_vmm_base + vmm_info->patch_mov_ar - module_base;
+       new_ar_sn2 = kvm_vmm_base + vmm_info->patch_mov_ar_sn2 - module_base;
+
+       printk(KERN_INFO "kvm: Patching ITC emulation to use SGI SN2 RTC "
+              "as source\n");
+
+       /*
+        * Copy the SN2 version of mov_ar into place. They are both
+        * the same size, so 6 bundles is sufficient (6 * 0x10).
+        */
+       memcpy((void *)new_ar, (void *)new_ar_sn2, 0x60);
+}
+
 static int kvm_relocate_vmm(struct kvm_vmm_info *vmm_info,
-                                               struct module *module)
+                           struct module *module)
 {
        unsigned long module_base;
        unsigned long vmm_size;
@@ -1639,6 +1725,7 @@ static int kvm_relocate_vmm(struct kvm_vmm_info *vmm_info,
                return -EFAULT;
 
        memcpy((void *)kvm_vmm_base, (void *)module_base, vmm_size);
+       kvm_patch_vmm(vmm_info, module);
        kvm_flush_icache(kvm_vmm_base, vmm_size);
 
        /*Recalculate kvm_vmm_info based on new VMM*/
@@ -1792,38 +1879,24 @@ void kvm_arch_hardware_unsetup(void)
 {
 }
 
-static void vcpu_kick_intr(void *info)
-{
-#ifdef DEBUG
-       struct kvm_vcpu *vcpu = (struct kvm_vcpu *)info;
-       printk(KERN_DEBUG"vcpu_kick_intr %p \n", vcpu);
-#endif
-}
-
 void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
 {
-       int ipi_pcpu = vcpu->cpu;
-       int cpu = get_cpu();
+       int me;
+       int cpu = vcpu->cpu;
 
        if (waitqueue_active(&vcpu->wq))
                wake_up_interruptible(&vcpu->wq);
 
-       if (vcpu->guest_mode && cpu != ipi_pcpu)
-               smp_call_function_single(ipi_pcpu, vcpu_kick_intr, vcpu, 0);
+       me = get_cpu();
+       if (cpu != me && (unsigned) cpu < nr_cpu_ids && cpu_online(cpu))
+               if (!test_and_set_bit(KVM_REQ_KICK, &vcpu->requests))
+                       smp_send_reschedule(cpu);
        put_cpu();
 }
 
-int kvm_apic_set_irq(struct kvm_vcpu *vcpu, u8 vec, u8 trig)
+int kvm_apic_set_irq(struct kvm_vcpu *vcpu, struct kvm_lapic_irq *irq)
 {
-
-       struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd);
-
-       if (!test_and_set_bit(vec, &vpd->irr[0])) {
-               vcpu->arch.irq_new_pending = 1;
-               kvm_vcpu_kick(vcpu);
-               return 1;
-       }
-       return 0;
+       return __apic_accept_irq(vcpu, irq->vector);
 }
 
 int kvm_apic_match_physical_addr(struct kvm_lapic *apic, u16 dest)
@@ -1836,20 +1909,18 @@ int kvm_apic_match_logical_addr(struct kvm_lapic *apic, u8 mda)
        return 0;
 }
 
-struct kvm_vcpu *kvm_get_lowest_prio_vcpu(struct kvm *kvm, u8 vector,
-                                      unsigned long bitmap)
+int kvm_apic_compare_prio(struct kvm_vcpu *vcpu1, struct kvm_vcpu *vcpu2)
 {
-       struct kvm_vcpu *lvcpu = kvm->vcpus[0];
-       int i;
-
-       for (i = 1; i < kvm->arch.online_vcpus; i++) {
-               if (!kvm->vcpus[i])
-                       continue;
-               if (lvcpu->arch.xtp > kvm->vcpus[i]->arch.xtp)
-                       lvcpu = kvm->vcpus[i];
-       }
+       return vcpu1->arch.xtp - vcpu2->arch.xtp;
+}
 
-       return lvcpu;
+int kvm_apic_match_dest(struct kvm_vcpu *vcpu, struct kvm_lapic *source,
+               int short_hand, int dest, int dest_mode)
+{
+       struct kvm_lapic *target = vcpu->arch.apic;
+       return (dest_mode == 0) ?
+               kvm_apic_match_physical_addr(target, dest) :
+               kvm_apic_match_logical_addr(target, dest);
 }
 
 static int find_highest_bits(int *dat)
@@ -1888,6 +1959,12 @@ int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu)
        return 0;
 }
 
+int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu)
+{
+       /* do real check here */
+       return 1;
+}
+
 int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
 {
        return vcpu->arch.timer_fired;
@@ -1918,6 +1995,7 @@ static int vcpu_reset(struct kvm_vcpu *vcpu)
        long psr;
        local_irq_save(psr);
        r = kvm_insert_vmm_mapping(vcpu);
+       local_irq_restore(psr);
        if (r)
                goto fail;
 
@@ -1930,7 +2008,6 @@ static int vcpu_reset(struct kvm_vcpu *vcpu)
        kvm_purge_vmm_mapping(vcpu);
        r = 0;
 fail:
-       local_irq_restore(psr);
        return r;
 }
 
index a8ae52ed56358e3e3d1b81edf095ebfceaa36a3b..e4b82319881db227044550b3330f1eea69770f50 100644 (file)
@@ -21,6 +21,9 @@
 
 #include <linux/kvm_host.h>
 #include <linux/smp.h>
+#include <asm/sn/addrs.h>
+#include <asm/sn/clksupport.h>
+#include <asm/sn/shub_mmr.h>
 
 #include "vti.h"
 #include "misc.h"
@@ -188,12 +191,35 @@ static struct ia64_pal_retval pal_freq_base(struct kvm_vcpu *vcpu)
        return result;
 }
 
-static struct ia64_pal_retval pal_freq_ratios(struct kvm_vcpu *vcpu)
+/*
+ * On the SGI SN2, the ITC isn't stable. Emulation backed by the SN2
+ * RTC is used instead. This function patches the ratios from SAL
+ * to match the RTC before providing them to the guest.
+ */
+static void sn2_patch_itc_freq_ratios(struct ia64_pal_retval *result)
 {
+       struct pal_freq_ratio *ratio;
+       unsigned long sal_freq, sal_drift, factor;
+
+       result->status = ia64_sal_freq_base(SAL_FREQ_BASE_PLATFORM,
+                                           &sal_freq, &sal_drift);
+       ratio = (struct pal_freq_ratio *)&result->v2;
+       factor = ((sal_freq * 3) + (sn_rtc_cycles_per_second / 2)) /
+               sn_rtc_cycles_per_second;
+
+       ratio->num = 3;
+       ratio->den = factor;
+}
 
+static struct ia64_pal_retval pal_freq_ratios(struct kvm_vcpu *vcpu)
+{
        struct ia64_pal_retval result;
 
        PAL_CALL(result, PAL_FREQ_RATIOS, 0, 0, 0);
+
+       if (vcpu->kvm->arch.is_sn2)
+               sn2_patch_itc_freq_ratios(&result);
+
        return result;
 }
 
index 6d6cbcb14893c281952f82365779cf925cf7f0e0..ee541cebcd78aa7442aff169308a17e0259bc8c4 100644 (file)
@@ -20,6 +20,10 @@ void kvm_free_lapic(struct kvm_vcpu *vcpu);
 
 int kvm_apic_match_physical_addr(struct kvm_lapic *apic, u16 dest);
 int kvm_apic_match_logical_addr(struct kvm_lapic *apic, u8 mda);
-int kvm_apic_set_irq(struct kvm_vcpu *vcpu, u8 vec, u8 trig);
+int kvm_apic_match_dest(struct kvm_vcpu *vcpu, struct kvm_lapic *source,
+               int short_hand, int dest, int dest_mode);
+int kvm_apic_compare_prio(struct kvm_vcpu *vcpu1, struct kvm_vcpu *vcpu2);
+int kvm_apic_set_irq(struct kvm_vcpu *vcpu, struct kvm_lapic_irq *irq);
+#define kvm_apic_present(x) (true)
 
 #endif
index 32254ce9a1bda61a25f3edce6143206a7d651aa1..f793be3effff5a773c3f5586994b374d1a5d2f82 100644 (file)
@@ -11,6 +11,7 @@
 
 #include <asm/asmmacro.h>
 #include <asm/processor.h>
+#include <asm/kvm_host.h>
 
 #include "vti.h"
 #include "asm-offsets.h"
@@ -140,6 +141,35 @@ GLOBAL_ENTRY(kvm_asm_mov_from_ar)
        ;;
 END(kvm_asm_mov_from_ar)
 
+/*
+ * Special SGI SN2 optimized version of mov_from_ar using the SN2 RTC
+ * clock as its source for emulating the ITC. This version will be
+ * copied on top of the original version if the host is determined to
+ * be an SN2.
+ */
+GLOBAL_ENTRY(kvm_asm_mov_from_ar_sn2)
+       add r18=VMM_VCPU_ITC_OFS_OFFSET, r21
+       movl r19 = (KVM_VMM_BASE+(1<<KVM_VMM_SHIFT))
+
+       add r16=VMM_VCPU_LAST_ITC_OFFSET,r21
+       extr.u r17=r25,6,7
+       mov r24=b0
+       ;;
+       ld8 r18=[r18]
+       ld8 r19=[r19]
+       addl r20=@gprel(asm_mov_to_reg),gp
+       ;;
+       add r19=r19,r18
+       shladd r17=r17,4,r20
+       ;;
+       adds r30=kvm_resume_to_guest-asm_mov_to_reg,r20
+       st8 [r16] = r19
+       mov b0=r17
+       br.sptk.few b0
+       ;;
+END(kvm_asm_mov_from_ar_sn2)
+
+
 
 // mov r1=rr[r3]
 GLOBAL_ENTRY(kvm_asm_mov_from_rr)
index b1dc80952d91fc49918de183bbeb454276e723e7..a8f84da04b49b8baa8daed4f49bdc2e178e240be 100644 (file)
@@ -652,20 +652,25 @@ void  kvm_ia64_handle_break(unsigned long ifa, struct kvm_pt_regs *regs,
                unsigned long isr, unsigned long iim)
 {
        struct kvm_vcpu *v = current_vcpu;
+       long psr;
 
        if (ia64_psr(regs)->cpl == 0) {
                /* Allow hypercalls only when cpl = 0.  */
                if (iim == DOMN_PAL_REQUEST) {
+                       local_irq_save(psr);
                        set_pal_call_data(v);
                        vmm_transition(v);
                        get_pal_call_result(v);
                        vcpu_increment_iip(v);
+                       local_irq_restore(psr);
                        return;
                } else if (iim == DOMN_SAL_REQUEST) {
+                       local_irq_save(psr);
                        set_sal_call_data(v);
                        vmm_transition(v);
                        get_sal_call_result(v);
                        vcpu_increment_iip(v);
+                       local_irq_restore(psr);
                        return;
                }
        }
index a18ee17b9192e01f3c3f1683817be3ae775e1ca7..a2c6c15e4761871ffe98f5bba8b52b6b70691056 100644 (file)
@@ -788,13 +788,29 @@ void vcpu_set_fpreg(struct kvm_vcpu *vcpu, unsigned long reg,
                setfpreg(reg, val, regs);   /* FIXME: handle NATs later*/
 }
 
+/*
+ * The Altix RTC is mapped specially here for the vmm module
+ */
+#define SN_RTC_BASE    (u64 *)(KVM_VMM_BASE+(1UL<<KVM_VMM_SHIFT))
+static long kvm_get_itc(struct kvm_vcpu *vcpu)
+{
+#if defined(CONFIG_IA64_SGI_SN2) || defined(CONFIG_IA64_GENERIC)
+       struct kvm *kvm = (struct kvm *)KVM_VM_BASE;
+
+       if (kvm->arch.is_sn2)
+               return (*SN_RTC_BASE);
+       else
+#endif
+               return ia64_getreg(_IA64_REG_AR_ITC);
+}
+
 /************************************************************************
  * lsapic timer
  ***********************************************************************/
 u64 vcpu_get_itc(struct kvm_vcpu *vcpu)
 {
        unsigned long guest_itc;
-       guest_itc = VMX(vcpu, itc_offset) + ia64_getreg(_IA64_REG_AR_ITC);
+       guest_itc = VMX(vcpu, itc_offset) + kvm_get_itc(vcpu);
 
        if (guest_itc >= VMX(vcpu, last_itc)) {
                VMX(vcpu, last_itc) = guest_itc;
@@ -809,7 +825,7 @@ static void vcpu_set_itc(struct kvm_vcpu *vcpu, u64 val)
        struct kvm_vcpu *v;
        struct kvm *kvm;
        int i;
-       long itc_offset = val - ia64_getreg(_IA64_REG_AR_ITC);
+       long itc_offset = val - kvm_get_itc(vcpu);
        unsigned long vitv = VCPU(vcpu, itv);
 
        kvm = (struct kvm *)KVM_VM_BASE;
index 9eee5c04baccceaaf646d72922d3403cf313e2d5..f4b4c899bb6ce9d77d42d7a796931cb13e47ab03 100644 (file)
@@ -30,15 +30,19 @@ MODULE_AUTHOR("Intel");
 MODULE_LICENSE("GPL");
 
 extern char kvm_ia64_ivt;
+extern char kvm_asm_mov_from_ar;
+extern char kvm_asm_mov_from_ar_sn2;
 extern fpswa_interface_t *vmm_fpswa_interface;
 
 long vmm_sanity = 1;
 
 struct kvm_vmm_info vmm_info = {
-       .module      = THIS_MODULE,
-       .vmm_entry   = vmm_entry,
-       .tramp_entry = vmm_trampoline,
-       .vmm_ivt     = (unsigned long)&kvm_ia64_ivt,
+       .module                 = THIS_MODULE,
+       .vmm_entry              = vmm_entry,
+       .tramp_entry            = vmm_trampoline,
+       .vmm_ivt                = (unsigned long)&kvm_ia64_ivt,
+       .patch_mov_ar           = (unsigned long)&kvm_asm_mov_from_ar,
+       .patch_mov_ar_sn2       = (unsigned long)&kvm_asm_mov_from_ar_sn2,
 };
 
 static int __init  kvm_vmm_init(void)
index 3ef1a017a3189e76e713767f56d7c3a9e95fdb3c..40920c630649c3affa7556c355fea302b614f2a3 100644 (file)
@@ -95,7 +95,7 @@ GLOBAL_ENTRY(kvm_vmm_panic)
        ;;
        srlz.i    // guarantee that interruption collection is on
        ;;
-       //(p15) ssm psr.i               // restore psr.i
+       (p15) ssm psr.i               // restore psr.i
        addl r14=@gprel(ia64_leave_hypervisor),gp
        ;;
        KVM_SAVE_REST
@@ -249,7 +249,7 @@ ENTRY(kvm_break_fault)
        ;;
        srlz.i         // guarantee that interruption collection is on
        ;;
-       //(p15)ssm psr.i               // restore psr.i
+       (p15)ssm psr.i               // restore psr.i
        addl r14=@gprel(ia64_leave_hypervisor),gp
        ;;
        KVM_SAVE_REST
@@ -439,7 +439,7 @@ kvm_dispatch_vexirq:
        ;;
        srlz.i // guarantee that interruption collection is on
        ;;
-       //(p15) ssm psr.i               // restore psr.i
+       (p15) ssm psr.i               // restore psr.i
        adds r3=8,r2                // set up second base pointer
        ;;
        KVM_SAVE_REST
@@ -819,7 +819,7 @@ ENTRY(kvm_dtlb_miss_dispatch)
        ;;
        srlz.i     // guarantee that interruption collection is on
        ;;
-       //(p15) ssm psr.i               // restore psr.i
+       (p15) ssm psr.i               // restore psr.i
        addl r14=@gprel(ia64_leave_hypervisor_prepare),gp
        ;;
        KVM_SAVE_REST
@@ -842,7 +842,7 @@ ENTRY(kvm_itlb_miss_dispatch)
        ;;
        srlz.i   // guarantee that interruption collection is on
        ;;
-       //(p15) ssm psr.i               // restore psr.i
+       (p15) ssm psr.i               // restore psr.i
        addl r14=@gprel(ia64_leave_hypervisor),gp
        ;;
        KVM_SAVE_REST
@@ -871,7 +871,7 @@ ENTRY(kvm_dispatch_reflection)
        ;;
        srlz.i   // guarantee that interruption collection is on
        ;;
-       //(p15) ssm psr.i               // restore psr.i
+       (p15) ssm psr.i               // restore psr.i
        addl r14=@gprel(ia64_leave_hypervisor),gp
        ;;
        KVM_SAVE_REST
@@ -898,7 +898,7 @@ ENTRY(kvm_dispatch_virtualization_fault)
        ;;
        srlz.i    // guarantee that interruption collection is on
        ;;
-       //(p15) ssm psr.i               // restore psr.i
+       (p15) ssm psr.i               // restore psr.i
        addl r14=@gprel(ia64_leave_hypervisor_prepare),gp
        ;;
        KVM_SAVE_REST
@@ -920,7 +920,7 @@ ENTRY(kvm_dispatch_interrupt)
        ;;
        srlz.i
        ;;
-       //(p15) ssm psr.i
+       (p15) ssm psr.i
        addl r14=@gprel(ia64_leave_hypervisor),gp
        ;;
        KVM_SAVE_REST
@@ -1333,7 +1333,7 @@ hostret =   r24
        ;;
 (p7)    srlz.i
        ;;
-//(p6)    ssm psr.i
+(p6)    ssm psr.i
        ;;
        mov rp=rpsave
        mov ar.pfs=pfssave
index 2c2501f131597c6389e82e6c4e3e62eb6ecf7f80..4290a429bf7c9b48f0cd9ff049d60626e23af689 100644 (file)
@@ -254,7 +254,8 @@ u64 guest_vhpt_lookup(u64 iha, u64 *pte)
                        "(p7) st8 [%2]=r9;;"
                        "ssm psr.ic;;"
                        "srlz.d;;"
-                       /* "ssm psr.i;;" Once interrupts in vmm open, need fix*/
+                       "ssm psr.i;;"
+                       "srlz.d;;"
                        : "=r"(ret) : "r"(iha), "r"(pte):"memory");
 
        return ret;
index 355926730e8d9f49387138fb2cf1cc5b9b3b8898..89faacad5d17d71871d75453fdf55d107f15985e 100644 (file)
@@ -8,6 +8,7 @@ mainmenu "Linux Kernel Configuration"
 config MN10300
        def_bool y
        select HAVE_OPROFILE
+       select HAVE_ARCH_TRACEHOOK
 
 config AM33
        def_bool y
index bf09f8bb392eef17bf41441050b5adb6e9920d0b..49105462e6fc9c5fb095fa35ad25b55493ba15a6 100644 (file)
@@ -34,7 +34,7 @@
  */
 typedef unsigned long elf_greg_t;
 
-#define ELF_NGREG (sizeof (struct pt_regs) / sizeof(elf_greg_t))
+#define ELF_NGREG ((sizeof(struct pt_regs) / sizeof(elf_greg_t)) - 1)
 typedef elf_greg_t elf_gregset_t[ELF_NGREG];
 
 #define ELF_NFPREG 32
@@ -76,6 +76,7 @@ do {                                                                  \
 } while (0)
 
 #define USE_ELF_CORE_DUMP
+#define CORE_DUMP_USE_REGSET
 #define ELF_EXEC_PAGESIZE      4096
 
 /*
index 73239271873da304c88fe396eda7074686c3185a..f7d4b0d285e8dddb835781ac071898ff5d34dfe9 100644 (file)
@@ -143,13 +143,7 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
 
 unsigned long get_wchan(struct task_struct *p);
 
-#define task_pt_regs(task)                                             \
-({                                                                     \
-       struct pt_regs *__regs__;                                       \
-       __regs__ = (struct pt_regs *) (KSTK_TOP(task_stack_page(task)) - 8); \
-       __regs__ - 1;                                                   \
-})
-
+#define task_pt_regs(task) ((task)->thread.uregs)
 #define KSTK_EIP(task) (task_pt_regs(task)->pc)
 #define KSTK_ESP(task) (task_pt_regs(task)->sp)
 
index 7b06cc623d8b074a3a23511673e2c170bb033840..921942ed1b03508a582e6e226ed33acf50c8fa5b 100644 (file)
@@ -91,9 +91,17 @@ extern struct pt_regs *__frame; /* current frame pointer */
 #if defined(__KERNEL__)
 
 #if !defined(__ASSEMBLY__)
+struct task_struct;
+
 #define user_mode(regs)                        (((regs)->epsw & EPSW_nSL) == EPSW_nSL)
 #define instruction_pointer(regs)      ((regs)->pc)
+#define user_stack_pointer(regs)       ((regs)->sp)
 extern void show_regs(struct pt_regs *);
+
+#define arch_has_single_step() (1)
+extern void user_enable_single_step(struct task_struct *);
+extern void user_disable_single_step(struct task_struct *);
+
 #endif  /*  !__ASSEMBLY  */
 
 #define profile_pc(regs) ((regs)->pc)
index 3dc3e462f92a4e39b496f4d78f7bc1adc6e64e85..7408a27199f342fd26679db7a326aa0b07885f53 100644 (file)
@@ -76,7 +76,7 @@ ENTRY(system_call)
        cmp     nr_syscalls,d0
        bcc     syscall_badsys
        btst    _TIF_SYSCALL_TRACE,(TI_flags,a2)
-       bne     syscall_trace_entry
+       bne     syscall_entry_trace
 syscall_call:
        add     d0,d0,a1
        add     a1,a1
@@ -104,11 +104,10 @@ restore_all:
 syscall_exit_work:
        btst    _TIF_SYSCALL_TRACE,d2
        beq     work_pending
-       __sti                           # could let do_syscall_trace() call
+       __sti                           # could let syscall_trace_exit() call
                                        # schedule() instead
        mov     fp,d0
-       mov     1,d1
-       call    do_syscall_trace[],0    # do_syscall_trace(regs,entryexit)
+       call    syscall_trace_exit[],0  # syscall_trace_exit(regs)
        jmp     resume_userspace
 
        ALIGN
@@ -138,13 +137,11 @@ work_notifysig:
        jmp     resume_userspace
 
        # perform syscall entry tracing
-syscall_trace_entry:
+syscall_entry_trace:
        mov     -ENOSYS,d0
        mov     d0,(REG_D0,fp)
        mov     fp,d0
-       clr     d1
-       call    do_syscall_trace[],0
-       mov     (REG_ORIG_D0,fp),d0
+       call    syscall_trace_entry[],0 # returns the syscall number to actually use
        mov     (REG_D1,fp),d1
        cmp     nr_syscalls,d0
        bcs     syscall_call
index d6d6cdc75c523b66e6218d493311aac2afc5f017..e143339ad28e00869d9b23c1d882d994cdd89d62 100644 (file)
@@ -17,6 +17,9 @@
 #include <linux/errno.h>
 #include <linux/ptrace.h>
 #include <linux/user.h>
+#include <linux/regset.h>
+#include <linux/elf.h>
+#include <linux/tracehook.h>
 #include <asm/uaccess.h>
 #include <asm/pgtable.h>
 #include <asm/system.h>
@@ -64,12 +67,6 @@ static inline int get_stack_long(struct task_struct *task, int offset)
                ((unsigned long) task->thread.uregs + offset);
 }
 
-/*
- * this routine will put a word on the processes privileged stack.
- * the offset is how far from the base addr as stored in the TSS.
- * this routine assumes that all the privileged stacks are in our
- * data space.
- */
 static inline
 int put_stack_long(struct task_struct *task, int offset, unsigned long data)
 {
@@ -80,94 +77,233 @@ int put_stack_long(struct task_struct *task, int offset, unsigned long data)
        return 0;
 }
 
-static inline unsigned long get_fpregs(struct fpu_state_struct *buf,
-                                      struct task_struct *tsk)
+/*
+ * retrieve the contents of MN10300 userspace general registers
+ */
+static int genregs_get(struct task_struct *target,
+                      const struct user_regset *regset,
+                      unsigned int pos, unsigned int count,
+                      void *kbuf, void __user *ubuf)
 {
-       return __copy_to_user(buf, &tsk->thread.fpu_state,
-                             sizeof(struct fpu_state_struct));
+       const struct pt_regs *regs = task_pt_regs(target);
+       int ret;
+
+       /* we need to skip regs->next */
+       ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+                                 regs, 0, PT_ORIG_D0 * sizeof(long));
+       if (ret < 0)
+               return ret;
+
+       ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+                                 &regs->orig_d0, PT_ORIG_D0 * sizeof(long),
+                                 NR_PTREGS * sizeof(long));
+       if (ret < 0)
+               return ret;
+
+       return user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
+                                       NR_PTREGS * sizeof(long), -1);
 }
 
-static inline unsigned long set_fpregs(struct task_struct *tsk,
-                                      struct fpu_state_struct *buf)
+/*
+ * update the contents of the MN10300 userspace general registers
+ */
+static int genregs_set(struct task_struct *target,
+                      const struct user_regset *regset,
+                      unsigned int pos, unsigned int count,
+                      const void *kbuf, const void __user *ubuf)
 {
-       return __copy_from_user(&tsk->thread.fpu_state, buf,
-                               sizeof(struct fpu_state_struct));
+       struct pt_regs *regs = task_pt_regs(target);
+       unsigned long tmp;
+       int ret;
+
+       /* we need to skip regs->next */
+       ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+                                regs, 0, PT_ORIG_D0 * sizeof(long));
+       if (ret < 0)
+               return ret;
+
+       ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+                                &regs->orig_d0, PT_ORIG_D0 * sizeof(long),
+                                PT_EPSW * sizeof(long));
+       if (ret < 0)
+               return ret;
+
+       /* we need to mask off changes to EPSW */
+       tmp = regs->epsw;
+       ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+                                &tmp, PT_EPSW * sizeof(long),
+                                PT_PC * sizeof(long));
+       tmp &= EPSW_FLAG_V | EPSW_FLAG_C | EPSW_FLAG_N | EPSW_FLAG_Z;
+       tmp |= regs->epsw & ~(EPSW_FLAG_V | EPSW_FLAG_C | EPSW_FLAG_N |
+                             EPSW_FLAG_Z);
+       regs->epsw = tmp;
+
+       if (ret < 0)
+               return ret;
+
+       /* and finally load the PC */
+       ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+                                &regs->pc, PT_PC * sizeof(long),
+                                NR_PTREGS * sizeof(long));
+
+       if (ret < 0)
+               return ret;
+
+       return user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
+                                        NR_PTREGS * sizeof(long), -1);
 }
 
-static inline void fpsave_init(struct task_struct *task)
+/*
+ * retrieve the contents of MN10300 userspace FPU registers
+ */
+static int fpuregs_get(struct task_struct *target,
+                      const struct user_regset *regset,
+                      unsigned int pos, unsigned int count,
+                      void *kbuf, void __user *ubuf)
 {
-       memset(&task->thread.fpu_state, 0, sizeof(struct fpu_state_struct));
+       const struct fpu_state_struct *fpregs = &target->thread.fpu_state;
+       int ret;
+
+       unlazy_fpu(target);
+
+       ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+                                 fpregs, 0, sizeof(*fpregs));
+       if (ret < 0)
+               return ret;
+
+       return user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
+                                       sizeof(*fpregs), -1);
 }
 
 /*
- * make sure the single step bit is not set
+ * update the contents of the MN10300 userspace FPU registers
  */
-void ptrace_disable(struct task_struct *child)
+static int fpuregs_set(struct task_struct *target,
+                      const struct user_regset *regset,
+                      unsigned int pos, unsigned int count,
+                      const void *kbuf, const void __user *ubuf)
+{
+       struct fpu_state_struct fpu_state = target->thread.fpu_state;
+       int ret;
+
+       ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+                                &fpu_state, 0, sizeof(fpu_state));
+       if (ret < 0)
+               return ret;
+
+       fpu_kill_state(target);
+       target->thread.fpu_state = fpu_state;
+       set_using_fpu(target);
+
+       return user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
+                                        sizeof(fpu_state), -1);
+}
+
+/*
+ * determine if the FPU registers have actually been used
+ */
+static int fpuregs_active(struct task_struct *target,
+                         const struct user_regset *regset)
+{
+       return is_using_fpu(target) ? regset->n : 0;
+}
+
+/*
+ * Define the register sets available on the MN10300 under Linux
+ */
+enum mn10300_regset {
+       REGSET_GENERAL,
+       REGSET_FPU,
+};
+
+static const struct user_regset mn10300_regsets[] = {
+       /*
+        * General register format is:
+        *      A3, A2, D3, D2, MCVF, MCRL, MCRH, MDRQ
+        *      E1, E0, E7...E2, SP, LAR, LIR, MDR
+        *      A1, A0, D1, D0, ORIG_D0, EPSW, PC
+        */
+       [REGSET_GENERAL] = {
+               .core_note_type = NT_PRSTATUS,
+               .n              = ELF_NGREG,
+               .size           = sizeof(long),
+               .align          = sizeof(long),
+               .get            = genregs_get,
+               .set            = genregs_set,
+       },
+       /*
+        * FPU register format is:
+        *      FS0-31, FPCR
+        */
+       [REGSET_FPU] = {
+               .core_note_type = NT_PRFPREG,
+               .n              = sizeof(struct fpu_state_struct) / sizeof(long),
+               .size           = sizeof(long),
+               .align          = sizeof(long),
+               .get            = fpuregs_get,
+               .set            = fpuregs_set,
+               .active         = fpuregs_active,
+       },
+};
+
+static const struct user_regset_view user_mn10300_native_view = {
+       .name           = "mn10300",
+       .e_machine      = EM_MN10300,
+       .regsets        = mn10300_regsets,
+       .n              = ARRAY_SIZE(mn10300_regsets),
+};
+
+const struct user_regset_view *task_user_regset_view(struct task_struct *task)
+{
+       return &user_mn10300_native_view;
+}
+
+/*
+ * set the single-step bit
+ */
+void user_enable_single_step(struct task_struct *child)
 {
 #ifndef CONFIG_MN10300_USING_JTAG
        struct user *dummy = NULL;
        long tmp;
 
        tmp = get_stack_long(child, (unsigned long) &dummy->regs.epsw);
-       tmp &= ~EPSW_T;
+       tmp |= EPSW_T;
        put_stack_long(child, (unsigned long) &dummy->regs.epsw, tmp);
 #endif
 }
 
 /*
- * set the single step bit
+ * make sure the single-step bit is not set
  */
-void ptrace_enable(struct task_struct *child)
+void user_disable_single_step(struct task_struct *child)
 {
 #ifndef CONFIG_MN10300_USING_JTAG
        struct user *dummy = NULL;
        long tmp;
 
        tmp = get_stack_long(child, (unsigned long) &dummy->regs.epsw);
-       tmp |= EPSW_T;
+       tmp &= ~EPSW_T;
        put_stack_long(child, (unsigned long) &dummy->regs.epsw, tmp);
 #endif
 }
 
+void ptrace_disable(struct task_struct *child)
+{
+       user_disable_single_step(child);
+}
+
 /*
  * handle the arch-specific side of process tracing
  */
 long arch_ptrace(struct task_struct *child, long request, long addr, long data)
 {
-       struct fpu_state_struct fpu_state;
-       int i, ret;
+       unsigned long tmp;
+       int ret;
 
        switch (request) {
-       /* read the word at location addr. */
-       case PTRACE_PEEKTEXT: {
-               unsigned long tmp;
-               int copied;
-
-               copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0);
-               ret = -EIO;
-               if (copied != sizeof(tmp))
-                       break;
-               ret = put_user(tmp, (unsigned long *) data);
-               break;
-       }
-
-       /* read the word at location addr. */
-       case PTRACE_PEEKDATA: {
-               unsigned long tmp;
-               int copied;
-
-               copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0);
-               ret = -EIO;
-               if (copied != sizeof(tmp))
-                       break;
-               ret = put_user(tmp, (unsigned long *) data);
-               break;
-       }
-
        /* read the word at location addr in the USER area. */
-       case PTRACE_PEEKUSR: {
-               unsigned long tmp;
-
+       case PTRACE_PEEKUSR:
                ret = -EIO;
                if ((addr & 3) || addr < 0 ||
                    addr > sizeof(struct user) - 3)
@@ -179,17 +315,6 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
                                             ptrace_regid_to_frame[addr]);
                ret = put_user(tmp, (unsigned long *) data);
                break;
-       }
-
-       /* write the word at location addr. */
-       case PTRACE_POKETEXT:
-       case PTRACE_POKEDATA:
-               if (access_process_vm(child, addr, &data, sizeof(data), 1) ==
-                   sizeof(data))
-                       ret = 0;
-               else
-                       ret = -EIO;
-               break;
 
                /* write the word at location addr in the USER area */
        case PTRACE_POKEUSR:
@@ -204,132 +329,32 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
                                             data);
                break;
 
-               /* continue and stop at next (return from) syscall */
-       case PTRACE_SYSCALL:
-               /* restart after signal. */
-       case PTRACE_CONT:
-               ret = -EIO;
-               if ((unsigned long) data > _NSIG)
-                       break;
-               if (request == PTRACE_SYSCALL)
-                       set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
-               else
-                       clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
-               child->exit_code = data;
-               ptrace_disable(child);
-               wake_up_process(child);
-               ret = 0;
-               break;
-
-               /*
-                * make the child exit
-                * - the best I can do is send it a sigkill
-                * - perhaps it should be put in the status that it wants to
-                *   exit
-                */
-       case PTRACE_KILL:
-               ret = 0;
-               if (child->exit_state == EXIT_ZOMBIE)   /* already dead */
-                       break;
-               child->exit_code = SIGKILL;
-               clear_tsk_thread_flag(child, TIF_SINGLESTEP);
-               ptrace_disable(child);
-               wake_up_process(child);
-               break;
-
-       case PTRACE_SINGLESTEP: /* set the trap flag. */
-#ifndef CONFIG_MN10300_USING_JTAG
-               ret = -EIO;
-               if ((unsigned long) data > _NSIG)
-                       break;
-               clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
-               ptrace_enable(child);
-               child->exit_code = data;
-               wake_up_process(child);
-               ret = 0;
-#else
-               ret = -EINVAL;
-#endif
-               break;
-
-       case PTRACE_DETACH:     /* detach a process that was attached. */
-               ret = ptrace_detach(child, data);
-               break;
-
-               /* Get all gp regs from the child. */
-       case PTRACE_GETREGS: {
-               unsigned long tmp;
-
-               if (!access_ok(VERIFY_WRITE, (unsigned *) data, NR_PTREGS << 2)) {
-                       ret = -EIO;
-                       break;
-               }
-
-               for (i = 0; i < NR_PTREGS << 2; i += 4) {
-                       tmp = get_stack_long(child, ptrace_regid_to_frame[i]);
-                       __put_user(tmp, (unsigned long *) data);
-                       data += sizeof(tmp);
-               }
-               ret = 0;
-               break;
-       }
-
-       case PTRACE_SETREGS: { /* Set all gp regs in the child. */
-               unsigned long tmp;
-
-               if (!access_ok(VERIFY_READ, (unsigned long *)data,
-                              sizeof(struct pt_regs))) {
-                       ret = -EIO;
-                       break;
-               }
-
-               for (i = 0; i < NR_PTREGS << 2; i += 4) {
-                       __get_user(tmp, (unsigned long *) data);
-                       put_stack_long(child, ptrace_regid_to_frame[i], tmp);
-                       data += sizeof(tmp);
-               }
-               ret = 0;
-               break;
-       }
-
-       case PTRACE_GETFPREGS: { /* Get the child FPU state. */
-               if (is_using_fpu(child)) {
-                       unlazy_fpu(child);
-                       fpu_state = child->thread.fpu_state;
-               } else {
-                       memset(&fpu_state, 0, sizeof(fpu_state));
-               }
-
-               ret = -EIO;
-               if (copy_to_user((void *) data, &fpu_state,
-                                sizeof(fpu_state)) == 0)
-                       ret = 0;
-               break;
-       }
-
-       case PTRACE_SETFPREGS: { /* Set the child FPU state. */
-               ret = -EFAULT;
-               if (copy_from_user(&fpu_state, (const void *) data,
-                                  sizeof(fpu_state)) == 0) {
-                       fpu_kill_state(child);
-                       child->thread.fpu_state = fpu_state;
-                       set_using_fpu(child);
-                       ret = 0;
-               }
-               break;
-       }
-
-       case PTRACE_SETOPTIONS: {
-               if (data & PTRACE_O_TRACESYSGOOD)
-                       child->ptrace |= PT_TRACESYSGOOD;
-               else
-                       child->ptrace &= ~PT_TRACESYSGOOD;
-               ret = 0;
-               break;
-       }
+       case PTRACE_GETREGS:    /* Get all integer regs from the child. */
+               return copy_regset_to_user(child, &user_mn10300_native_view,
+                                          REGSET_GENERAL,
+                                          0, NR_PTREGS * sizeof(long),
+                                          (void __user *)data);
+
+       case PTRACE_SETREGS:    /* Set all integer regs in the child. */
+               return copy_regset_from_user(child, &user_mn10300_native_view,
+                                            REGSET_GENERAL,
+                                            0, NR_PTREGS * sizeof(long),
+                                            (const void __user *)data);
+
+       case PTRACE_GETFPREGS:  /* Get the child FPU state. */
+               return copy_regset_to_user(child, &user_mn10300_native_view,
+                                          REGSET_FPU,
+                                          0, sizeof(struct fpu_state_struct),
+                                          (void __user *)data);
+
+       case PTRACE_SETFPREGS:  /* Set the child FPU state. */
+               return copy_regset_from_user(child, &user_mn10300_native_view,
+                                            REGSET_FPU,
+                                            0, sizeof(struct fpu_state_struct),
+                                            (const void __user *)data);
 
        default:
-               ret = -EIO;
+               ret = ptrace_request(child, request, addr, data);
                break;
        }
 
@@ -337,43 +362,26 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
 }
 
 /*
- * notification of system call entry/exit
- * - triggered by current->work.syscall_trace
+ * handle tracing of system call entry
+ * - return the revised system call number or ULONG_MAX to cause ENOSYS
  */
-asmlinkage void do_syscall_trace(struct pt_regs *regs, int entryexit)
+asmlinkage unsigned long syscall_trace_entry(struct pt_regs *regs)
 {
-#if 0
-       /* just in case... */
-       printk(KERN_DEBUG "[%d] syscall_%lu(%lx,%lx,%lx,%lx) = %lx\n",
-              current->pid,
-              regs->orig_d0,
-              regs->a0,
-              regs->d1,
-              regs->a3,
-              regs->a2,
-              regs->d0);
-       return;
-#endif
-
-       if (!test_thread_flag(TIF_SYSCALL_TRACE) &&
-           !test_thread_flag(TIF_SINGLESTEP))
-               return;
-       if (!(current->ptrace & PT_PTRACED))
-               return;
+       if (tracehook_report_syscall_entry(regs))
+               /* tracing decided this syscall should not happen, so
+                * we'll return a bogus call number to get an ENOSYS
+                * error, but leave the original number in
+                * regs->orig_d0.
+                */
+               return ULONG_MAX;
 
-       /* the 0x80 provides a way for the tracing parent to distinguish
-          between a syscall stop and SIGTRAP delivery */
-       ptrace_notify(SIGTRAP |
-                     ((current->ptrace & PT_TRACESYSGOOD) &&
-                      !test_thread_flag(TIF_SINGLESTEP) ? 0x80 : 0));
+       return regs->orig_d0;
+}
 
-       /*
-        * this isn't the same as continuing with a signal, but it will do
-        * for normal use.  strace only continues with a signal if the
-        * stopping signal is not SIGTRAP.  -brl
-        */
-       if (current->exit_code) {
-               send_sig(current->exit_code, current, 1);
-               current->exit_code = 0;
-       }
+/*
+ * handle tracing of system call exit
+ */
+asmlinkage void syscall_trace_exit(struct pt_regs *regs)
+{
+       tracehook_report_syscall_exit(regs, 0);
 }
index 841ca9955a1813836ef2d07d0b7dfbc03e335347..9f7572a0f5784a30d9ff16bbcfa407d10fc8a066 100644 (file)
@@ -23,6 +23,7 @@
 #include <linux/tty.h>
 #include <linux/personality.h>
 #include <linux/suspend.h>
+#include <linux/tracehook.h>
 #include <asm/cacheflush.h>
 #include <asm/ucontext.h>
 #include <asm/uaccess.h>
@@ -511,6 +512,9 @@ static void do_signal(struct pt_regs *regs)
                         * clear the TIF_RESTORE_SIGMASK flag */
                        if (test_thread_flag(TIF_RESTORE_SIGMASK))
                                clear_thread_flag(TIF_RESTORE_SIGMASK);
+
+                       tracehook_signal_handler(signr, &info, &ka, regs,
+                                                test_thread_flag(TIF_SINGLESTEP));
                }
 
                return;
@@ -561,4 +565,9 @@ asmlinkage void do_notify_resume(struct pt_regs *regs, u32 thread_info_flags)
        /* deal with pending signal delivery */
        if (thread_info_flags & (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK))
                do_signal(regs);
+
+       if (thread_info_flags & _TIF_NOTIFY_RESUME) {
+               clear_thread_flag(TIF_NOTIFY_RESUME);
+               tracehook_notify_resume(__frame);
+       }
 }
index 789208094e985ec6a02f25c666492b5735c58bb9..7095147dcb8ba2f83bbad869b7b2e4366dd4a6a5 100644 (file)
@@ -165,24 +165,6 @@ ENTRY(itlb_aerror)
 ENTRY(dtlb_aerror)
        and     ~EPSW_NMID,epsw
        add     -4,sp
-       mov     d1,(sp)
-
-       movhu   (MMUFCR_DFC),d1                 # is it the initial valid write
-                                               # to this page?
-       and     MMUFCR_xFC_INITWR,d1
-       beq     dtlb_pagefault                  # jump if not
-
-       mov     (DPTEL),d1                      # set the dirty bit
-                                               # (don't replace with BSET!)
-       or      _PAGE_DIRTY,d1
-       mov     d1,(DPTEL)
-       mov     (sp),d1
-       add     4,sp
-       rti
-
-       ALIGN
-dtlb_pagefault:
-       mov     (sp),d1
        SAVE_ALL
        add     -4,sp                           # need to pass three params
 
index 9057335fdc616ce607f76b2dc7e099965ce49e5e..2cf915e51e7e9e7c7ad6202e0d287187b8a313ed 100644 (file)
@@ -41,6 +41,12 @@ int kvm_cpu_has_interrupt(struct kvm_vcpu *v)
        return !!(v->arch.pending_exceptions);
 }
 
+int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu)
+{
+       /* do real check here */
+       return 1;
+}
+
 int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
 {
        return !(v->arch.msr & MSR_WE);
index 54ea39f96ecdf17f04a078f51a1c20d45003897b..a27d0d5a6f8689fa81f558686cad2f378487be42 100644 (file)
@@ -13,6 +13,8 @@
 
 #ifndef ASM_KVM_HOST_H
 #define ASM_KVM_HOST_H
+#include <linux/hrtimer.h>
+#include <linux/interrupt.h>
 #include <linux/kvm_host.h>
 #include <asm/debug.h>
 #include <asm/cpuid.h>
@@ -210,7 +212,8 @@ struct kvm_vcpu_arch {
        s390_fp_regs      guest_fpregs;
        unsigned int      guest_acrs[NUM_ACRS];
        struct kvm_s390_local_interrupt local_int;
-       struct timer_list ckc_timer;
+       struct hrtimer    ckc_timer;
+       struct tasklet_struct tasklet;
        union  {
                cpuid_t   cpu_id;
                u64       stidp_data;
index 9d19803111bab26d8021d3aaf2f0f028092f8c8b..98997ccba50151dbf7965831b062e5f74c44fb93 100644 (file)
@@ -154,17 +154,25 @@ static int handle_stop(struct kvm_vcpu *vcpu)
 static int handle_validity(struct kvm_vcpu *vcpu)
 {
        int viwhy = vcpu->arch.sie_block->ipb >> 16;
+       int rc;
+
        vcpu->stat.exit_validity++;
-       if (viwhy == 0x37) {
-               fault_in_pages_writeable((char __user *)
-                                        vcpu->kvm->arch.guest_origin +
-                                        vcpu->arch.sie_block->prefix,
-                                        PAGE_SIZE);
-               return 0;
-       }
-       VCPU_EVENT(vcpu, 2, "unhandled validity intercept code %d",
-                  viwhy);
-       return -ENOTSUPP;
+       if ((viwhy == 0x37) && (vcpu->arch.sie_block->prefix
+               <= vcpu->kvm->arch.guest_memsize - 2*PAGE_SIZE)){
+               rc = fault_in_pages_writeable((char __user *)
+                        vcpu->kvm->arch.guest_origin +
+                        vcpu->arch.sie_block->prefix,
+                        2*PAGE_SIZE);
+               if (rc)
+                       /* user will receive sigsegv, exit to user */
+                       rc = -ENOTSUPP;
+       } else
+               rc = -ENOTSUPP;
+
+       if (rc)
+               VCPU_EVENT(vcpu, 2, "unhandled validity intercept code %d",
+                          viwhy);
+       return rc;
 }
 
 static int handle_instruction(struct kvm_vcpu *vcpu)
index 0189356fe2098cb3f6ddd61434da623b8a7c9cfa..f04f5301b1b42fdd06207c93a84bd6568594cd72 100644 (file)
@@ -12,6 +12,8 @@
 
 #include <asm/lowcore.h>
 #include <asm/uaccess.h>
+#include <linux/hrtimer.h>
+#include <linux/interrupt.h>
 #include <linux/kvm_host.h>
 #include <linux/signal.h>
 #include "kvm-s390.h"
@@ -299,13 +301,13 @@ int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu)
        }
 
        if ((!rc) && atomic_read(&fi->active)) {
-               spin_lock_bh(&fi->lock);
+               spin_lock(&fi->lock);
                list_for_each_entry(inti, &fi->list, list)
                        if (__interrupt_is_deliverable(vcpu, inti)) {
                                rc = 1;
                                break;
                        }
-               spin_unlock_bh(&fi->lock);
+               spin_unlock(&fi->lock);
        }
 
        if ((!rc) && (vcpu->arch.sie_block->ckc <
@@ -318,6 +320,12 @@ int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu)
        return rc;
 }
 
+int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu)
+{
+       /* do real check here */
+       return 1;
+}
+
 int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
 {
        return 0;
@@ -355,14 +363,12 @@ int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
                return 0;
        }
 
-       sltime = (vcpu->arch.sie_block->ckc - now) / (0xf4240000ul / HZ) + 1;
+       sltime = ((vcpu->arch.sie_block->ckc - now)*125)>>9;
 
-       vcpu->arch.ckc_timer.expires = jiffies + sltime;
-
-       add_timer(&vcpu->arch.ckc_timer);
-       VCPU_EVENT(vcpu, 5, "enabled wait timer:%llx jiffies", sltime);
+       hrtimer_start(&vcpu->arch.ckc_timer, ktime_set (0, sltime) , HRTIMER_MODE_REL);
+       VCPU_EVENT(vcpu, 5, "enabled wait via clock comparator: %llx ns", sltime);
 no_timer:
-       spin_lock_bh(&vcpu->arch.local_int.float_int->lock);
+       spin_lock(&vcpu->arch.local_int.float_int->lock);
        spin_lock_bh(&vcpu->arch.local_int.lock);
        add_wait_queue(&vcpu->arch.local_int.wq, &wait);
        while (list_empty(&vcpu->arch.local_int.list) &&
@@ -371,33 +377,46 @@ no_timer:
                !signal_pending(current)) {
                set_current_state(TASK_INTERRUPTIBLE);
                spin_unlock_bh(&vcpu->arch.local_int.lock);
-               spin_unlock_bh(&vcpu->arch.local_int.float_int->lock);
+               spin_unlock(&vcpu->arch.local_int.float_int->lock);
                vcpu_put(vcpu);
                schedule();
                vcpu_load(vcpu);
-               spin_lock_bh(&vcpu->arch.local_int.float_int->lock);
+               spin_lock(&vcpu->arch.local_int.float_int->lock);
                spin_lock_bh(&vcpu->arch.local_int.lock);
        }
        __unset_cpu_idle(vcpu);
        __set_current_state(TASK_RUNNING);
        remove_wait_queue(&vcpu->wq, &wait);
        spin_unlock_bh(&vcpu->arch.local_int.lock);
-       spin_unlock_bh(&vcpu->arch.local_int.float_int->lock);
-       del_timer(&vcpu->arch.ckc_timer);
+       spin_unlock(&vcpu->arch.local_int.float_int->lock);
+       hrtimer_try_to_cancel(&vcpu->arch.ckc_timer);
        return 0;
 }
 
-void kvm_s390_idle_wakeup(unsigned long data)
+void kvm_s390_tasklet(unsigned long parm)
 {
-       struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data;
+       struct kvm_vcpu *vcpu = (struct kvm_vcpu *) parm;
 
-       spin_lock_bh(&vcpu->arch.local_int.lock);
+       spin_lock(&vcpu->arch.local_int.lock);
        vcpu->arch.local_int.timer_due = 1;
        if (waitqueue_active(&vcpu->arch.local_int.wq))
                wake_up_interruptible(&vcpu->arch.local_int.wq);
-       spin_unlock_bh(&vcpu->arch.local_int.lock);
+       spin_unlock(&vcpu->arch.local_int.lock);
 }
 
+/*
+ * low level hrtimer wake routine. Because this runs in hardirq context
+ * we schedule a tasklet to do the real work.
+ */
+enum hrtimer_restart kvm_s390_idle_wakeup(struct hrtimer *timer)
+{
+       struct kvm_vcpu *vcpu;
+
+       vcpu = container_of(timer, struct kvm_vcpu, arch.ckc_timer);
+       tasklet_schedule(&vcpu->arch.tasklet);
+
+       return HRTIMER_NORESTART;
+}
 
 void kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
 {
@@ -436,7 +455,7 @@ void kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
        if (atomic_read(&fi->active)) {
                do {
                        deliver = 0;
-                       spin_lock_bh(&fi->lock);
+                       spin_lock(&fi->lock);
                        list_for_each_entry_safe(inti, n, &fi->list, list) {
                                if (__interrupt_is_deliverable(vcpu, inti)) {
                                        list_del(&inti->list);
@@ -447,7 +466,7 @@ void kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
                        }
                        if (list_empty(&fi->list))
                                atomic_set(&fi->active, 0);
-                       spin_unlock_bh(&fi->lock);
+                       spin_unlock(&fi->lock);
                        if (deliver) {
                                __do_deliver_interrupt(vcpu, inti);
                                kfree(inti);
@@ -512,7 +531,7 @@ int kvm_s390_inject_vm(struct kvm *kvm,
 
        mutex_lock(&kvm->lock);
        fi = &kvm->arch.float_int;
-       spin_lock_bh(&fi->lock);
+       spin_lock(&fi->lock);
        list_add_tail(&inti->list, &fi->list);
        atomic_set(&fi->active, 1);
        sigcpu = find_first_bit(fi->idle_mask, KVM_MAX_VCPUS);
@@ -529,7 +548,7 @@ int kvm_s390_inject_vm(struct kvm *kvm,
        if (waitqueue_active(&li->wq))
                wake_up_interruptible(&li->wq);
        spin_unlock_bh(&li->lock);
-       spin_unlock_bh(&fi->lock);
+       spin_unlock(&fi->lock);
        mutex_unlock(&kvm->lock);
        return 0;
 }
index f4d56e9939c90949c1e886eaabf6cc7d35cab858..10bccd1f8aee58a33220bfbc07aa3b3452b1b3e7 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/compiler.h>
 #include <linux/err.h>
 #include <linux/fs.h>
+#include <linux/hrtimer.h>
 #include <linux/init.h>
 #include <linux/kvm.h>
 #include <linux/kvm_host.h>
@@ -195,6 +196,10 @@ out_nokvm:
 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
 {
        VCPU_EVENT(vcpu, 3, "%s", "free cpu");
+       if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
+               (__u64) vcpu->arch.sie_block)
+               vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
+       smp_mb();
        free_page((unsigned long)(vcpu->arch.sie_block));
        kvm_vcpu_uninit(vcpu);
        kfree(vcpu);
@@ -283,8 +288,10 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
        vcpu->arch.sie_block->gmsor = vcpu->kvm->arch.guest_origin;
        vcpu->arch.sie_block->ecb   = 2;
        vcpu->arch.sie_block->eca   = 0xC1002001U;
-       setup_timer(&vcpu->arch.ckc_timer, kvm_s390_idle_wakeup,
-                (unsigned long) vcpu);
+       hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
+       tasklet_init(&vcpu->arch.tasklet, kvm_s390_tasklet,
+                    (unsigned long) vcpu);
+       vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
        get_cpu_id(&vcpu->arch.cpu_id);
        vcpu->arch.cpu_id.version = 0xff;
        return 0;
@@ -307,19 +314,21 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
 
        vcpu->arch.sie_block->icpua = id;
        BUG_ON(!kvm->arch.sca);
-       BUG_ON(kvm->arch.sca->cpu[id].sda);
-       kvm->arch.sca->cpu[id].sda = (__u64) vcpu->arch.sie_block;
+       if (!kvm->arch.sca->cpu[id].sda)
+               kvm->arch.sca->cpu[id].sda = (__u64) vcpu->arch.sie_block;
+       else
+               BUG_ON(!kvm->vcpus[id]); /* vcpu does already exist */
        vcpu->arch.sie_block->scaoh = (__u32)(((__u64)kvm->arch.sca) >> 32);
        vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
 
        spin_lock_init(&vcpu->arch.local_int.lock);
        INIT_LIST_HEAD(&vcpu->arch.local_int.list);
        vcpu->arch.local_int.float_int = &kvm->arch.float_int;
-       spin_lock_bh(&kvm->arch.float_int.lock);
+       spin_lock(&kvm->arch.float_int.lock);
        kvm->arch.float_int.local_int[id] = &vcpu->arch.local_int;
        init_waitqueue_head(&vcpu->arch.local_int.wq);
        vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
-       spin_unlock_bh(&kvm->arch.float_int.lock);
+       spin_unlock(&kvm->arch.float_int.lock);
 
        rc = kvm_vcpu_init(vcpu, kvm, id);
        if (rc)
@@ -478,6 +487,12 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 
        vcpu_load(vcpu);
 
+       /* verify that memory has been registered */
+       if (!vcpu->kvm->arch.guest_memsize) {
+               vcpu_put(vcpu);
+               return -EINVAL;
+       }
+
        if (vcpu->sigset_active)
                sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
 
@@ -657,6 +672,8 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
                                struct kvm_memory_slot old,
                                int user_alloc)
 {
+       int i;
+
        /* A few sanity checks. We can have exactly one memory slot which has
           to start at guest virtual zero and which has to be located at a
           page boundary in userland and which has to end at a page boundary.
@@ -664,7 +681,7 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
           vmas. It is okay to mmap() and munmap() stuff in this slot after
           doing this call at any time */
 
-       if (mem->slot)
+       if (mem->slot || kvm->arch.guest_memsize)
                return -EINVAL;
 
        if (mem->guest_phys_addr)
@@ -676,15 +693,39 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
        if (mem->memory_size & (PAGE_SIZE - 1))
                return -EINVAL;
 
+       if (!user_alloc)
+               return -EINVAL;
+
+       /* lock all vcpus */
+       for (i = 0; i < KVM_MAX_VCPUS; ++i) {
+               if (!kvm->vcpus[i])
+                       continue;
+               if (!mutex_trylock(&kvm->vcpus[i]->mutex))
+                       goto fail_out;
+       }
+
        kvm->arch.guest_origin = mem->userspace_addr;
        kvm->arch.guest_memsize = mem->memory_size;
 
-       /* FIXME: we do want to interrupt running CPUs and update their memory
-          configuration now to avoid race conditions. But hey, changing the
-          memory layout while virtual CPUs are running is usually bad
-          programming practice. */
+       /* update sie control blocks, and unlock all vcpus */
+       for (i = 0; i < KVM_MAX_VCPUS; ++i) {
+               if (kvm->vcpus[i]) {
+                       kvm->vcpus[i]->arch.sie_block->gmsor =
+                               kvm->arch.guest_origin;
+                       kvm->vcpus[i]->arch.sie_block->gmslm =
+                               kvm->arch.guest_memsize +
+                               kvm->arch.guest_origin +
+                               VIRTIODESCSPACE - 1ul;
+                       mutex_unlock(&kvm->vcpus[i]->mutex);
+               }
+       }
 
        return 0;
+
+fail_out:
+       for (; i >= 0; i--)
+               mutex_unlock(&kvm->vcpus[i]->mutex);
+       return -EINVAL;
 }
 
 void kvm_arch_flush_shadow(struct kvm *kvm)
index 00bbe69b78da97757b8da8840d0f55977a7aca8c..748fee872323f3cd996bfda466e01cd54fc8e169 100644 (file)
@@ -14,6 +14,7 @@
 #ifndef ARCH_S390_KVM_S390_H
 #define ARCH_S390_KVM_S390_H
 
+#include <linux/hrtimer.h>
 #include <linux/kvm.h>
 #include <linux/kvm_host.h>
 
@@ -41,7 +42,8 @@ static inline int __cpu_is_stopped(struct kvm_vcpu *vcpu)
 }
 
 int kvm_s390_handle_wait(struct kvm_vcpu *vcpu);
-void kvm_s390_idle_wakeup(unsigned long data);
+enum hrtimer_restart kvm_s390_idle_wakeup(struct hrtimer *timer);
+void kvm_s390_tasklet(unsigned long parm);
 void kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu);
 int kvm_s390_inject_vm(struct kvm *kvm,
                struct kvm_s390_interrupt *s390int);
index 4b88834b8dd8bd4be94de20d5bf248b8f195bcd6..93ecd06e1a749fc866177c5bd5c6bcdd6ae82e81 100644 (file)
@@ -204,11 +204,11 @@ static void handle_stsi_3_2_2(struct kvm_vcpu *vcpu, struct sysinfo_3_2_2 *mem)
        int cpus = 0;
        int n;
 
-       spin_lock_bh(&fi->lock);
+       spin_lock(&fi->lock);
        for (n = 0; n < KVM_MAX_VCPUS; n++)
                if (fi->local_int[n])
                        cpus++;
-       spin_unlock_bh(&fi->lock);
+       spin_unlock(&fi->lock);
 
        /* deal with other level 3 hypervisors */
        if (stsi(mem, 3, 2, 2) == -ENOSYS)
index f27dbedf086600280964164b207851c5f2f98dd2..36678835034d9f49a748f448fd943073e9974564 100644 (file)
@@ -52,7 +52,7 @@ static int __sigp_sense(struct kvm_vcpu *vcpu, u16 cpu_addr,
        if (cpu_addr >= KVM_MAX_VCPUS)
                return 3; /* not operational */
 
-       spin_lock_bh(&fi->lock);
+       spin_lock(&fi->lock);
        if (fi->local_int[cpu_addr] == NULL)
                rc = 3; /* not operational */
        else if (atomic_read(fi->local_int[cpu_addr]->cpuflags)
@@ -64,7 +64,7 @@ static int __sigp_sense(struct kvm_vcpu *vcpu, u16 cpu_addr,
                *reg |= SIGP_STAT_STOPPED;
                rc = 1; /* status stored */
        }
-       spin_unlock_bh(&fi->lock);
+       spin_unlock(&fi->lock);
 
        VCPU_EVENT(vcpu, 4, "sensed status of cpu %x rc %x", cpu_addr, rc);
        return rc;
@@ -86,7 +86,7 @@ static int __sigp_emergency(struct kvm_vcpu *vcpu, u16 cpu_addr)
 
        inti->type = KVM_S390_INT_EMERGENCY;
 
-       spin_lock_bh(&fi->lock);
+       spin_lock(&fi->lock);
        li = fi->local_int[cpu_addr];
        if (li == NULL) {
                rc = 3; /* not operational */
@@ -102,7 +102,7 @@ static int __sigp_emergency(struct kvm_vcpu *vcpu, u16 cpu_addr)
        spin_unlock_bh(&li->lock);
        rc = 0; /* order accepted */
 unlock:
-       spin_unlock_bh(&fi->lock);
+       spin_unlock(&fi->lock);
        VCPU_EVENT(vcpu, 4, "sent sigp emerg to cpu %x", cpu_addr);
        return rc;
 }
@@ -123,7 +123,7 @@ static int __sigp_stop(struct kvm_vcpu *vcpu, u16 cpu_addr, int store)
 
        inti->type = KVM_S390_SIGP_STOP;
 
-       spin_lock_bh(&fi->lock);
+       spin_lock(&fi->lock);
        li = fi->local_int[cpu_addr];
        if (li == NULL) {
                rc = 3; /* not operational */
@@ -142,7 +142,7 @@ static int __sigp_stop(struct kvm_vcpu *vcpu, u16 cpu_addr, int store)
        spin_unlock_bh(&li->lock);
        rc = 0; /* order accepted */
 unlock:
-       spin_unlock_bh(&fi->lock);
+       spin_unlock(&fi->lock);
        VCPU_EVENT(vcpu, 4, "sent sigp stop to cpu %x", cpu_addr);
        return rc;
 }
@@ -188,7 +188,7 @@ static int __sigp_set_prefix(struct kvm_vcpu *vcpu, u16 cpu_addr, u32 address,
        if (!inti)
                return 2; /* busy */
 
-       spin_lock_bh(&fi->lock);
+       spin_lock(&fi->lock);
        li = fi->local_int[cpu_addr];
 
        if ((cpu_addr >= KVM_MAX_VCPUS) || (li == NULL)) {
@@ -220,7 +220,7 @@ static int __sigp_set_prefix(struct kvm_vcpu *vcpu, u16 cpu_addr, u32 address,
 out_li:
        spin_unlock_bh(&li->lock);
 out_fi:
-       spin_unlock_bh(&fi->lock);
+       spin_unlock(&fi->lock);
        return rc;
 }
 
index 19af42138f78ee8a95509276fc70178475fb95c0..4a28d22d479362ccf3a5bedbc8edb6499558e1ef 100644 (file)
 #define X86_FEATURE_XMM4_1     (4*32+19) /* "sse4_1" SSE-4.1 */
 #define X86_FEATURE_XMM4_2     (4*32+20) /* "sse4_2" SSE-4.2 */
 #define X86_FEATURE_X2APIC     (4*32+21) /* x2APIC */
+#define X86_FEATURE_MOVBE      (4*32+22) /* MOVBE instruction */
+#define X86_FEATURE_POPCNT      (4*32+23) /* POPCNT instruction */
 #define X86_FEATURE_AES                (4*32+25) /* AES instructions */
 #define X86_FEATURE_XSAVE      (4*32+26) /* XSAVE/XRSTOR/XSETBV/XGETBV */
 #define X86_FEATURE_OSXSAVE    (4*32+27) /* "" XSAVE enabled in the OS */
index dc3f6cf117045ee50b2e92aa87a9b772ce8150bd..125be8b19568e2828614d85b741d5ee2e2a53535 100644 (file)
@@ -16,6 +16,7 @@
 #define __KVM_HAVE_MSI
 #define __KVM_HAVE_USER_NMI
 #define __KVM_HAVE_GUEST_DEBUG
+#define __KVM_HAVE_MSIX
 
 /* Architectural interrupt line count. */
 #define KVM_NR_INTERRUPTS 256
index f0faf58044ff6c6215d1a78739b90a3b9d22336e..eabdc1cfab5c6143874569c5e92eb065d8966ae4 100644 (file)
@@ -185,6 +185,7 @@ union kvm_mmu_page_role {
                unsigned access:3;
                unsigned invalid:1;
                unsigned cr4_pge:1;
+               unsigned nxe:1;
        };
 };
 
@@ -212,7 +213,6 @@ struct kvm_mmu_page {
        int multimapped;         /* More than one parent_pte? */
        int root_count;          /* Currently serving as active root */
        bool unsync;
-       bool global;
        unsigned int unsync_children;
        union {
                u64 *parent_pte;               /* !multimapped */
@@ -261,13 +261,11 @@ struct kvm_mmu {
        union kvm_mmu_page_role base_role;
 
        u64 *pae_root;
+       u64 rsvd_bits_mask[2][4];
 };
 
 struct kvm_vcpu_arch {
        u64 host_tsc;
-       int interrupt_window_open;
-       unsigned long irq_summary; /* bit vector: 1 per word in irq_pending */
-       DECLARE_BITMAP(irq_pending, KVM_NR_INTERRUPTS);
        /*
         * rip and regs accesses must go through
         * kvm_{register,rip}_{read,write} functions.
@@ -286,6 +284,7 @@ struct kvm_vcpu_arch {
        u64 shadow_efer;
        u64 apic_base;
        struct kvm_lapic *apic;    /* kernel irqchip context */
+       int32_t apic_arb_prio;
        int mp_state;
        int sipi_vector;
        u64 ia32_misc_enable_msr;
@@ -320,6 +319,8 @@ struct kvm_vcpu_arch {
        struct kvm_pio_request pio;
        void *pio_data;
 
+       u8 event_exit_inst_len;
+
        struct kvm_queued_exception {
                bool pending;
                bool has_error_code;
@@ -329,11 +330,12 @@ struct kvm_vcpu_arch {
 
        struct kvm_queued_interrupt {
                bool pending;
+               bool soft;
                u8 nr;
        } interrupt;
 
        struct {
-               int active;
+               int vm86_active;
                u8 save_iopl;
                struct kvm_save_segment {
                        u16 selector;
@@ -356,9 +358,9 @@ struct kvm_vcpu_arch {
        unsigned int time_offset;
        struct page *time_page;
 
+       bool singlestep; /* guest is single stepped by KVM */
        bool nmi_pending;
        bool nmi_injected;
-       bool nmi_window_open;
 
        struct mtrr_state_type mtrr_state;
        u32 pat;
@@ -392,15 +394,14 @@ struct kvm_arch{
         */
        struct list_head active_mmu_pages;
        struct list_head assigned_dev_head;
-       struct list_head oos_global_pages;
        struct iommu_domain *iommu_domain;
+       int iommu_flags;
        struct kvm_pic *vpic;
        struct kvm_ioapic *vioapic;
        struct kvm_pit *vpit;
        struct hlist_head irq_ack_notifier_list;
        int vapics_in_nmi_mode;
 
-       int round_robin_prev_vcpu;
        unsigned int tss_addr;
        struct page *apic_access_page;
 
@@ -423,7 +424,6 @@ struct kvm_vm_stat {
        u32 mmu_recycled;
        u32 mmu_cache_miss;
        u32 mmu_unsync;
-       u32 mmu_unsync_global;
        u32 remote_tlb_flush;
        u32 lpages;
 };
@@ -443,7 +443,6 @@ struct kvm_vcpu_stat {
        u32 halt_exits;
        u32 halt_wakeup;
        u32 request_irq_exits;
-       u32 request_nmi_exits;
        u32 irq_exits;
        u32 host_state_reload;
        u32 efer_reload;
@@ -511,20 +510,22 @@ struct kvm_x86_ops {
        void (*run)(struct kvm_vcpu *vcpu, struct kvm_run *run);
        int (*handle_exit)(struct kvm_run *run, struct kvm_vcpu *vcpu);
        void (*skip_emulated_instruction)(struct kvm_vcpu *vcpu);
+       void (*set_interrupt_shadow)(struct kvm_vcpu *vcpu, int mask);
+       u32 (*get_interrupt_shadow)(struct kvm_vcpu *vcpu, int mask);
        void (*patch_hypercall)(struct kvm_vcpu *vcpu,
                                unsigned char *hypercall_addr);
-       int (*get_irq)(struct kvm_vcpu *vcpu);
-       void (*set_irq)(struct kvm_vcpu *vcpu, int vec);
+       void (*set_irq)(struct kvm_vcpu *vcpu);
+       void (*set_nmi)(struct kvm_vcpu *vcpu);
        void (*queue_exception)(struct kvm_vcpu *vcpu, unsigned nr,
                                bool has_error_code, u32 error_code);
-       bool (*exception_injected)(struct kvm_vcpu *vcpu);
-       void (*inject_pending_irq)(struct kvm_vcpu *vcpu);
-       void (*inject_pending_vectors)(struct kvm_vcpu *vcpu,
-                                      struct kvm_run *run);
-
+       int (*interrupt_allowed)(struct kvm_vcpu *vcpu);
+       int (*nmi_allowed)(struct kvm_vcpu *vcpu);
+       void (*enable_nmi_window)(struct kvm_vcpu *vcpu);
+       void (*enable_irq_window)(struct kvm_vcpu *vcpu);
+       void (*update_cr8_intercept)(struct kvm_vcpu *vcpu, int tpr, int irr);
        int (*set_tss_addr)(struct kvm *kvm, unsigned int addr);
        int (*get_tdp_level)(void);
-       int (*get_mt_mask_shift)(void);
+       u64 (*get_mt_mask)(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio);
 };
 
 extern struct kvm_x86_ops *kvm_x86_ops;
@@ -538,7 +539,7 @@ int kvm_mmu_setup(struct kvm_vcpu *vcpu);
 void kvm_mmu_set_nonpresent_ptes(u64 trap_pte, u64 notrap_pte);
 void kvm_mmu_set_base_ptes(u64 base_pte);
 void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
-               u64 dirty_mask, u64 nx_mask, u64 x_mask, u64 mt_mask);
+               u64 dirty_mask, u64 nx_mask, u64 x_mask);
 
 int kvm_mmu_reset_context(struct kvm_vcpu *vcpu);
 void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot);
@@ -552,6 +553,7 @@ int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
                          const void *val, int bytes);
 int kvm_pv_mmu_op(struct kvm_vcpu *vcpu, unsigned long bytes,
                  gpa_t addr, unsigned long *ret);
+u8 kvm_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn);
 
 extern bool tdp_enabled;
 
@@ -563,6 +565,7 @@ enum emulation_result {
 
 #define EMULTYPE_NO_DECODE         (1 << 0)
 #define EMULTYPE_TRAP_UD           (1 << 1)
+#define EMULTYPE_SKIP              (1 << 2)
 int emulate_instruction(struct kvm_vcpu *vcpu, struct kvm_run *run,
                        unsigned long cr2, u16 error_code, int emulation_type);
 void kvm_report_emulation_failure(struct kvm_vcpu *cvpu, const char *context);
@@ -638,7 +641,6 @@ void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu);
 int kvm_mmu_load(struct kvm_vcpu *vcpu);
 void kvm_mmu_unload(struct kvm_vcpu *vcpu);
 void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu);
-void kvm_mmu_sync_global(struct kvm_vcpu *vcpu);
 
 int kvm_emulate_hypercall(struct kvm_vcpu *vcpu);
 
@@ -769,6 +771,8 @@ enum {
 #define HF_GIF_MASK            (1 << 0)
 #define HF_HIF_MASK            (1 << 1)
 #define HF_VINTR_MASK          (1 << 2)
+#define HF_NMI_MASK            (1 << 3)
+#define HF_IRET_MASK           (1 << 4)
 
 /*
  * Hardware virtualization extension instructions may fault if a
@@ -791,5 +795,6 @@ asmlinkage void kvm_handle_fault_on_reboot(void);
 #define KVM_ARCH_WANT_MMU_NOTIFIER
 int kvm_unmap_hva(struct kvm *kvm, unsigned long hva);
 int kvm_age_hva(struct kvm *kvm, unsigned long hva);
+int cpuid_maxphyaddr(struct kvm_vcpu *vcpu);
 
 #endif /* _ASM_X86_KVM_HOST_H */
index 6a159732881a6b4067fd552fd4576cce797a5926..b7ed2c423116be32942ef23b63e5dc4f09492d9d 100644 (file)
@@ -143,6 +143,9 @@ struct decode_cache {
        struct fetch_cache fetch;
 };
 
+#define X86_SHADOW_INT_MOV_SS  1
+#define X86_SHADOW_INT_STI     2
+
 struct x86_emulate_ctxt {
        /* Register state before/after emulation. */
        struct kvm_vcpu *vcpu;
@@ -152,6 +155,9 @@ struct x86_emulate_ctxt {
        int mode;
        u32 cs_base;
 
+       /* interruptibility state, as a result of execution of STI or MOV SS */
+       int interruptibility;
+
        /* decode cache */
        struct decode_cache decode;
 };
index 82ada75f3ebf142678325968ea6fa0952308e9a0..85574b7c1bc13c371c4c5bdc8d17146897f95645 100644 (file)
@@ -225,6 +225,7 @@ struct __attribute__ ((__packed__)) vmcb {
 #define SVM_EVTINJ_VALID_ERR (1 << 11)
 
 #define SVM_EXITINTINFO_VEC_MASK SVM_EVTINJ_VEC_MASK
+#define SVM_EXITINTINFO_TYPE_MASK SVM_EVTINJ_TYPE_MASK
 
 #define        SVM_EXITINTINFO_TYPE_INTR SVM_EVTINJ_TYPE_INTR
 #define        SVM_EXITINTINFO_TYPE_NMI SVM_EVTINJ_TYPE_NMI
index f72956331c49349623014bffc4c241f01c361eee..c4ee8056bacaf9534c1fead0e7cee6b3e578a57b 100644 (file)
@@ -67,6 +67,7 @@ static inline int user_termio_to_kernel_termios(struct ktermios *termios,
        SET_LOW_TERMIOS_BITS(termios, termio, c_oflag);
        SET_LOW_TERMIOS_BITS(termios, termio, c_cflag);
        SET_LOW_TERMIOS_BITS(termios, termio, c_lflag);
+       get_user(termios->c_line, &termio->c_line);
        return copy_from_user(termios->c_cc, termio->c_cc, NCC);
 }
 
index 498f944010b9a112a013a5007a58117b66d6f6db..11be5ad2e0e9730384cacc29b96dc1d9345f728c 100644 (file)
@@ -247,6 +247,7 @@ enum vmcs_field {
 #define EXIT_REASON_MSR_READ            31
 #define EXIT_REASON_MSR_WRITE           32
 #define EXIT_REASON_MWAIT_INSTRUCTION   36
+#define EXIT_REASON_MCE_DURING_VMENTRY  41
 #define EXIT_REASON_TPR_BELOW_THRESHOLD 43
 #define EXIT_REASON_APIC_ACCESS         44
 #define EXIT_REASON_EPT_VIOLATION       48
index 09dd1d414fc36bf40eb8218619298e76bf1ac443..289cc481502887dd228161157002b297e4a5a73d 100644 (file)
@@ -420,6 +420,7 @@ void do_machine_check(struct pt_regs * regs, long error_code)
  out2:
        atomic_dec(&mce_entry);
 }
+EXPORT_SYMBOL_GPL(do_machine_check);
 
 #ifdef CONFIG_X86_MCE_INTEL
 /***
index 6551dedee20c71a58a8f902bdee88dd0e8c4d7f8..a78ecad0c900ff646b8c826c08cd54b094815b5a 100644 (file)
@@ -27,6 +27,7 @@
 #include <linux/mm.h>
 #include <linux/highmem.h>
 #include <linux/hardirq.h>
+#include <asm/timer.h>
 
 #define MMU_QUEUE_SIZE 1024
 
@@ -230,6 +231,9 @@ static void paravirt_ops_setup(void)
                pv_mmu_ops.lazy_mode.enter = kvm_enter_lazy_mmu;
                pv_mmu_ops.lazy_mode.leave = kvm_leave_lazy_mmu;
        }
+#ifdef CONFIG_X86_IO_APIC
+       no_timer_check = 1;
+#endif
 }
 
 void __init kvm_guest_init(void)
index f6db48c405b81fca925988fa0fc408f078793722..28f5fb495a669e9b6843daf33a37baaac512fad7 100644 (file)
@@ -172,6 +172,9 @@ void smp_reschedule_interrupt(struct pt_regs *regs)
 {
        ack_APIC_irq();
        inc_irq_stat(irq_resched_count);
+       /*
+        * KVM uses this interrupt to force a cpu out of guest mode
+        */
 }
 
 void smp_call_function_interrupt(struct pt_regs *regs)
index a58504ea78ccb90d766e8efbcc5563ef0b0d1303..8600a09e0c6c5cca00331605a92045d34268133b 100644 (file)
@@ -50,6 +50,9 @@ config KVM_INTEL
          Provides support for KVM on Intel processors equipped with the VT
          extensions.
 
+         To compile this as a module, choose M here: the module
+         will be called kvm-intel.
+
 config KVM_AMD
        tristate "KVM for AMD processors support"
        depends on KVM
@@ -57,6 +60,9 @@ config KVM_AMD
          Provides support for KVM on AMD processors equipped with the AMD-V
          (SVM) extensions.
 
+         To compile this as a module, choose M here: the module
+         will be called kvm-amd.
+
 config KVM_TRACE
        bool "KVM trace support"
        depends on KVM && SYSFS
index d3ec292f00f248e9d41e67813775f1867de8b0a5..b43c4efafe80133cab8ab26a4bbabc9e5fac263a 100644 (file)
@@ -14,7 +14,7 @@ endif
 EXTRA_CFLAGS += -Ivirt/kvm -Iarch/x86/kvm
 
 kvm-objs := $(common-objs) x86.o mmu.o x86_emulate.o i8259.o irq.o lapic.o \
-       i8254.o
+       i8254.o timer.o
 obj-$(CONFIG_KVM) += kvm.o
 kvm-intel-objs = vmx.o
 obj-$(CONFIG_KVM_INTEL) += kvm-intel.o
index c13bb92d3157708a52e211b1992a71f45554ffbb..4d6f0d293ee29bb303dbe5e4b81b4cb3c9e2b6e2 100644 (file)
@@ -98,6 +98,37 @@ static int pit_get_gate(struct kvm *kvm, int channel)
        return kvm->arch.vpit->pit_state.channels[channel].gate;
 }
 
+static s64 __kpit_elapsed(struct kvm *kvm)
+{
+       s64 elapsed;
+       ktime_t remaining;
+       struct kvm_kpit_state *ps = &kvm->arch.vpit->pit_state;
+
+       /*
+        * The Counter does not stop when it reaches zero. In
+        * Modes 0, 1, 4, and 5 the Counter ``wraps around'' to
+        * the highest count, either FFFF hex for binary counting
+        * or 9999 for BCD counting, and continues counting.
+        * Modes 2 and 3 are periodic; the Counter reloads
+        * itself with the initial count and continues counting
+        * from there.
+        */
+       remaining = hrtimer_expires_remaining(&ps->pit_timer.timer);
+       elapsed = ps->pit_timer.period - ktime_to_ns(remaining);
+       elapsed = mod_64(elapsed, ps->pit_timer.period);
+
+       return elapsed;
+}
+
+static s64 kpit_elapsed(struct kvm *kvm, struct kvm_kpit_channel_state *c,
+                       int channel)
+{
+       if (channel == 0)
+               return __kpit_elapsed(kvm);
+
+       return ktime_to_ns(ktime_sub(ktime_get(), c->count_load_time));
+}
+
 static int pit_get_count(struct kvm *kvm, int channel)
 {
        struct kvm_kpit_channel_state *c =
@@ -107,7 +138,7 @@ static int pit_get_count(struct kvm *kvm, int channel)
 
        WARN_ON(!mutex_is_locked(&kvm->arch.vpit->pit_state.lock));
 
-       t = ktime_to_ns(ktime_sub(ktime_get(), c->count_load_time));
+       t = kpit_elapsed(kvm, c, channel);
        d = muldiv64(t, KVM_PIT_FREQ, NSEC_PER_SEC);
 
        switch (c->mode) {
@@ -137,7 +168,7 @@ static int pit_get_out(struct kvm *kvm, int channel)
 
        WARN_ON(!mutex_is_locked(&kvm->arch.vpit->pit_state.lock));
 
-       t = ktime_to_ns(ktime_sub(ktime_get(), c->count_load_time));
+       t = kpit_elapsed(kvm, c, channel);
        d = muldiv64(t, KVM_PIT_FREQ, NSEC_PER_SEC);
 
        switch (c->mode) {
@@ -193,28 +224,6 @@ static void pit_latch_status(struct kvm *kvm, int channel)
        }
 }
 
-static int __pit_timer_fn(struct kvm_kpit_state *ps)
-{
-       struct kvm_vcpu *vcpu0 = ps->pit->kvm->vcpus[0];
-       struct kvm_kpit_timer *pt = &ps->pit_timer;
-
-       if (!atomic_inc_and_test(&pt->pending))
-               set_bit(KVM_REQ_PENDING_TIMER, &vcpu0->requests);
-
-       if (!pt->reinject)
-               atomic_set(&pt->pending, 1);
-
-       if (vcpu0 && waitqueue_active(&vcpu0->wq))
-               wake_up_interruptible(&vcpu0->wq);
-
-       hrtimer_add_expires_ns(&pt->timer, pt->period);
-       pt->scheduled = hrtimer_get_expires_ns(&pt->timer);
-       if (pt->period)
-               ps->channels[0].count_load_time = ktime_get();
-
-       return (pt->period == 0 ? 0 : 1);
-}
-
 int pit_has_pending_timer(struct kvm_vcpu *vcpu)
 {
        struct kvm_pit *pit = vcpu->kvm->arch.vpit;
@@ -235,21 +244,6 @@ static void kvm_pit_ack_irq(struct kvm_irq_ack_notifier *kian)
        spin_unlock(&ps->inject_lock);
 }
 
-static enum hrtimer_restart pit_timer_fn(struct hrtimer *data)
-{
-       struct kvm_kpit_state *ps;
-       int restart_timer = 0;
-
-       ps = container_of(data, struct kvm_kpit_state, pit_timer.timer);
-
-       restart_timer = __pit_timer_fn(ps);
-
-       if (restart_timer)
-               return HRTIMER_RESTART;
-       else
-               return HRTIMER_NORESTART;
-}
-
 void __kvm_migrate_pit_timer(struct kvm_vcpu *vcpu)
 {
        struct kvm_pit *pit = vcpu->kvm->arch.vpit;
@@ -263,15 +257,26 @@ void __kvm_migrate_pit_timer(struct kvm_vcpu *vcpu)
                hrtimer_start_expires(timer, HRTIMER_MODE_ABS);
 }
 
-static void destroy_pit_timer(struct kvm_kpit_timer *pt)
+static void destroy_pit_timer(struct kvm_timer *pt)
 {
        pr_debug("pit: execute del timer!\n");
        hrtimer_cancel(&pt->timer);
 }
 
+static bool kpit_is_periodic(struct kvm_timer *ktimer)
+{
+       struct kvm_kpit_state *ps = container_of(ktimer, struct kvm_kpit_state,
+                                                pit_timer);
+       return ps->is_periodic;
+}
+
+static struct kvm_timer_ops kpit_ops = {
+       .is_periodic = kpit_is_periodic,
+};
+
 static void create_pit_timer(struct kvm_kpit_state *ps, u32 val, int is_period)
 {
-       struct kvm_kpit_timer *pt = &ps->pit_timer;
+       struct kvm_timer *pt = &ps->pit_timer;
        s64 interval;
 
        interval = muldiv64(val, NSEC_PER_SEC, KVM_PIT_FREQ);
@@ -280,8 +285,14 @@ static void create_pit_timer(struct kvm_kpit_state *ps, u32 val, int is_period)
 
        /* TODO The new value only affected after the retriggered */
        hrtimer_cancel(&pt->timer);
-       pt->period = (is_period == 0) ? 0 : interval;
-       pt->timer.function = pit_timer_fn;
+       pt->period = interval;
+       ps->is_periodic = is_period;
+
+       pt->timer.function = kvm_timer_fn;
+       pt->t_ops = &kpit_ops;
+       pt->kvm = ps->pit->kvm;
+       pt->vcpu_id = 0;
+
        atomic_set(&pt->pending, 0);
        ps->irq_ack = 1;
 
@@ -298,23 +309,23 @@ static void pit_load_count(struct kvm *kvm, int channel, u32 val)
        pr_debug("pit: load_count val is %d, channel is %d\n", val, channel);
 
        /*
-        * Though spec said the state of 8254 is undefined after power-up,
-        * seems some tricky OS like Windows XP depends on IRQ0 interrupt
-        * when booting up.
-        * So here setting initialize rate for it, and not a specific number
+        * The largest possible initial count is 0; this is equivalent
+        * to 2^16 (65536) for binary counting and 10^4 (10000) for BCD counting.
         */
        if (val == 0)
                val = 0x10000;
 
-       ps->channels[channel].count_load_time = ktime_get();
        ps->channels[channel].count = val;
 
-       if (channel != 0)
+       if (channel != 0) {
+               ps->channels[channel].count_load_time = ktime_get();
                return;
+       }
 
        /* Two types of timer
         * mode 1 is one shot, mode 2 is period, otherwise del timer */
        switch (ps->channels[0].mode) {
+       case 0:
        case 1:
         /* FIXME: enhance mode 4 precision */
        case 4:
index 6acbe4b505d5faad09d6b7d69e2cc64e870965cc..bbd863ff60b701591eee82be07993412af2adeb1 100644 (file)
@@ -3,15 +3,6 @@
 
 #include "iodev.h"
 
-struct kvm_kpit_timer {
-       struct hrtimer timer;
-       int irq;
-       s64 period; /* unit: ns */
-       s64 scheduled;
-       atomic_t pending;
-       bool reinject;
-};
-
 struct kvm_kpit_channel_state {
        u32 count; /* can be 65536 */
        u16 latched_count;
@@ -30,7 +21,8 @@ struct kvm_kpit_channel_state {
 
 struct kvm_kpit_state {
        struct kvm_kpit_channel_state channels[3];
-       struct kvm_kpit_timer pit_timer;
+       struct kvm_timer pit_timer;
+       bool is_periodic;
        u32    speaker_data_on;
        struct mutex lock;
        struct kvm_pit *pit;
index cf17ed52f6fbbe33563668604df0fa814f3cf493..96dfbb6ad2a9d2b1db365ef0c007f9d7056d9dda 100644 (file)
@@ -24,6 +24,7 @@
 
 #include "irq.h"
 #include "i8254.h"
+#include "x86.h"
 
 /*
  * check if there are pending timer events
@@ -48,6 +49,9 @@ int kvm_cpu_has_interrupt(struct kvm_vcpu *v)
 {
        struct kvm_pic *s;
 
+       if (!irqchip_in_kernel(v->kvm))
+               return v->arch.interrupt.pending;
+
        if (kvm_apic_has_interrupt(v) == -1) {  /* LAPIC */
                if (kvm_apic_accept_pic_intr(v)) {
                        s = pic_irqchip(v->kvm);        /* PIC */
@@ -67,6 +71,9 @@ int kvm_cpu_get_interrupt(struct kvm_vcpu *v)
        struct kvm_pic *s;
        int vector;
 
+       if (!irqchip_in_kernel(v->kvm))
+               return v->arch.interrupt.nr;
+
        vector = kvm_get_apic_interrupt(v);     /* APIC */
        if (vector == -1) {
                if (kvm_apic_accept_pic_intr(v)) {
diff --git a/arch/x86/kvm/kvm_timer.h b/arch/x86/kvm/kvm_timer.h
new file mode 100644 (file)
index 0000000..26bd6ba
--- /dev/null
@@ -0,0 +1,18 @@
+
+struct kvm_timer {
+       struct hrtimer timer;
+       s64 period;                             /* unit: ns */
+       atomic_t pending;                       /* accumulated triggered timers */
+       bool reinject;
+       struct kvm_timer_ops *t_ops;
+       struct kvm *kvm;
+       int vcpu_id;
+};
+
+struct kvm_timer_ops {
+        bool (*is_periodic)(struct kvm_timer *);
+};
+
+
+enum hrtimer_restart kvm_timer_fn(struct hrtimer *data);
+
index f0b67f2cdd695a71c636ddfbe45b9ec9248b0748..ae99d83f81a34384d72859a69233ace1dd8fa433 100644 (file)
@@ -196,20 +196,15 @@ int kvm_lapic_find_highest_irr(struct kvm_vcpu *vcpu)
 }
 EXPORT_SYMBOL_GPL(kvm_lapic_find_highest_irr);
 
-int kvm_apic_set_irq(struct kvm_vcpu *vcpu, u8 vec, u8 trig)
+static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
+                            int vector, int level, int trig_mode);
+
+int kvm_apic_set_irq(struct kvm_vcpu *vcpu, struct kvm_lapic_irq *irq)
 {
        struct kvm_lapic *apic = vcpu->arch.apic;
 
-       if (!apic_test_and_set_irr(vec, apic)) {
-               /* a new pending irq is set in IRR */
-               if (trig)
-                       apic_set_vector(vec, apic->regs + APIC_TMR);
-               else
-                       apic_clear_vector(vec, apic->regs + APIC_TMR);
-               kvm_vcpu_kick(apic->vcpu);
-               return 1;
-       }
-       return 0;
+       return __apic_accept_irq(apic, irq->delivery_mode, irq->vector,
+                       irq->level, irq->trig_mode);
 }
 
 static inline int apic_find_highest_isr(struct kvm_lapic *apic)
@@ -250,7 +245,7 @@ static void apic_set_tpr(struct kvm_lapic *apic, u32 tpr)
 
 int kvm_apic_match_physical_addr(struct kvm_lapic *apic, u16 dest)
 {
-       return kvm_apic_id(apic) == dest;
+       return dest == 0xff || kvm_apic_id(apic) == dest;
 }
 
 int kvm_apic_match_logical_addr(struct kvm_lapic *apic, u8 mda)
@@ -279,37 +274,34 @@ int kvm_apic_match_logical_addr(struct kvm_lapic *apic, u8 mda)
        return result;
 }
 
-static int apic_match_dest(struct kvm_vcpu *vcpu, struct kvm_lapic *source,
+int kvm_apic_match_dest(struct kvm_vcpu *vcpu, struct kvm_lapic *source,
                           int short_hand, int dest, int dest_mode)
 {
        int result = 0;
        struct kvm_lapic *target = vcpu->arch.apic;
 
        apic_debug("target %p, source %p, dest 0x%x, "
-                  "dest_mode 0x%x, short_hand 0x%x",
+                  "dest_mode 0x%x, short_hand 0x%x\n",
                   target, source, dest, dest_mode, short_hand);
 
        ASSERT(!target);
        switch (short_hand) {
        case APIC_DEST_NOSHORT:
-               if (dest_mode == 0) {
+               if (dest_mode == 0)
                        /* Physical mode. */
-                       if ((dest == 0xFF) || (dest == kvm_apic_id(target)))
-                               result = 1;
-               } else
+                       result = kvm_apic_match_physical_addr(target, dest);
+               else
                        /* Logical mode. */
                        result = kvm_apic_match_logical_addr(target, dest);
                break;
        case APIC_DEST_SELF:
-               if (target == source)
-                       result = 1;
+               result = (target == source);
                break;
        case APIC_DEST_ALLINC:
                result = 1;
                break;
        case APIC_DEST_ALLBUT:
-               if (target != source)
-                       result = 1;
+               result = (target != source);
                break;
        default:
                printk(KERN_WARNING "Bad dest shorthand value %x\n",
@@ -327,20 +319,22 @@ static int apic_match_dest(struct kvm_vcpu *vcpu, struct kvm_lapic *source,
 static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
                             int vector, int level, int trig_mode)
 {
-       int orig_irr, result = 0;
+       int result = 0;
        struct kvm_vcpu *vcpu = apic->vcpu;
 
        switch (delivery_mode) {
-       case APIC_DM_FIXED:
        case APIC_DM_LOWEST:
+               vcpu->arch.apic_arb_prio++;
+       case APIC_DM_FIXED:
                /* FIXME add logic for vcpu on reset */
                if (unlikely(!apic_enabled(apic)))
                        break;
 
-               orig_irr = apic_test_and_set_irr(vector, apic);
-               if (orig_irr && trig_mode) {
-                       apic_debug("level trig mode repeatedly for vector %d",
-                                  vector);
+               result = !apic_test_and_set_irr(vector, apic);
+               if (!result) {
+                       if (trig_mode)
+                               apic_debug("level trig mode repeatedly for "
+                                               "vector %d", vector);
                        break;
                }
 
@@ -349,10 +343,7 @@ static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
                        apic_set_vector(vector, apic->regs + APIC_TMR);
                } else
                        apic_clear_vector(vector, apic->regs + APIC_TMR);
-
                kvm_vcpu_kick(vcpu);
-
-               result = (orig_irr == 0);
                break;
 
        case APIC_DM_REMRD:
@@ -364,12 +355,14 @@ static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
                break;
 
        case APIC_DM_NMI:
+               result = 1;
                kvm_inject_nmi(vcpu);
                kvm_vcpu_kick(vcpu);
                break;
 
        case APIC_DM_INIT:
                if (level) {
+                       result = 1;
                        if (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE)
                                printk(KERN_DEBUG
                                       "INIT on a runnable vcpu %d\n",
@@ -386,6 +379,7 @@ static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
                apic_debug("SIPI to vcpu %d vector 0x%02x\n",
                           vcpu->vcpu_id, vector);
                if (vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) {
+                       result = 1;
                        vcpu->arch.sipi_vector = vector;
                        vcpu->arch.mp_state = KVM_MP_STATE_SIPI_RECEIVED;
                        kvm_vcpu_kick(vcpu);
@@ -408,43 +402,9 @@ static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
        return result;
 }
 
-static struct kvm_lapic *kvm_apic_round_robin(struct kvm *kvm, u8 vector,
-                                      unsigned long bitmap)
-{
-       int last;
-       int next;
-       struct kvm_lapic *apic = NULL;
-
-       last = kvm->arch.round_robin_prev_vcpu;
-       next = last;
-
-       do {
-               if (++next == KVM_MAX_VCPUS)
-                       next = 0;
-               if (kvm->vcpus[next] == NULL || !test_bit(next, &bitmap))
-                       continue;
-               apic = kvm->vcpus[next]->arch.apic;
-               if (apic && apic_enabled(apic))
-                       break;
-               apic = NULL;
-       } while (next != last);
-       kvm->arch.round_robin_prev_vcpu = next;
-
-       if (!apic)
-               printk(KERN_DEBUG "vcpu not ready for apic_round_robin\n");
-
-       return apic;
-}
-
-struct kvm_vcpu *kvm_get_lowest_prio_vcpu(struct kvm *kvm, u8 vector,
-               unsigned long bitmap)
+int kvm_apic_compare_prio(struct kvm_vcpu *vcpu1, struct kvm_vcpu *vcpu2)
 {
-       struct kvm_lapic *apic;
-
-       apic = kvm_apic_round_robin(kvm, vector, bitmap);
-       if (apic)
-               return apic->vcpu;
-       return NULL;
+       return vcpu1->arch.apic_arb_prio - vcpu2->arch.apic_arb_prio;
 }
 
 static void apic_set_eoi(struct kvm_lapic *apic)
@@ -472,47 +432,24 @@ static void apic_send_ipi(struct kvm_lapic *apic)
 {
        u32 icr_low = apic_get_reg(apic, APIC_ICR);
        u32 icr_high = apic_get_reg(apic, APIC_ICR2);
+       struct kvm_lapic_irq irq;
 
-       unsigned int dest = GET_APIC_DEST_FIELD(icr_high);
-       unsigned int short_hand = icr_low & APIC_SHORT_MASK;
-       unsigned int trig_mode = icr_low & APIC_INT_LEVELTRIG;
-       unsigned int level = icr_low & APIC_INT_ASSERT;
-       unsigned int dest_mode = icr_low & APIC_DEST_MASK;
-       unsigned int delivery_mode = icr_low & APIC_MODE_MASK;
-       unsigned int vector = icr_low & APIC_VECTOR_MASK;
-
-       struct kvm_vcpu *target;
-       struct kvm_vcpu *vcpu;
-       unsigned long lpr_map = 0;
-       int i;
+       irq.vector = icr_low & APIC_VECTOR_MASK;
+       irq.delivery_mode = icr_low & APIC_MODE_MASK;
+       irq.dest_mode = icr_low & APIC_DEST_MASK;
+       irq.level = icr_low & APIC_INT_ASSERT;
+       irq.trig_mode = icr_low & APIC_INT_LEVELTRIG;
+       irq.shorthand = icr_low & APIC_SHORT_MASK;
+       irq.dest_id = GET_APIC_DEST_FIELD(icr_high);
 
        apic_debug("icr_high 0x%x, icr_low 0x%x, "
                   "short_hand 0x%x, dest 0x%x, trig_mode 0x%x, level 0x%x, "
                   "dest_mode 0x%x, delivery_mode 0x%x, vector 0x%x\n",
-                  icr_high, icr_low, short_hand, dest,
-                  trig_mode, level, dest_mode, delivery_mode, vector);
-
-       for (i = 0; i < KVM_MAX_VCPUS; i++) {
-               vcpu = apic->vcpu->kvm->vcpus[i];
-               if (!vcpu)
-                       continue;
-
-               if (vcpu->arch.apic &&
-                   apic_match_dest(vcpu, apic, short_hand, dest, dest_mode)) {
-                       if (delivery_mode == APIC_DM_LOWEST)
-                               set_bit(vcpu->vcpu_id, &lpr_map);
-                       else
-                               __apic_accept_irq(vcpu->arch.apic, delivery_mode,
-                                                 vector, level, trig_mode);
-               }
-       }
+                  icr_high, icr_low, irq.shorthand, irq.dest_id,
+                  irq.trig_mode, irq.level, irq.dest_mode, irq.delivery_mode,
+                  irq.vector);
 
-       if (delivery_mode == APIC_DM_LOWEST) {
-               target = kvm_get_lowest_prio_vcpu(vcpu->kvm, vector, lpr_map);
-               if (target != NULL)
-                       __apic_accept_irq(target->arch.apic, delivery_mode,
-                                         vector, level, trig_mode);
-       }
+       kvm_irq_delivery_to_apic(apic->vcpu->kvm, apic, &irq);
 }
 
 static u32 apic_get_tmcct(struct kvm_lapic *apic)
@@ -527,12 +464,13 @@ static u32 apic_get_tmcct(struct kvm_lapic *apic)
        if (apic_get_reg(apic, APIC_TMICT) == 0)
                return 0;
 
-       remaining = hrtimer_expires_remaining(&apic->timer.dev);
+       remaining = hrtimer_expires_remaining(&apic->lapic_timer.timer);
        if (ktime_to_ns(remaining) < 0)
                remaining = ktime_set(0, 0);
 
-       ns = mod_64(ktime_to_ns(remaining), apic->timer.period);
-       tmcct = div64_u64(ns, (APIC_BUS_CYCLE_NS * apic->timer.divide_count));
+       ns = mod_64(ktime_to_ns(remaining), apic->lapic_timer.period);
+       tmcct = div64_u64(ns,
+                        (APIC_BUS_CYCLE_NS * apic->divide_count));
 
        return tmcct;
 }
@@ -619,25 +557,25 @@ static void update_divide_count(struct kvm_lapic *apic)
        tdcr = apic_get_reg(apic, APIC_TDCR);
        tmp1 = tdcr & 0xf;
        tmp2 = ((tmp1 & 0x3) | ((tmp1 & 0x8) >> 1)) + 1;
-       apic->timer.divide_count = 0x1 << (tmp2 & 0x7);
+       apic->divide_count = 0x1 << (tmp2 & 0x7);
 
        apic_debug("timer divide count is 0x%x\n",
-                                  apic->timer.divide_count);
+                                  apic->divide_count);
 }
 
 static void start_apic_timer(struct kvm_lapic *apic)
 {
-       ktime_t now = apic->timer.dev.base->get_time();
+       ktime_t now = apic->lapic_timer.timer.base->get_time();
 
-       apic->timer.period = apic_get_reg(apic, APIC_TMICT) *
-                   APIC_BUS_CYCLE_NS * apic->timer.divide_count;
-       atomic_set(&apic->timer.pending, 0);
+       apic->lapic_timer.period = apic_get_reg(apic, APIC_TMICT) *
+                   APIC_BUS_CYCLE_NS * apic->divide_count;
+       atomic_set(&apic->lapic_timer.pending, 0);
 
-       if (!apic->timer.period)
+       if (!apic->lapic_timer.period)
                return;
 
-       hrtimer_start(&apic->timer.dev,
-                     ktime_add_ns(now, apic->timer.period),
+       hrtimer_start(&apic->lapic_timer.timer,
+                     ktime_add_ns(now, apic->lapic_timer.period),
                      HRTIMER_MODE_ABS);
 
        apic_debug("%s: bus cycle is %" PRId64 "ns, now 0x%016"
@@ -646,9 +584,9 @@ static void start_apic_timer(struct kvm_lapic *apic)
                           "expire @ 0x%016" PRIx64 ".\n", __func__,
                           APIC_BUS_CYCLE_NS, ktime_to_ns(now),
                           apic_get_reg(apic, APIC_TMICT),
-                          apic->timer.period,
+                          apic->lapic_timer.period,
                           ktime_to_ns(ktime_add_ns(now,
-                                       apic->timer.period)));
+                                       apic->lapic_timer.period)));
 }
 
 static void apic_manage_nmi_watchdog(struct kvm_lapic *apic, u32 lvt0_val)
@@ -730,7 +668,7 @@ static void apic_mmio_write(struct kvm_io_device *this,
                                apic_set_reg(apic, APIC_LVTT + 0x10 * i,
                                             lvt_val | APIC_LVT_MASKED);
                        }
-                       atomic_set(&apic->timer.pending, 0);
+                       atomic_set(&apic->lapic_timer.pending, 0);
 
                }
                break;
@@ -762,7 +700,7 @@ static void apic_mmio_write(struct kvm_io_device *this,
                break;
 
        case APIC_TMICT:
-               hrtimer_cancel(&apic->timer.dev);
+               hrtimer_cancel(&apic->lapic_timer.timer);
                apic_set_reg(apic, APIC_TMICT, val);
                start_apic_timer(apic);
                return;
@@ -802,7 +740,7 @@ void kvm_free_lapic(struct kvm_vcpu *vcpu)
        if (!vcpu->arch.apic)
                return;
 
-       hrtimer_cancel(&vcpu->arch.apic->timer.dev);
+       hrtimer_cancel(&vcpu->arch.apic->lapic_timer.timer);
 
        if (vcpu->arch.apic->regs_page)
                __free_page(vcpu->arch.apic->regs_page);
@@ -880,7 +818,7 @@ void kvm_lapic_reset(struct kvm_vcpu *vcpu)
        ASSERT(apic != NULL);
 
        /* Stop the timer in case it's a reset to an active apic */
-       hrtimer_cancel(&apic->timer.dev);
+       hrtimer_cancel(&apic->lapic_timer.timer);
 
        apic_set_reg(apic, APIC_ID, vcpu->vcpu_id << 24);
        apic_set_reg(apic, APIC_LVR, APIC_VERSION);
@@ -905,11 +843,13 @@ void kvm_lapic_reset(struct kvm_vcpu *vcpu)
                apic_set_reg(apic, APIC_TMR + 0x10 * i, 0);
        }
        update_divide_count(apic);
-       atomic_set(&apic->timer.pending, 0);
+       atomic_set(&apic->lapic_timer.pending, 0);
        if (vcpu->vcpu_id == 0)
                vcpu->arch.apic_base |= MSR_IA32_APICBASE_BSP;
        apic_update_ppr(apic);
 
+       vcpu->arch.apic_arb_prio = 0;
+
        apic_debug(KERN_INFO "%s: vcpu=%p, id=%d, base_msr="
                   "0x%016" PRIx64 ", base_address=0x%0lx.\n", __func__,
                   vcpu, kvm_apic_id(apic),
@@ -917,16 +857,14 @@ void kvm_lapic_reset(struct kvm_vcpu *vcpu)
 }
 EXPORT_SYMBOL_GPL(kvm_lapic_reset);
 
-int kvm_lapic_enabled(struct kvm_vcpu *vcpu)
+bool kvm_apic_present(struct kvm_vcpu *vcpu)
 {
-       struct kvm_lapic *apic = vcpu->arch.apic;
-       int ret = 0;
-
-       if (!apic)
-               return 0;
-       ret = apic_enabled(apic);
+       return vcpu->arch.apic && apic_hw_enabled(vcpu->arch.apic);
+}
 
-       return ret;
+int kvm_lapic_enabled(struct kvm_vcpu *vcpu)
+{
+       return kvm_apic_present(vcpu) && apic_sw_enabled(vcpu->arch.apic);
 }
 EXPORT_SYMBOL_GPL(kvm_lapic_enabled);
 
@@ -936,22 +874,11 @@ EXPORT_SYMBOL_GPL(kvm_lapic_enabled);
  *----------------------------------------------------------------------
  */
 
-/* TODO: make sure __apic_timer_fn runs in current pCPU */
-static int __apic_timer_fn(struct kvm_lapic *apic)
+static bool lapic_is_periodic(struct kvm_timer *ktimer)
 {
-       int result = 0;
-       wait_queue_head_t *q = &apic->vcpu->wq;
-
-       if(!atomic_inc_and_test(&apic->timer.pending))
-               set_bit(KVM_REQ_PENDING_TIMER, &apic->vcpu->requests);
-       if (waitqueue_active(q))
-               wake_up_interruptible(q);
-
-       if (apic_lvtt_period(apic)) {
-               result = 1;
-               hrtimer_add_expires_ns(&apic->timer.dev, apic->timer.period);
-       }
-       return result;
+       struct kvm_lapic *apic = container_of(ktimer, struct kvm_lapic,
+                                             lapic_timer);
+       return apic_lvtt_period(apic);
 }
 
 int apic_has_pending_timer(struct kvm_vcpu *vcpu)
@@ -959,7 +886,7 @@ int apic_has_pending_timer(struct kvm_vcpu *vcpu)
        struct kvm_lapic *lapic = vcpu->arch.apic;
 
        if (lapic && apic_enabled(lapic) && apic_lvt_enabled(lapic, APIC_LVTT))
-               return atomic_read(&lapic->timer.pending);
+               return atomic_read(&lapic->lapic_timer.pending);
 
        return 0;
 }
@@ -986,20 +913,9 @@ void kvm_apic_nmi_wd_deliver(struct kvm_vcpu *vcpu)
                kvm_apic_local_deliver(apic, APIC_LVT0);
 }
 
-static enum hrtimer_restart apic_timer_fn(struct hrtimer *data)
-{
-       struct kvm_lapic *apic;
-       int restart_timer = 0;
-
-       apic = container_of(data, struct kvm_lapic, timer.dev);
-
-       restart_timer = __apic_timer_fn(apic);
-
-       if (restart_timer)
-               return HRTIMER_RESTART;
-       else
-               return HRTIMER_NORESTART;
-}
+static struct kvm_timer_ops lapic_timer_ops = {
+       .is_periodic = lapic_is_periodic,
+};
 
 int kvm_create_lapic(struct kvm_vcpu *vcpu)
 {
@@ -1024,8 +940,13 @@ int kvm_create_lapic(struct kvm_vcpu *vcpu)
        memset(apic->regs, 0, PAGE_SIZE);
        apic->vcpu = vcpu;
 
-       hrtimer_init(&apic->timer.dev, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
-       apic->timer.dev.function = apic_timer_fn;
+       hrtimer_init(&apic->lapic_timer.timer, CLOCK_MONOTONIC,
+                    HRTIMER_MODE_ABS);
+       apic->lapic_timer.timer.function = kvm_timer_fn;
+       apic->lapic_timer.t_ops = &lapic_timer_ops;
+       apic->lapic_timer.kvm = vcpu->kvm;
+       apic->lapic_timer.vcpu_id = vcpu->vcpu_id;
+
        apic->base_address = APIC_DEFAULT_PHYS_BASE;
        vcpu->arch.apic_base = APIC_DEFAULT_PHYS_BASE;
 
@@ -1078,9 +999,9 @@ void kvm_inject_apic_timer_irqs(struct kvm_vcpu *vcpu)
 {
        struct kvm_lapic *apic = vcpu->arch.apic;
 
-       if (apic && atomic_read(&apic->timer.pending) > 0) {
+       if (apic && atomic_read(&apic->lapic_timer.pending) > 0) {
                if (kvm_apic_local_deliver(apic, APIC_LVTT))
-                       atomic_dec(&apic->timer.pending);
+                       atomic_dec(&apic->lapic_timer.pending);
        }
 }
 
@@ -1106,7 +1027,7 @@ void kvm_apic_post_state_restore(struct kvm_vcpu *vcpu)
                             MSR_IA32_APICBASE_BASE;
        apic_set_reg(apic, APIC_LVR, APIC_VERSION);
        apic_update_ppr(apic);
-       hrtimer_cancel(&apic->timer.dev);
+       hrtimer_cancel(&apic->lapic_timer.timer);
        update_divide_count(apic);
        start_apic_timer(apic);
 }
@@ -1119,7 +1040,7 @@ void __kvm_migrate_apic_timer(struct kvm_vcpu *vcpu)
        if (!apic)
                return;
 
-       timer = &apic->timer.dev;
+       timer = &apic->lapic_timer.timer;
        if (hrtimer_cancel(timer))
                hrtimer_start_expires(timer, HRTIMER_MODE_ABS);
 }
index 45ab6ee71209f586596447c0b1ad02feca125698..a587f8349c460d2e13d136e41002872ae278e0d5 100644 (file)
@@ -2,18 +2,15 @@
 #define __KVM_X86_LAPIC_H
 
 #include "iodev.h"
+#include "kvm_timer.h"
 
 #include <linux/kvm_host.h>
 
 struct kvm_lapic {
        unsigned long base_address;
        struct kvm_io_device dev;
-       struct {
-               atomic_t pending;
-               s64 period;     /* unit: ns */
-               u32 divide_count;
-               struct hrtimer dev;
-       } timer;
+       struct kvm_timer lapic_timer;
+       u32 divide_count;
        struct kvm_vcpu *vcpu;
        struct page *regs_page;
        void *regs;
@@ -34,12 +31,13 @@ u64 kvm_lapic_get_base(struct kvm_vcpu *vcpu);
 
 int kvm_apic_match_physical_addr(struct kvm_lapic *apic, u16 dest);
 int kvm_apic_match_logical_addr(struct kvm_lapic *apic, u8 mda);
-int kvm_apic_set_irq(struct kvm_vcpu *vcpu, u8 vec, u8 trig);
+int kvm_apic_set_irq(struct kvm_vcpu *vcpu, struct kvm_lapic_irq *irq);
 
 u64 kvm_get_apic_base(struct kvm_vcpu *vcpu);
 void kvm_set_apic_base(struct kvm_vcpu *vcpu, u64 data);
 void kvm_apic_post_state_restore(struct kvm_vcpu *vcpu);
 int kvm_lapic_enabled(struct kvm_vcpu *vcpu);
+bool kvm_apic_present(struct kvm_vcpu *vcpu);
 int kvm_lapic_find_highest_irr(struct kvm_vcpu *vcpu);
 
 void kvm_lapic_set_vapic_addr(struct kvm_vcpu *vcpu, gpa_t vapic_addr);
index 32cf11e5728a2c1c444c407dfc287b8adf1c3c40..5c3d6e81a7dc61ec0b5ad6d952b77b96acb23b8f 100644 (file)
@@ -126,6 +126,7 @@ module_param(oos_shadow, bool, 0644);
 #define PFERR_PRESENT_MASK (1U << 0)
 #define PFERR_WRITE_MASK (1U << 1)
 #define PFERR_USER_MASK (1U << 2)
+#define PFERR_RSVD_MASK (1U << 3)
 #define PFERR_FETCH_MASK (1U << 4)
 
 #define PT_DIRECTORY_LEVEL 2
@@ -177,7 +178,11 @@ static u64 __read_mostly shadow_x_mask;    /* mutual exclusive with nx_mask */
 static u64 __read_mostly shadow_user_mask;
 static u64 __read_mostly shadow_accessed_mask;
 static u64 __read_mostly shadow_dirty_mask;
-static u64 __read_mostly shadow_mt_mask;
+
+static inline u64 rsvd_bits(int s, int e)
+{
+       return ((1ULL << (e - s + 1)) - 1) << s;
+}
 
 void kvm_mmu_set_nonpresent_ptes(u64 trap_pte, u64 notrap_pte)
 {
@@ -193,14 +198,13 @@ void kvm_mmu_set_base_ptes(u64 base_pte)
 EXPORT_SYMBOL_GPL(kvm_mmu_set_base_ptes);
 
 void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
-               u64 dirty_mask, u64 nx_mask, u64 x_mask, u64 mt_mask)
+               u64 dirty_mask, u64 nx_mask, u64 x_mask)
 {
        shadow_user_mask = user_mask;
        shadow_accessed_mask = accessed_mask;
        shadow_dirty_mask = dirty_mask;
        shadow_nx_mask = nx_mask;
        shadow_x_mask = x_mask;
-       shadow_mt_mask = mt_mask;
 }
 EXPORT_SYMBOL_GPL(kvm_mmu_set_mask_ptes);
 
@@ -219,11 +223,6 @@ static int is_nx(struct kvm_vcpu *vcpu)
        return vcpu->arch.shadow_efer & EFER_NX;
 }
 
-static int is_present_pte(unsigned long pte)
-{
-       return pte & PT_PRESENT_MASK;
-}
-
 static int is_shadow_present_pte(u64 pte)
 {
        return pte != shadow_trap_nonpresent_pte
@@ -1074,18 +1073,10 @@ static struct kvm_mmu_page *kvm_mmu_lookup_page(struct kvm *kvm, gfn_t gfn)
        return NULL;
 }
 
-static void kvm_unlink_unsync_global(struct kvm *kvm, struct kvm_mmu_page *sp)
-{
-       list_del(&sp->oos_link);
-       --kvm->stat.mmu_unsync_global;
-}
-
 static void kvm_unlink_unsync_page(struct kvm *kvm, struct kvm_mmu_page *sp)
 {
        WARN_ON(!sp->unsync);
        sp->unsync = 0;
-       if (sp->global)
-               kvm_unlink_unsync_global(kvm, sp);
        --kvm->stat.mmu_unsync;
 }
 
@@ -1248,7 +1239,6 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
        pgprintk("%s: adding gfn %lx role %x\n", __func__, gfn, role.word);
        sp->gfn = gfn;
        sp->role = role;
-       sp->global = 0;
        hlist_add_head(&sp->hash_link, bucket);
        if (!direct) {
                if (rmap_write_protect(vcpu->kvm, gfn))
@@ -1616,7 +1606,7 @@ static int get_mtrr_type(struct mtrr_state_type *mtrr_state,
        return mtrr_state->def_type;
 }
 
-static u8 get_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn)
+u8 kvm_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn)
 {
        u8 mtrr;
 
@@ -1626,6 +1616,7 @@ static u8 get_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn)
                mtrr = MTRR_TYPE_WRBACK;
        return mtrr;
 }
+EXPORT_SYMBOL_GPL(kvm_get_guest_memory_type);
 
 static int kvm_unsync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 {
@@ -1646,11 +1637,7 @@ static int kvm_unsync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
        ++vcpu->kvm->stat.mmu_unsync;
        sp->unsync = 1;
 
-       if (sp->global) {
-               list_add(&sp->oos_link, &vcpu->kvm->arch.oos_global_pages);
-               ++vcpu->kvm->stat.mmu_unsync_global;
-       } else
-               kvm_mmu_mark_parents_unsync(vcpu, sp);
+       kvm_mmu_mark_parents_unsync(vcpu, sp);
 
        mmu_convert_notrap(sp);
        return 0;
@@ -1677,21 +1664,11 @@ static int mmu_need_write_protect(struct kvm_vcpu *vcpu, gfn_t gfn,
 static int set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
                    unsigned pte_access, int user_fault,
                    int write_fault, int dirty, int largepage,
-                   int global, gfn_t gfn, pfn_t pfn, bool speculative,
+                   gfn_t gfn, pfn_t pfn, bool speculative,
                    bool can_unsync)
 {
        u64 spte;
        int ret = 0;
-       u64 mt_mask = shadow_mt_mask;
-       struct kvm_mmu_page *sp = page_header(__pa(shadow_pte));
-
-       if (!global && sp->global) {
-               sp->global = 0;
-               if (sp->unsync) {
-                       kvm_unlink_unsync_global(vcpu->kvm, sp);
-                       kvm_mmu_mark_parents_unsync(vcpu, sp);
-               }
-       }
 
        /*
         * We don't set the accessed bit, since we sometimes want to see
@@ -1711,16 +1688,9 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
                spte |= shadow_user_mask;
        if (largepage)
                spte |= PT_PAGE_SIZE_MASK;
-       if (mt_mask) {
-               if (!kvm_is_mmio_pfn(pfn)) {
-                       mt_mask = get_memory_type(vcpu, gfn) <<
-                               kvm_x86_ops->get_mt_mask_shift();
-                       mt_mask |= VMX_EPT_IGMT_BIT;
-               } else
-                       mt_mask = MTRR_TYPE_UNCACHABLE <<
-                               kvm_x86_ops->get_mt_mask_shift();
-               spte |= mt_mask;
-       }
+       if (tdp_enabled)
+               spte |= kvm_x86_ops->get_mt_mask(vcpu, gfn,
+                       kvm_is_mmio_pfn(pfn));
 
        spte |= (u64)pfn << PAGE_SHIFT;
 
@@ -1765,8 +1735,8 @@ set_pte:
 static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
                         unsigned pt_access, unsigned pte_access,
                         int user_fault, int write_fault, int dirty,
-                        int *ptwrite, int largepage, int global,
-                        gfn_t gfn, pfn_t pfn, bool speculative)
+                        int *ptwrite, int largepage, gfn_t gfn,
+                        pfn_t pfn, bool speculative)
 {
        int was_rmapped = 0;
        int was_writeble = is_writeble_pte(*shadow_pte);
@@ -1795,7 +1765,7 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
                        was_rmapped = 1;
        }
        if (set_spte(vcpu, shadow_pte, pte_access, user_fault, write_fault,
-                     dirty, largepage, global, gfn, pfn, speculative, true)) {
+                     dirty, largepage, gfn, pfn, speculative, true)) {
                if (write_fault)
                        *ptwrite = 1;
                kvm_x86_ops->tlb_flush(vcpu);
@@ -1843,7 +1813,7 @@ static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write,
                    || (largepage && iterator.level == PT_DIRECTORY_LEVEL)) {
                        mmu_set_spte(vcpu, iterator.sptep, ACC_ALL, ACC_ALL,
                                     0, write, 1, &pt_write,
-                                    largepage, 0, gfn, pfn, false);
+                                    largepage, gfn, pfn, false);
                        ++vcpu->stat.pf_fixed;
                        break;
                }
@@ -1942,7 +1912,19 @@ static void mmu_free_roots(struct kvm_vcpu *vcpu)
        vcpu->arch.mmu.root_hpa = INVALID_PAGE;
 }
 
-static void mmu_alloc_roots(struct kvm_vcpu *vcpu)
+static int mmu_check_root(struct kvm_vcpu *vcpu, gfn_t root_gfn)
+{
+       int ret = 0;
+
+       if (!kvm_is_visible_gfn(vcpu->kvm, root_gfn)) {
+               set_bit(KVM_REQ_TRIPLE_FAULT, &vcpu->requests);
+               ret = 1;
+       }
+
+       return ret;
+}
+
+static int mmu_alloc_roots(struct kvm_vcpu *vcpu)
 {
        int i;
        gfn_t root_gfn;
@@ -1957,13 +1939,15 @@ static void mmu_alloc_roots(struct kvm_vcpu *vcpu)
                ASSERT(!VALID_PAGE(root));
                if (tdp_enabled)
                        direct = 1;
+               if (mmu_check_root(vcpu, root_gfn))
+                       return 1;
                sp = kvm_mmu_get_page(vcpu, root_gfn, 0,
                                      PT64_ROOT_LEVEL, direct,
                                      ACC_ALL, NULL);
                root = __pa(sp->spt);
                ++sp->root_count;
                vcpu->arch.mmu.root_hpa = root;
-               return;
+               return 0;
        }
        direct = !is_paging(vcpu);
        if (tdp_enabled)
@@ -1980,6 +1964,8 @@ static void mmu_alloc_roots(struct kvm_vcpu *vcpu)
                        root_gfn = vcpu->arch.pdptrs[i] >> PAGE_SHIFT;
                } else if (vcpu->arch.mmu.root_level == 0)
                        root_gfn = 0;
+               if (mmu_check_root(vcpu, root_gfn))
+                       return 1;
                sp = kvm_mmu_get_page(vcpu, root_gfn, i << 30,
                                      PT32_ROOT_LEVEL, direct,
                                      ACC_ALL, NULL);
@@ -1988,6 +1974,7 @@ static void mmu_alloc_roots(struct kvm_vcpu *vcpu)
                vcpu->arch.mmu.pae_root[i] = root | PT_PRESENT_MASK;
        }
        vcpu->arch.mmu.root_hpa = __pa(vcpu->arch.mmu.pae_root);
+       return 0;
 }
 
 static void mmu_sync_roots(struct kvm_vcpu *vcpu)
@@ -2006,7 +1993,7 @@ static void mmu_sync_roots(struct kvm_vcpu *vcpu)
        for (i = 0; i < 4; ++i) {
                hpa_t root = vcpu->arch.mmu.pae_root[i];
 
-               if (root) {
+               if (root && VALID_PAGE(root)) {
                        root &= PT64_BASE_ADDR_MASK;
                        sp = page_header(root);
                        mmu_sync_children(vcpu, sp);
@@ -2014,15 +2001,6 @@ static void mmu_sync_roots(struct kvm_vcpu *vcpu)
        }
 }
 
-static void mmu_sync_global(struct kvm_vcpu *vcpu)
-{
-       struct kvm *kvm = vcpu->kvm;
-       struct kvm_mmu_page *sp, *n;
-
-       list_for_each_entry_safe(sp, n, &kvm->arch.oos_global_pages, oos_link)
-               kvm_sync_page(vcpu, sp);
-}
-
 void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu)
 {
        spin_lock(&vcpu->kvm->mmu_lock);
@@ -2030,13 +2008,6 @@ void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu)
        spin_unlock(&vcpu->kvm->mmu_lock);
 }
 
-void kvm_mmu_sync_global(struct kvm_vcpu *vcpu)
-{
-       spin_lock(&vcpu->kvm->mmu_lock);
-       mmu_sync_global(vcpu);
-       spin_unlock(&vcpu->kvm->mmu_lock);
-}
-
 static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t vaddr)
 {
        return vaddr;
@@ -2151,6 +2122,14 @@ static void paging_free(struct kvm_vcpu *vcpu)
        nonpaging_free(vcpu);
 }
 
+static bool is_rsvd_bits_set(struct kvm_vcpu *vcpu, u64 gpte, int level)
+{
+       int bit7;
+
+       bit7 = (gpte >> 7) & 1;
+       return (gpte & vcpu->arch.mmu.rsvd_bits_mask[bit7][level-1]) != 0;
+}
+
 #define PTTYPE 64
 #include "paging_tmpl.h"
 #undef PTTYPE
@@ -2159,6 +2138,59 @@ static void paging_free(struct kvm_vcpu *vcpu)
 #include "paging_tmpl.h"
 #undef PTTYPE
 
+static void reset_rsvds_bits_mask(struct kvm_vcpu *vcpu, int level)
+{
+       struct kvm_mmu *context = &vcpu->arch.mmu;
+       int maxphyaddr = cpuid_maxphyaddr(vcpu);
+       u64 exb_bit_rsvd = 0;
+
+       if (!is_nx(vcpu))
+               exb_bit_rsvd = rsvd_bits(63, 63);
+       switch (level) {
+       case PT32_ROOT_LEVEL:
+               /* no rsvd bits for 2 level 4K page table entries */
+               context->rsvd_bits_mask[0][1] = 0;
+               context->rsvd_bits_mask[0][0] = 0;
+               if (is_cpuid_PSE36())
+                       /* 36bits PSE 4MB page */
+                       context->rsvd_bits_mask[1][1] = rsvd_bits(17, 21);
+               else
+                       /* 32 bits PSE 4MB page */
+                       context->rsvd_bits_mask[1][1] = rsvd_bits(13, 21);
+               context->rsvd_bits_mask[1][0] = ~0ull;
+               break;
+       case PT32E_ROOT_LEVEL:
+               context->rsvd_bits_mask[0][2] =
+                       rsvd_bits(maxphyaddr, 63) |
+                       rsvd_bits(7, 8) | rsvd_bits(1, 2);      /* PDPTE */
+               context->rsvd_bits_mask[0][1] = exb_bit_rsvd |
+                       rsvd_bits(maxphyaddr, 62);      /* PDE */
+               context->rsvd_bits_mask[0][0] = exb_bit_rsvd |
+                       rsvd_bits(maxphyaddr, 62);      /* PTE */
+               context->rsvd_bits_mask[1][1] = exb_bit_rsvd |
+                       rsvd_bits(maxphyaddr, 62) |
+                       rsvd_bits(13, 20);              /* large page */
+               context->rsvd_bits_mask[1][0] = ~0ull;
+               break;
+       case PT64_ROOT_LEVEL:
+               context->rsvd_bits_mask[0][3] = exb_bit_rsvd |
+                       rsvd_bits(maxphyaddr, 51) | rsvd_bits(7, 8);
+               context->rsvd_bits_mask[0][2] = exb_bit_rsvd |
+                       rsvd_bits(maxphyaddr, 51) | rsvd_bits(7, 8);
+               context->rsvd_bits_mask[0][1] = exb_bit_rsvd |
+                       rsvd_bits(maxphyaddr, 51);
+               context->rsvd_bits_mask[0][0] = exb_bit_rsvd |
+                       rsvd_bits(maxphyaddr, 51);
+               context->rsvd_bits_mask[1][3] = context->rsvd_bits_mask[0][3];
+               context->rsvd_bits_mask[1][2] = context->rsvd_bits_mask[0][2];
+               context->rsvd_bits_mask[1][1] = exb_bit_rsvd |
+                       rsvd_bits(maxphyaddr, 51) |
+                       rsvd_bits(13, 20);              /* large page */
+               context->rsvd_bits_mask[1][0] = ~0ull;
+               break;
+       }
+}
+
 static int paging64_init_context_common(struct kvm_vcpu *vcpu, int level)
 {
        struct kvm_mmu *context = &vcpu->arch.mmu;
@@ -2179,6 +2211,7 @@ static int paging64_init_context_common(struct kvm_vcpu *vcpu, int level)
 
 static int paging64_init_context(struct kvm_vcpu *vcpu)
 {
+       reset_rsvds_bits_mask(vcpu, PT64_ROOT_LEVEL);
        return paging64_init_context_common(vcpu, PT64_ROOT_LEVEL);
 }
 
@@ -2186,6 +2219,7 @@ static int paging32_init_context(struct kvm_vcpu *vcpu)
 {
        struct kvm_mmu *context = &vcpu->arch.mmu;
 
+       reset_rsvds_bits_mask(vcpu, PT32_ROOT_LEVEL);
        context->new_cr3 = paging_new_cr3;
        context->page_fault = paging32_page_fault;
        context->gva_to_gpa = paging32_gva_to_gpa;
@@ -2201,6 +2235,7 @@ static int paging32_init_context(struct kvm_vcpu *vcpu)
 
 static int paging32E_init_context(struct kvm_vcpu *vcpu)
 {
+       reset_rsvds_bits_mask(vcpu, PT32E_ROOT_LEVEL);
        return paging64_init_context_common(vcpu, PT32E_ROOT_LEVEL);
 }
 
@@ -2221,12 +2256,15 @@ static int init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
                context->gva_to_gpa = nonpaging_gva_to_gpa;
                context->root_level = 0;
        } else if (is_long_mode(vcpu)) {
+               reset_rsvds_bits_mask(vcpu, PT64_ROOT_LEVEL);
                context->gva_to_gpa = paging64_gva_to_gpa;
                context->root_level = PT64_ROOT_LEVEL;
        } else if (is_pae(vcpu)) {
+               reset_rsvds_bits_mask(vcpu, PT32E_ROOT_LEVEL);
                context->gva_to_gpa = paging64_gva_to_gpa;
                context->root_level = PT32E_ROOT_LEVEL;
        } else {
+               reset_rsvds_bits_mask(vcpu, PT32_ROOT_LEVEL);
                context->gva_to_gpa = paging32_gva_to_gpa;
                context->root_level = PT32_ROOT_LEVEL;
        }
@@ -2290,9 +2328,11 @@ int kvm_mmu_load(struct kvm_vcpu *vcpu)
                goto out;
        spin_lock(&vcpu->kvm->mmu_lock);
        kvm_mmu_free_some_pages(vcpu);
-       mmu_alloc_roots(vcpu);
+       r = mmu_alloc_roots(vcpu);
        mmu_sync_roots(vcpu);
        spin_unlock(&vcpu->kvm->mmu_lock);
+       if (r)
+               goto out;
        kvm_x86_ops->set_cr3(vcpu, vcpu->arch.mmu.root_hpa);
        kvm_mmu_flush_tlb(vcpu);
 out:
@@ -2638,14 +2678,6 @@ EXPORT_SYMBOL_GPL(kvm_disable_tdp);
 
 static void free_mmu_pages(struct kvm_vcpu *vcpu)
 {
-       struct kvm_mmu_page *sp;
-
-       while (!list_empty(&vcpu->kvm->arch.active_mmu_pages)) {
-               sp = container_of(vcpu->kvm->arch.active_mmu_pages.next,
-                                 struct kvm_mmu_page, link);
-               kvm_mmu_zap_page(vcpu->kvm, sp);
-               cond_resched();
-       }
        free_page((unsigned long)vcpu->arch.mmu.pae_root);
 }
 
@@ -2710,7 +2742,6 @@ void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot)
 {
        struct kvm_mmu_page *sp;
 
-       spin_lock(&kvm->mmu_lock);
        list_for_each_entry(sp, &kvm->arch.active_mmu_pages, link) {
                int i;
                u64 *pt;
@@ -2725,7 +2756,6 @@ void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot)
                                pt[i] &= ~PT_WRITABLE_MASK;
        }
        kvm_flush_remote_tlbs(kvm);
-       spin_unlock(&kvm->mmu_lock);
 }
 
 void kvm_mmu_zap_all(struct kvm *kvm)
@@ -3007,11 +3037,13 @@ static void audit_mappings_page(struct kvm_vcpu *vcpu, u64 page_pte,
                                       " in nonleaf level: levels %d gva %lx"
                                       " level %d pte %llx\n", audit_msg,
                                       vcpu->arch.mmu.root_level, va, level, ent);
-
-                       audit_mappings_page(vcpu, ent, va, level - 1);
+                       else
+                               audit_mappings_page(vcpu, ent, va, level - 1);
                } else {
                        gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, va);
-                       hpa_t hpa = (hpa_t)gpa_to_pfn(vcpu, gpa) << PAGE_SHIFT;
+                       gfn_t gfn = gpa >> PAGE_SHIFT;
+                       pfn_t pfn = gfn_to_pfn(vcpu->kvm, gfn);
+                       hpa_t hpa = (hpa_t)pfn << PAGE_SHIFT;
 
                        if (is_shadow_present_pte(ent)
                            && (ent & PT64_BASE_ADDR_MASK) != hpa)
index eaab2145f62b21d31558372d511b16d8f1bc57d4..3494a2fb136eec376707176bf38e006cb9c03053 100644 (file)
@@ -75,4 +75,9 @@ static inline int is_paging(struct kvm_vcpu *vcpu)
        return vcpu->arch.cr0 & X86_CR0_PG;
 }
 
+static inline int is_present_pte(unsigned long pte)
+{
+       return pte & PT_PRESENT_MASK;
+}
+
 #endif
index 6bd70206c56130ce95398c2c91a471ab471e9907..258e4591e1ca02246801fc380ae19472b2375b0a 100644 (file)
@@ -123,6 +123,7 @@ static int FNAME(walk_addr)(struct guest_walker *walker,
        gfn_t table_gfn;
        unsigned index, pt_access, pte_access;
        gpa_t pte_gpa;
+       int rsvd_fault = 0;
 
        pgprintk("%s: addr %lx\n", __func__, addr);
 walk:
@@ -157,6 +158,10 @@ walk:
                if (!is_present_pte(pte))
                        goto not_present;
 
+               rsvd_fault = is_rsvd_bits_set(vcpu, pte, walker->level);
+               if (rsvd_fault)
+                       goto access_error;
+
                if (write_fault && !is_writeble_pte(pte))
                        if (user_fault || is_write_protection(vcpu))
                                goto access_error;
@@ -209,7 +214,6 @@ walk:
                if (ret)
                        goto walk;
                pte |= PT_DIRTY_MASK;
-               kvm_mmu_pte_write(vcpu, pte_gpa, (u8 *)&pte, sizeof(pte), 0);
                walker->ptes[walker->level - 1] = pte;
        }
 
@@ -233,6 +237,8 @@ err:
                walker->error_code |= PFERR_USER_MASK;
        if (fetch_fault)
                walker->error_code |= PFERR_FETCH_MASK;
+       if (rsvd_fault)
+               walker->error_code |= PFERR_RSVD_MASK;
        return 0;
 }
 
@@ -262,8 +268,7 @@ static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *page,
        kvm_get_pfn(pfn);
        mmu_set_spte(vcpu, spte, page->role.access, pte_access, 0, 0,
                     gpte & PT_DIRTY_MASK, NULL, largepage,
-                    gpte & PT_GLOBAL_MASK, gpte_to_gfn(gpte),
-                    pfn, true);
+                    gpte_to_gfn(gpte), pfn, true);
 }
 
 /*
@@ -297,7 +302,6 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
                                     user_fault, write_fault,
                                     gw->ptes[gw->level-1] & PT_DIRTY_MASK,
                                     ptwrite, largepage,
-                                    gw->ptes[gw->level-1] & PT_GLOBAL_MASK,
                                     gw->gfn, pfn, false);
                        break;
                }
@@ -380,7 +384,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
                return r;
 
        /*
-        * Look up the shadow pte for the faulting address.
+        * Look up the guest pte for the faulting address.
         */
        r = FNAME(walk_addr)(&walker, vcpu, addr, write_fault, user_fault,
                             fetch_fault);
@@ -586,7 +590,7 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
                nr_present++;
                pte_access = sp->role.access & FNAME(gpte_access)(vcpu, gpte);
                set_spte(vcpu, &sp->spt[i], pte_access, 0, 0,
-                        is_dirty_pte(gpte), 0, gpte & PT_GLOBAL_MASK, gfn,
+                        is_dirty_pte(gpte), 0, gfn,
                         spte_to_pfn(sp->spt[i]), true, false);
        }
 
index 1f8510c51d6e8abc2f315d9724e34708f49e2a6b..71510e07e69e062b5ce1626d6273cc2f03f39ccf 100644 (file)
@@ -19,6 +19,7 @@
 #include "irq.h"
 #include "mmu.h"
 #include "kvm_cache_regs.h"
+#include "x86.h"
 
 #include <linux/module.h>
 #include <linux/kernel.h>
@@ -69,7 +70,6 @@ module_param(npt, int, S_IRUGO);
 static int nested = 0;
 module_param(nested, int, S_IRUGO);
 
-static void kvm_reput_irq(struct vcpu_svm *svm);
 static void svm_flush_tlb(struct kvm_vcpu *vcpu);
 
 static int nested_svm_exit_handled(struct vcpu_svm *svm, bool kvm_override);
@@ -132,24 +132,6 @@ static inline u32 svm_has(u32 feat)
        return svm_features & feat;
 }
 
-static inline u8 pop_irq(struct kvm_vcpu *vcpu)
-{
-       int word_index = __ffs(vcpu->arch.irq_summary);
-       int bit_index = __ffs(vcpu->arch.irq_pending[word_index]);
-       int irq = word_index * BITS_PER_LONG + bit_index;
-
-       clear_bit(bit_index, &vcpu->arch.irq_pending[word_index]);
-       if (!vcpu->arch.irq_pending[word_index])
-               clear_bit(word_index, &vcpu->arch.irq_summary);
-       return irq;
-}
-
-static inline void push_irq(struct kvm_vcpu *vcpu, u8 irq)
-{
-       set_bit(irq, vcpu->arch.irq_pending);
-       set_bit(irq / BITS_PER_LONG, &vcpu->arch.irq_summary);
-}
-
 static inline void clgi(void)
 {
        asm volatile (__ex(SVM_CLGI));
@@ -214,17 +196,31 @@ static void svm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr,
        svm->vmcb->control.event_inj_err = error_code;
 }
 
-static bool svm_exception_injected(struct kvm_vcpu *vcpu)
+static int is_external_interrupt(u32 info)
+{
+       info &= SVM_EVTINJ_TYPE_MASK | SVM_EVTINJ_VALID;
+       return info == (SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_INTR);
+}
+
+static u32 svm_get_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
 {
        struct vcpu_svm *svm = to_svm(vcpu);
+       u32 ret = 0;
 
-       return !(svm->vmcb->control.exit_int_info & SVM_EXITINTINFO_VALID);
+       if (svm->vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK)
+               ret |= X86_SHADOW_INT_STI | X86_SHADOW_INT_MOV_SS;
+       return ret & mask;
 }
 
-static int is_external_interrupt(u32 info)
+static void svm_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
 {
-       info &= SVM_EVTINJ_TYPE_MASK | SVM_EVTINJ_VALID;
-       return info == (SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_INTR);
+       struct vcpu_svm *svm = to_svm(vcpu);
+
+       if (mask == 0)
+               svm->vmcb->control.int_state &= ~SVM_INTERRUPT_SHADOW_MASK;
+       else
+               svm->vmcb->control.int_state |= SVM_INTERRUPT_SHADOW_MASK;
+
 }
 
 static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
@@ -232,7 +228,9 @@ static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
        struct vcpu_svm *svm = to_svm(vcpu);
 
        if (!svm->next_rip) {
-               printk(KERN_DEBUG "%s: NOP\n", __func__);
+               if (emulate_instruction(vcpu, vcpu->run, 0, 0, EMULTYPE_SKIP) !=
+                               EMULATE_DONE)
+                       printk(KERN_DEBUG "%s: NOP\n", __func__);
                return;
        }
        if (svm->next_rip - kvm_rip_read(vcpu) > MAX_INST_SIZE)
@@ -240,9 +238,7 @@ static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
                       __func__, kvm_rip_read(vcpu), svm->next_rip);
 
        kvm_rip_write(vcpu, svm->next_rip);
-       svm->vmcb->control.int_state &= ~SVM_INTERRUPT_SHADOW_MASK;
-
-       vcpu->arch.interrupt_window_open = (svm->vcpu.arch.hflags & HF_GIF_MASK);
+       svm_set_interrupt_shadow(vcpu, 0);
 }
 
 static int has_svm(void)
@@ -830,6 +826,15 @@ static void svm_get_segment(struct kvm_vcpu *vcpu,
                if (!var->unusable)
                        var->type |= 0x1;
                break;
+       case VCPU_SREG_SS:
+               /* On AMD CPUs sometimes the DB bit in the segment
+                * descriptor is left as 1, although the whole segment has
+                * been made unusable. Clear it here to pass an Intel VMX
+                * entry check when cross vendor migrating.
+                */
+               if (var->unusable)
+                       var->db = 0;
+               break;
        }
 }
 
@@ -960,15 +965,16 @@ static void svm_set_segment(struct kvm_vcpu *vcpu,
 
 }
 
-static int svm_guest_debug(struct kvm_vcpu *vcpu, struct kvm_guest_debug *dbg)
+static void update_db_intercept(struct kvm_vcpu *vcpu)
 {
-       int old_debug = vcpu->guest_debug;
        struct vcpu_svm *svm = to_svm(vcpu);
 
-       vcpu->guest_debug = dbg->control;
-
        svm->vmcb->control.intercept_exceptions &=
                ~((1 << DB_VECTOR) | (1 << BP_VECTOR));
+
+       if (vcpu->arch.singlestep)
+               svm->vmcb->control.intercept_exceptions |= (1 << DB_VECTOR);
+
        if (vcpu->guest_debug & KVM_GUESTDBG_ENABLE) {
                if (vcpu->guest_debug &
                    (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))
@@ -979,6 +985,16 @@ static int svm_guest_debug(struct kvm_vcpu *vcpu, struct kvm_guest_debug *dbg)
                                1 << BP_VECTOR;
        } else
             &n