Merge remote branch 'intel/drm-intel-next' of /ssd/git/drm-next into drm-core-next
author Dave Airlie <airlied@redhat.com>
Tue, 3 Aug 2010 23:51:27 +0000 (09:51 +1000)
committer Dave Airlie <airlied@redhat.com>
Tue, 3 Aug 2010 23:51:27 +0000 (09:51 +1000)
* 'intel/drm-intel-next' of /ssd/git/drm-next: (230 commits)
  drm/i915: Clear the Ironlake dithering flags when the pipe doesn't want it.
  drm/agp/i915: trim stolen space to 32M
  drm/i915: Unset cursor if out-of-bounds upon mode change (v4)
  drm/i915: Unreference object not handle on creation
  drm/i915: Attempt to uncouple object after catastrophic failure in unbind
  drm/i915: Repeat unbinding during free if interrupted (v6)
  drm/i915: Refactor i915_gem_retire_requests()
  drm/i915: Warn if we run out of FIFO space for a mode
  drm/i915: Round up the watermark entries (v3)
  drm/i915: Typo in (unused) register mask for overlay.
  drm/i915: Check overlay stride errata for i830 and i845
  drm/i915: Validate the mode for eDP by using fixed panel size
  drm/i915: Always use the fixed panel timing for eDP
  drm/i915: Enable panel fitting for eDP
  drm/i915: Add fixed panel mode parsed from EDID for eDP without fixed mode in VBT
  drm/i915/sdvo: Set sync polarity based on actual mode
  drm/i915/hdmi: Set sync polarity based on actual mode
  drm/i915/pch: Set transcoder sync polarity for DP based on actual mode
  drm/i915: Initialize LVDS and eDP outputs before anything else
  drm/i915/dp: Correctly report eDP in the core connector type
  ...

198 files changed:
Documentation/credentials.txt
Documentation/feature-removal-schedule.txt
Documentation/kernel-parameters.txt
MAINTAINERS
Makefile
arch/arm/boot/compressed/Makefile
arch/arm/common/sa1111.c
arch/arm/include/asm/io.h
arch/arm/lib/csumpartialcopyuser.S
arch/arm/mach-clps711x/include/mach/debug-macro.S
arch/arm/mach-davinci/board-da850-evm.c
arch/arm/mach-footbridge/common.c
arch/arm/mach-h720x/include/mach/debug-macro.S
arch/arm/mach-kirkwood/tsx1x-common.c
arch/arm/mach-kirkwood/tsx1x-common.h
arch/arm/mach-ns9xxx/include/mach/debug-macro.S
arch/arm/mach-ns9xxx/include/mach/uncompress.h
arch/arm/mach-omap2/board-rx51-peripherals.c
arch/arm/mach-pxa/colibri-pxa300.c
arch/arm/mach-pxa/corgi.c
arch/arm/mach-pxa/cpufreq-pxa2xx.c
arch/arm/mach-pxa/pxa27x.c
arch/arm/mach-realview/core.c
arch/arm/mach-shark/include/mach/debug-macro.S
arch/arm/mach-ux500/include/mach/uncompress.h
arch/arm/mach-vexpress/v2m.c
arch/arm/mach-w90x900/cpu.c
arch/arm/mm/cache-l2x0.c
arch/arm/mm/highmem.c
arch/arm/plat-spear/include/plat/debug-macro.S
arch/avr32/include/asm/ioctls.h
arch/avr32/mach-at32ap/include/mach/board.h
arch/mips/alchemy/common/platform.c
arch/mips/alchemy/mtx-1/board_setup.c
arch/mips/bcm63xx/dev-enet.c
arch/mips/include/asm/atomic.h
arch/mips/include/asm/unistd.h
arch/mips/kernel/scall64-n32.S
arch/mips/kernel/vdso.c
arch/mips/mti-malta/malta-pci.c
arch/mips/nxp/pnx8550/common/pci.c
arch/mips/nxp/pnx8550/common/setup.c
arch/mips/pci/ops-pmcmsp.c
arch/mips/pci/pci-yosemite.c
arch/mips/powertv/asic/asic_devices.c
arch/powerpc/include/asm/kexec.h
arch/powerpc/include/asm/mmu-hash64.h
arch/powerpc/kernel/perf_event_fsl_emb.c
arch/powerpc/kernel/prom.c
arch/powerpc/mm/hash_low_64.S
arch/powerpc/mm/hash_utils_64.c
arch/powerpc/mm/hugetlbpage-hash64.c
arch/powerpc/mm/numa.c
arch/powerpc/platforms/pseries/hotplug-memory.c
arch/s390/kernel/entry.S
arch/s390/kernel/entry64.S
arch/s390/kernel/time.c
arch/x86/kernel/acpi/cstate.c
arch/x86/kernel/acpi/sleep.c
arch/x86/kernel/cpu/cpufreq/pcc-cpufreq.c
arch/x86/kernel/cpu/cpufreq/powernow-k8.c
arch/x86/kernel/hpet.c
arch/x86/kernel/i8259.c
arch/x86/kernel/kgdb.c
arch/x86/kvm/paging_tmpl.h
arch/x86/kvm/x86.c
drivers/acpi/acpica/evxfevnt.c
drivers/acpi/battery.c
drivers/acpi/blacklist.c
drivers/acpi/processor_core.c
drivers/acpi/processor_idle.c
drivers/acpi/sleep.c
drivers/base/core.c
drivers/char/agp/intel-agp.c
drivers/char/agp/intel-agp.h
drivers/char/agp/intel-gtt.c
drivers/char/tpm/tpm_tis.c
drivers/cpufreq/cpufreq.c
drivers/edac/i7core_edac.c
drivers/edac/mpc85xx_edac.c
drivers/gpio/gpiolib.c
drivers/gpu/drm/drm_edid.c
drivers/gpu/drm/i915/i915_debugfs.c
drivers/gpu/drm/i915/i915_dma.c
drivers/gpu/drm/i915/i915_drv.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_gem_tiling.c
drivers/gpu/drm/i915/i915_irq.c
drivers/gpu/drm/i915/i915_reg.h
drivers/gpu/drm/i915/i915_suspend.c
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_dp.c
drivers/gpu/drm/i915/intel_drv.h
drivers/gpu/drm/i915/intel_fb.c
drivers/gpu/drm/i915/intel_hdmi.c
drivers/gpu/drm/i915/intel_lvds.c
drivers/gpu/drm/i915/intel_overlay.c
drivers/gpu/drm/i915/intel_sdvo.c
drivers/gpu/drm/i915/intel_tv.c
drivers/gpu/drm/radeon/radeon_pm.c
drivers/mmc/host/mmci.c
drivers/net/bnx2x.h
drivers/net/bnx2x_main.c
drivers/net/bonding/bond_alb.c
drivers/net/declance.c
drivers/net/igb/igb_main.c
drivers/net/ixgbe/ixgbe_main.c
drivers/net/macvlan.c
drivers/net/macvtap.c
drivers/net/s2io.h
drivers/net/tun.c
drivers/net/wimax/i2400m/i2400m-usb.h
drivers/net/wimax/i2400m/usb.c
drivers/net/wireless/ath/ath9k/recv.c
drivers/pcmcia/pxa2xx_base.c
drivers/power/ds2782_battery.c
drivers/regulator/ab3100.c
drivers/regulator/tps6507x-regulator.c
drivers/regulator/wm8350-regulator.c
drivers/rtc/rtc-rx8581.c
drivers/s390/scsi/zfcp_erp.c
drivers/s390/scsi/zfcp_fsf.c
drivers/s390/scsi/zfcp_qdio.c
drivers/scsi/ibmvscsi/rpa_vscsi.c
drivers/scsi/ipr.c
drivers/scsi/ipr.h
drivers/serial/atmel_serial.c
drivers/usb/class/cdc-acm.c
drivers/usb/core/hub.c
drivers/usb/core/quirks.c
drivers/usb/gadget/pxa27x_udc.c
drivers/usb/gadget/s3c2410_udc.c
drivers/usb/host/ohci-pxa27x.c
drivers/usb/host/xhci-mem.c
drivers/usb/host/xhci-ring.c
drivers/usb/host/xhci.c
drivers/usb/host/xhci.h
drivers/usb/misc/sisusbvga/sisusb.c
drivers/usb/musb/tusb6010.c
drivers/usb/serial/ftdi_sio.c
drivers/usb/serial/ftdi_sio_ids.h
drivers/usb/serial/option.c
drivers/usb/serial/qcserial.c
drivers/usb/serial/sierra.c
drivers/usb/storage/transport.c
drivers/video/au1100fb.c
drivers/video/cyber2000fb.c
drivers/video/gbefb.c
drivers/video/pmag-ba-fb.c
drivers/video/pmagb-b-fb.c
drivers/virtio/virtio_ring.c
fs/9p/vfs_dir.c
fs/ceph/Kconfig
fs/ceph/caps.c
fs/ceph/dir.c
fs/ceph/file.c
fs/ceph/inode.c
fs/ceph/mds_client.c
fs/ceph/mon_client.c
fs/ceph/osd_client.c
fs/ceph/osdmap.c
fs/cifs/dns_resolve.c
fs/cifs/dns_resolve.h
fs/ecryptfs/messaging.c
fs/gfs2/dir.c
fs/nfs/file.c
fs/nfs/nfsroot.c
fs/nfs/write.c
fs/proc/array.c
fs/sysfs/symlink.c
include/acpi/processor.h
include/asm-generic/vmlinux.lds.h
include/linux/acpi.h
include/linux/cred.h
include/linux/if_macvlan.h
include/linux/nfs_fs.h
include/linux/regulator/tps6507x.h [new file with mode: 0644]
include/linux/sched.h
include/net/tc_act/tc_mirred.h
kernel/cred.c
kernel/module.c
mm/memory.c
net/core/dev.c
net/core/skbuff.c
net/ipv6/addrconf.c
net/mac80211/cfg.c
net/sched/act_mirred.c
scripts/kconfig/nconf.gui.c
scripts/package/Makefile
scripts/setlocalversion
sound/pci/hda/patch_hdmi.c
sound/pci/hda/patch_nvhdmi.c
sound/pci/hda/patch_realtek.c
sound/soc/au1x/psc-i2s.c
tools/perf/Makefile
tools/perf/util/hist.c
tools/perf/util/symbol.c

index a2db352870036ba062aa177ce450d0d491f7d8c8..995baf379c076770962a84268fde19b527caa0be 100644 (file)
@@ -417,6 +417,9 @@ reference on them using:
 This does all the RCU magic inside of it.  The caller must call put_cred() on
 the credentials so obtained when they're finished with.
 
+ [*] Note: The result of __task_cred() should not be passed directly to
+     get_cred() as this may race with commit_cred().
+
 There are a couple of convenience functions to access bits of another task's
 credentials, hiding the RCU magic from the caller:
 
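For context, a minimal sketch of the two access patterns this credentials.txt hunk describes — taking a proper reference with get_task_cred()/put_cred(), versus a pure RCU read via __task_cred() that must not be handed to get_cred(). It assumes the 2.6.35-era cred API and is illustrative only, not part of the patch:

#include <linux/cred.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>

/* Safe: get_task_cred() does the RCU magic and takes a reference. */
static uid_t example_task_uid(struct task_struct *task)
{
	const struct cred *cred = get_task_cred(task);
	uid_t uid = cred->uid;

	put_cred(cred);		/* caller must drop the reference when done */
	return uid;
}

/* Read-only peek: stay inside rcu_read_lock() and take no reference. */
static uid_t example_task_uid_rcu(struct task_struct *task)
{
	uid_t uid;

	rcu_read_lock();
	uid = __task_cred(task)->uid;	/* do NOT pass this to get_cred() */
	rcu_read_unlock();

	return uid;
}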
index c268783bc4e7c18c2e4db792c3fba83d6bc44c71..1571c0c83dba0b0876ea52e4ff85ff69a56dab87 100644 (file)
@@ -647,3 +647,10 @@ Who:       Stefan Richter <stefanr@s5r6.in-berlin.de>
 
 ----------------------------
 
+What:  The acpi_sleep=s4_nonvs command line option
+When:  2.6.37
+Files: arch/x86/kernel/acpi/sleep.c
+Why:   superseded by acpi_sleep=nonvs
+Who:   Rafael J. Wysocki <rjw@sisk.pl>
+
+----------------------------
index 4ddb58df081e360311e22cafe40273d3176de46d..2b2407d9a6d025a8451605b104a35daa7da59b47 100644 (file)
@@ -254,8 +254,8 @@ and is between 256 and 4096 characters. It is defined in the file
                        control method, with respect to putting devices into
                        low power states, to be enforced (the ACPI 2.0 ordering
                        of _PTS is used by default).
-                       s4_nonvs prevents the kernel from saving/restoring the
-                       ACPI NVS memory during hibernation.
+                       nonvs prevents the kernel from saving/restoring the
+                       ACPI NVS memory during suspend/hibernation and resume.
                        sci_force_enable causes the kernel to set SCI_EN directly
                        on resume from S1/S3 (which is against the ACPI spec,
                        but some broken systems don't work without it).
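As a usage illustration (not part of the patch), the renamed option is simply appended to the kernel command line; a hypothetical boot-loader entry might read:

    linux /boot/vmlinuz-2.6.35 root=/dev/sda1 ro acpi_sleep=nonvs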
index db3d0f5061f9c42e301e420878b0dc5cbe31e6e1..02f75fccac20915ae4c7aad6b333ed0061f90e44 100644 (file)
@@ -6243,6 +6243,8 @@ F:        drivers/mmc/host/wbsd.*
 
 WATCHDOG DEVICE DRIVERS
 M:     Wim Van Sebroeck <wim@iguana.be>
+L:     linux-watchdog@vger.kernel.org
+W:     http://www.linux-watchdog.org/
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/wim/linux-2.6-watchdog.git
 S:     Maintained
 F:     Documentation/watchdog/
index 886bf04931d4aea56c26761b26898a8f393fb9ce..141da26fda4b87870ba19237f61ced915d0cebcb 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 2
 PATCHLEVEL = 6
 SUBLEVEL = 35
-EXTRAVERSION = -rc6
+EXTRAVERSION =
 NAME = Sheep on Meth
 
 # *DOCUMENTATION*
index 53faa9063a035be790c3aef7c5f32c4f48eb8025..864a002137fed0bbc42efe7b4f2dc787a06ba48a 100644 (file)
@@ -71,6 +71,9 @@ targets       := vmlinux vmlinux.lds \
                 piggy.$(suffix_y) piggy.$(suffix_y).o \
                 font.o font.c head.o misc.o $(OBJS)
 
+# Make sure files are removed during clean
+extra-y       += piggy.gzip piggy.lzo piggy.lzma lib1funcs.S
+
 ifeq ($(CONFIG_FUNCTION_TRACER),y)
 ORIG_CFLAGS := $(KBUILD_CFLAGS)
 KBUILD_CFLAGS = $(subst -pg, , $(ORIG_CFLAGS))
index 6f80665f477e70b0acad4d6ead228295446863c3..9eaf65f43642e6886b3ab4bae43e7821aad285b6 100644 (file)
@@ -1028,13 +1028,12 @@ static int sa1111_remove(struct platform_device *pdev)
        struct sa1111 *sachip = platform_get_drvdata(pdev);
 
        if (sachip) {
-               __sa1111_remove(sachip);
-               platform_set_drvdata(pdev, NULL);
-
 #ifdef CONFIG_PM
                kfree(sachip->saved_state);
                sachip->saved_state = NULL;
 #endif
+               __sa1111_remove(sachip);
+               platform_set_drvdata(pdev, NULL);
        }
 
        return 0;
index c980156f32638f8f031b93e730d0c2e8396aec40..1261b1f928d95a9ac1cf374148208415c4dc248d 100644 (file)
@@ -26,6 +26,7 @@
 #include <linux/types.h>
 #include <asm/byteorder.h>
 #include <asm/memory.h>
+#include <asm/system.h>
 
 /*
  * ISA I/O bus memory addresses are 1:1 with the physical address.
@@ -179,25 +180,38 @@ extern void _memset_io(volatile void __iomem *, int, size_t);
  * IO port primitives for more information.
  */
 #ifdef __mem_pci
-#define readb(c) ({ __u8  __v = __raw_readb(__mem_pci(c)); __v; })
-#define readw(c) ({ __u16 __v = le16_to_cpu((__force __le16) \
+#define readb_relaxed(c) ({ u8  __v = __raw_readb(__mem_pci(c)); __v; })
+#define readw_relaxed(c) ({ u16 __v = le16_to_cpu((__force __le16) \
                                        __raw_readw(__mem_pci(c))); __v; })
-#define readl(c) ({ __u32 __v = le32_to_cpu((__force __le32) \
+#define readl_relaxed(c) ({ u32 __v = le32_to_cpu((__force __le32) \
                                        __raw_readl(__mem_pci(c))); __v; })
-#define readb_relaxed(addr) readb(addr)
-#define readw_relaxed(addr) readw(addr)
-#define readl_relaxed(addr) readl(addr)
+
+#define writeb_relaxed(v,c)    ((void)__raw_writeb(v,__mem_pci(c)))
+#define writew_relaxed(v,c)    ((void)__raw_writew((__force u16) \
+                                       cpu_to_le16(v),__mem_pci(c)))
+#define writel_relaxed(v,c)    ((void)__raw_writel((__force u32) \
+                                       cpu_to_le32(v),__mem_pci(c)))
+
+#ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE
+#define __iormb()              rmb()
+#define __iowmb()              wmb()
+#else
+#define __iormb()              do { } while (0)
+#define __iowmb()              do { } while (0)
+#endif
+
+#define readb(c)               ({ u8  __v = readb_relaxed(c); __iormb(); __v; })
+#define readw(c)               ({ u16 __v = readw_relaxed(c); __iormb(); __v; })
+#define readl(c)               ({ u32 __v = readl_relaxed(c); __iormb(); __v; })
+
+#define writeb(v,c)            ({ __iowmb(); writeb_relaxed(v,c); })
+#define writew(v,c)            ({ __iowmb(); writew_relaxed(v,c); })
+#define writel(v,c)            ({ __iowmb(); writel_relaxed(v,c); })
 
 #define readsb(p,d,l)          __raw_readsb(__mem_pci(p),d,l)
 #define readsw(p,d,l)          __raw_readsw(__mem_pci(p),d,l)
 #define readsl(p,d,l)          __raw_readsl(__mem_pci(p),d,l)
 
-#define writeb(v,c)            __raw_writeb(v,__mem_pci(c))
-#define writew(v,c)            __raw_writew((__force __u16) \
-                                       cpu_to_le16(v),__mem_pci(c))
-#define writel(v,c)            __raw_writel((__force __u32) \
-                                       cpu_to_le32(v),__mem_pci(c))
-
 #define writesb(p,d,l)         __raw_writesb(__mem_pci(p),d,l)
 #define writesw(p,d,l)         __raw_writesw(__mem_pci(p),d,l)
 #define writesl(p,d,l)         __raw_writesl(__mem_pci(p),d,l)
@@ -244,13 +258,13 @@ extern void _memset_io(volatile void __iomem *, int, size_t);
  * io{read,write}{8,16,32} macros
  */
 #ifndef ioread8
-#define ioread8(p)     ({ unsigned int __v = __raw_readb(p); __v; })
-#define ioread16(p)    ({ unsigned int __v = le16_to_cpu((__force __le16)__raw_readw(p)); __v; })
-#define ioread32(p)    ({ unsigned int __v = le32_to_cpu((__force __le32)__raw_readl(p)); __v; })
+#define ioread8(p)     ({ unsigned int __v = __raw_readb(p); __iormb(); __v; })
+#define ioread16(p)    ({ unsigned int __v = le16_to_cpu((__force __le16)__raw_readw(p)); __iormb(); __v; })
+#define ioread32(p)    ({ unsigned int __v = le32_to_cpu((__force __le32)__raw_readl(p)); __iormb(); __v; })
 
-#define iowrite8(v,p)  __raw_writeb(v, p)
-#define iowrite16(v,p) __raw_writew((__force __u16)cpu_to_le16(v), p)
-#define iowrite32(v,p) __raw_writel((__force __u32)cpu_to_le32(v), p)
+#define iowrite8(v,p)  ({ __iowmb(); (void)__raw_writeb(v, p); })
+#define iowrite16(v,p) ({ __iowmb(); (void)__raw_writew((__force __u16)cpu_to_le16(v), p); })
+#define iowrite32(v,p) ({ __iowmb(); (void)__raw_writel((__force __u32)cpu_to_le32(v), p); })
 
 #define ioread8_rep(p,d,c)     __raw_readsb(p,d,c)
 #define ioread16_rep(p,d,c)    __raw_readsw(p,d,c)
index 59ff6fdc1e634835575846693fb0828a05c66a55..7d08b43d2c0e496fc704378301b82d3b34d93f3a 100644 (file)
@@ -71,7 +71,7 @@
                .pushsection .fixup,"ax"
                .align  4
 9001:          mov     r4, #-EFAULT
-               ldr     r5, [fp, #4]            @ *err_ptr
+               ldr     r5, [sp, #8*4]          @ *err_ptr
                str     r4, [r5]
                ldmia   sp, {r1, r2}            @ retrieve dst, len
                add     r2, r2, r1
index fedd8076a689a3555a986b5e96bc9348cf4ae77f..072cc6b61ba30b7b6e363040b40d409e8a43e38f 100644 (file)
@@ -11,6 +11,7 @@
  *
 */
 
+#include <mach/hardware.h>
 #include <asm/hardware/clps7111.h>
 
                .macro  addruart, rx, tmp
index 2ec3095ffb7b564e61a54bcc4e366f32b87e671c..b280efb1fa120806b1cfe85bdea1f90d1bc0caaf 100644 (file)
@@ -25,6 +25,7 @@
 #include <linux/mtd/partitions.h>
 #include <linux/mtd/physmap.h>
 #include <linux/regulator/machine.h>
+#include <linux/regulator/tps6507x.h>
 #include <linux/mfd/tps6507x.h>
 #include <linux/input/tps6507x-ts.h>
 
@@ -469,6 +470,11 @@ struct regulator_consumer_supply tps65070_ldo2_consumers[] = {
        },
 };
 
+/* We take advantage of the fact that both defdcdc{2,3} are tied high */
+static struct tps6507x_reg_platform_data tps6507x_platform_data = {
+       .defdcdc_default = true,
+};
+
 struct regulator_init_data tps65070_regulator_data[] = {
        /* dcdc1 */
        {
@@ -494,6 +500,7 @@ struct regulator_init_data tps65070_regulator_data[] = {
                },
                .num_consumer_supplies = ARRAY_SIZE(tps65070_dcdc2_consumers),
                .consumer_supplies = tps65070_dcdc2_consumers,
+               .driver_data = &tps6507x_platform_data,
        },
 
        /* dcdc3 */
@@ -507,6 +514,7 @@ struct regulator_init_data tps65070_regulator_data[] = {
                },
                .num_consumer_supplies = ARRAY_SIZE(tps65070_dcdc3_consumers),
                .consumer_supplies = tps65070_dcdc3_consumers,
+               .driver_data = &tps6507x_platform_data,
        },
 
        /* ldo1 */
index e3bc3f6f6b105d42c8d4f73d33b4d5f92fc15a81..88b3dd89be89a20b65761de100c7799798ccb1df 100644 (file)
@@ -232,7 +232,7 @@ EXPORT_SYMBOL(__bus_to_virt);
 
 unsigned long __pfn_to_bus(unsigned long pfn)
 {
-       return __pfn_to_phys(pfn) + (fb_bus_sdram_offset() - PHYS_OFFSET));
+       return __pfn_to_phys(pfn) + (fb_bus_sdram_offset() - PHYS_OFFSET);
 }
 EXPORT_SYMBOL(__pfn_to_bus);
 
index a9ee8f0d48b7f6792ea5004844649c3acb3a5bd0..27cafd12f033cbba19bd507e5a90ac198c1db8e1 100644 (file)
  *
 */
 
-               .equ    io_virt, IO_BASE
-               .equ    io_phys, IO_START
+#include <mach/hardware.h>
+
+               .equ    io_virt, IO_VIRT
+               .equ    io_phys, IO_PHYS
 
                .macro  addruart, rx, tmp
                mrc     p15, 0, \rx, c1, c0
index 7221c20b2afa241b5854c90b90f24352b73572c8..f781164e623fc89e7a4362b3d039f9e58688fd0d 100644 (file)
@@ -77,7 +77,7 @@ struct spi_board_info __initdata qnap_tsx1x_spi_slave_info[] = {
        },
 };
 
-void qnap_tsx1x_register_flash(void)
+void __init qnap_tsx1x_register_flash(void)
 {
        spi_register_board_info(qnap_tsx1x_spi_slave_info,
                                ARRAY_SIZE(qnap_tsx1x_spi_slave_info));
index 9a592962a6ea0df6f7546d847a50b150d45243c1..7fa037361b55a5e23738f9a37c8637f64d91b56e 100644 (file)
@@ -1,7 +1,7 @@
 #ifndef __ARCH_KIRKWOOD_TSX1X_COMMON_H
 #define __ARCH_KIRKWOOD_TSX1X_COMMON_H
 
-extern void qnap_tsx1x_register_flash(void);
+extern void __init qnap_tsx1x_register_flash(void);
 extern void qnap_tsx1x_power_off(void);
 
 #endif
index 0859336a8e6d1be71a8090c1e8ef53b1640c87bc..5c934bdb71583e037c59fdadc3490e43838ff425 100644 (file)
@@ -8,6 +8,7 @@
  * the Free Software Foundation.
  */
 #include <mach/hardware.h>
+#include <asm/memory.h>
 
 #include <mach/regs-board-a9m9750dev.h>
 
index 1b12d324b087d1528c87d828bebec3308be1dbaa..770a68c46e8114f4a7360b559e0ddecd15cfdc57 100644 (file)
@@ -20,50 +20,49 @@ static void putc_dummy(char c, void __iomem *base)
        /* nothing */
 }
 
+static int timeout;
+
 static void putc_ns9360(char c, void __iomem *base)
 {
-       static int t = 0x10000;
        do {
-               if (t)
-                       --t;
+               if (timeout)
+                       --timeout;
 
                if (__raw_readl(base + 8) & (1 << 3)) {
                        __raw_writeb(c, base + 16);
-                       t = 0x10000;
+                       timeout = 0x10000;
                        break;
                }
-       } while (t);
+       } while (timeout);
 }
 
 static void putc_a9m9750dev(char c, void __iomem *base)
 {
-       static int t = 0x10000;
        do {
-               if (t)
-                       --t;
+               if (timeout)
+                       --timeout;
 
                if (__raw_readb(base + 5) & (1 << 5)) {
                        __raw_writeb(c, base);
-                       t = 0x10000;
+                       timeout = 0x10000;
                        break;
                }
-       } while (t);
+       } while (timeout);
 
 }
 
 static void putc_ns921x(char c, void __iomem *base)
 {
-       static int t = 0x10000;
        do {
-               if (t)
-                       --t;
+               if (timeout)
+                       --timeout;
 
                if (!(__raw_readl(base) & (1 << 11))) {
                        __raw_writeb(c, base + 0x0028);
-                       t = 0x10000;
+                       timeout = 0x10000;
                        break;
                }
-       } while (t);
+       } while (timeout);
 }
 
 #define MSCS __REG(0xA0900184)
@@ -89,6 +88,7 @@ static void putc_ns921x(char c, void __iomem *base)
 
 static void autodetect(void (**putc)(char, void __iomem *), void __iomem **base)
 {
+       timeout = 0x10000;
        if (((__raw_readl(MSCS) >> 16) & 0xfe) == 0x00) {
                /* ns9360 or ns9750 */
                if (NS9360_UART_ENABLED(NS9360_UARTA)) {
index c5555ca13d00952f901e10268dc6d0d6ea5963c4..03483920ed6e0b12fa007c3438d3ea4401f8ec7f 100644 (file)
@@ -220,10 +220,10 @@ static int board_keymap[] = {
        KEY(4, 4, KEY_LEFTCTRL),
        KEY(4, 5, KEY_RIGHTALT),
        KEY(4, 6, KEY_LEFTSHIFT),
-       KEY(4, 8, KEY_10),
+       KEY(4, 8, KEY_F10),
 
        KEY(5, 0, KEY_Y),
-       KEY(5, 8, KEY_11),
+       KEY(5, 8, KEY_F11),
 
        KEY(6, 0, KEY_U),
 
index 45c23fd6df31e9696d2b0c42be0752cba1683217..40b6ac2de876cf8dcc4c0328b72b086e15253eb8 100644 (file)
@@ -26,6 +26,7 @@
 #include <mach/colibri.h>
 #include <mach/ohci.h>
 #include <mach/pxafb.h>
+#include <mach/audio.h>
 
 #include "generic.h"
 #include "devices.h"
@@ -145,7 +146,7 @@ static void __init colibri_pxa300_init_lcd(void)
 static inline void colibri_pxa300_init_lcd(void) {}
 #endif /* CONFIG_FB_PXA || CONFIG_FB_PXA_MODULE */
 
-#if defined(SND_AC97_CODEC) || defined(SND_AC97_CODEC_MODULE)
+#if defined(CONFIG_SND_AC97_CODEC) || defined(CONFIG_SND_AC97_CODEC_MODULE)
 static mfp_cfg_t colibri_pxa310_ac97_pin_config[] __initdata = {
        GPIO24_AC97_SYSCLK,
        GPIO23_AC97_nACRESET,
index 3d1dcb9ac08f497fc63e333ee6d20438f20d0d28..51ffa6afb67530e5cde74659319584ac52646593 100644 (file)
@@ -446,7 +446,7 @@ static struct platform_device corgiled_device = {
 static struct pxamci_platform_data corgi_mci_platform_data = {
        .detect_delay_ms        = 250,
        .ocr_mask               = MMC_VDD_32_33|MMC_VDD_33_34,
-       .gpio_card_detect       = -1,
+       .gpio_card_detect       = CORGI_GPIO_nSD_DETECT,
        .gpio_card_ro           = CORGI_GPIO_nSD_WP,
        .gpio_power             = CORGI_GPIO_SD_PWR,
 };
index 9e4d9816726ad5d75c0657f9a32d0b36c0ec3aa7..268a9bc6be8a22a4ca0ac4fed742d035753581a3 100644 (file)
@@ -256,13 +256,9 @@ static void init_sdram_rows(void)
 
 static u32 mdrefr_dri(unsigned int freq)
 {
-       u32 dri = 0;
+       u32 interval = freq * SDRAM_TREF / sdram_rows;
 
-       if (cpu_is_pxa25x())
-               dri = ((freq * SDRAM_TREF) / (sdram_rows * 32));
-       if (cpu_is_pxa27x())
-               dri = ((freq * SDRAM_TREF) / (sdram_rows - 31)) / 32;
-       return dri;
+       return (interval - (cpu_is_pxa27x() ? 31 : 0)) / 32;
 }
 
 /* find a valid frequency point */
index 0af36177ff085c92f34cd729bd5775f25d948a2f..c059dac02b61412b64b22f869d2e8913404236ed 100644 (file)
@@ -41,10 +41,10 @@ void pxa27x_clear_otgph(void)
 EXPORT_SYMBOL(pxa27x_clear_otgph);
 
 static unsigned long ac97_reset_config[] = {
-       GPIO95_AC97_nRESET,
-       GPIO95_GPIO,
-       GPIO113_AC97_nRESET,
        GPIO113_GPIO,
+       GPIO113_AC97_nRESET,
+       GPIO95_GPIO,
+       GPIO95_AC97_nRESET,
 };
 
 void pxa27x_assert_ac97reset(int reset_gpio, int on)
index 595be19f8ad5027e29daed72bbe0fb2c5d2d1a04..02e9fdeb8faf26891bdf43cb5846ae717fb720ea 100644 (file)
@@ -237,7 +237,7 @@ static unsigned int realview_mmc_status(struct device *dev)
        else
                mask = 2;
 
-       return !(readl(REALVIEW_SYSMCI) & mask);
+       return readl(REALVIEW_SYSMCI) & mask;
 }
 
 struct mmci_platform_data realview_mmc0_plat_data = {
index 50f071c5bf4d603656a01a9c8c2865bf07ee2eb9..5ea24d4d1ba6e176985b16076b48412952de9aec 100644 (file)
@@ -20,6 +20,9 @@
                strb    \rd, [\rx]
                .endm
 
+               .macro waituart,rd,rx
+               .endm
+
                .macro  busyuart,rd,rx
                mov     \rd, #0
 1001:          add     \rd, \rd, #1
index 8552eb188b50274c7ed4b18ccf48e743192652de..0271ca0a83df86c4fb43eb37b3c93754414d706d 100644 (file)
 static void putc(const char c)
 {
        /* Do nothing if the UART is not enabled. */
-       if (!(readb(U8500_UART_CR) & 0x1))
+       if (!(__raw_readb(U8500_UART_CR) & 0x1))
                return;
 
        if (c == '\n')
                putc('\r');
 
-       while (readb(U8500_UART_FR) & (1 << 5))
+       while (__raw_readb(U8500_UART_FR) & (1 << 5))
                barrier();
-       writeb(c, U8500_UART_DR);
+       __raw_writeb(c, U8500_UART_DR);
 }
 
 static void flush(void)
 {
-       if (!(readb(U8500_UART_CR) & 0x1))
+       if (!(__raw_readb(U8500_UART_CR) & 0x1))
                return;
-       while (readb(U8500_UART_FR) & (1 << 3))
+       while (__raw_readb(U8500_UART_FR) & (1 << 3))
                barrier();
 }
 
index d250711b8c7a4e3c17ee2ddcc53ad5b2e24ee420..c84239761cb4a2881d226f6f82b3acb694d30dae 100644 (file)
@@ -241,7 +241,7 @@ static struct platform_device v2m_flash_device = {
 
 static unsigned int v2m_mmci_status(struct device *dev)
 {
-       return !(readl(MMIO_P2V(V2M_SYS_MCI)) & (1 << 0));
+       return readl(MMIO_P2V(V2M_SYS_MCI)) & (1 << 0);
 }
 
 static struct mmci_platform_data v2m_mmci_data = {
index 642207e18198db20777ef475a27ada0bd2ae0c3d..83c56324a472f16a5f155df056b4cc29ba9e562d 100644 (file)
@@ -93,7 +93,7 @@ static struct clk_lookup nuc900_clkregs[] = {
        DEF_CLKLOOK(&clk_kpi, "nuc900-kpi", NULL),
        DEF_CLKLOOK(&clk_wdt, "nuc900-wdt", NULL),
        DEF_CLKLOOK(&clk_gdma, "nuc900-gdma", NULL),
-       DEF_CLKLOOK(&clk_adc, "nuc900-adc", NULL),
+       DEF_CLKLOOK(&clk_adc, "nuc900-ts", NULL),
        DEF_CLKLOOK(&clk_usi, "nuc900-spi", NULL),
        DEF_CLKLOOK(&clk_ext, NULL, "ext"),
        DEF_CLKLOOK(&clk_timer0, NULL, "timer0"),
index df4955885b21d412ede58df0a8c26a4fcb8ca0fb..9982eb385c0f9844d70f53d45c7377fcf5fa2887 100644 (file)
@@ -32,14 +32,14 @@ static uint32_t l2x0_way_mask;      /* Bitmask of active ways */
 static inline void cache_wait(void __iomem *reg, unsigned long mask)
 {
        /* wait for the operation to complete */
-       while (readl(reg) & mask)
+       while (readl_relaxed(reg) & mask)
                ;
 }
 
 static inline void cache_sync(void)
 {
        void __iomem *base = l2x0_base;
-       writel(0, base + L2X0_CACHE_SYNC);
+       writel_relaxed(0, base + L2X0_CACHE_SYNC);
        cache_wait(base + L2X0_CACHE_SYNC, 1);
 }
 
@@ -47,14 +47,14 @@ static inline void l2x0_clean_line(unsigned long addr)
 {
        void __iomem *base = l2x0_base;
        cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
-       writel(addr, base + L2X0_CLEAN_LINE_PA);
+       writel_relaxed(addr, base + L2X0_CLEAN_LINE_PA);
 }
 
 static inline void l2x0_inv_line(unsigned long addr)
 {
        void __iomem *base = l2x0_base;
        cache_wait(base + L2X0_INV_LINE_PA, 1);
-       writel(addr, base + L2X0_INV_LINE_PA);
+       writel_relaxed(addr, base + L2X0_INV_LINE_PA);
 }
 
 #ifdef CONFIG_PL310_ERRATA_588369
@@ -75,9 +75,9 @@ static inline void l2x0_flush_line(unsigned long addr)
 
        /* Clean by PA followed by Invalidate by PA */
        cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
-       writel(addr, base + L2X0_CLEAN_LINE_PA);
+       writel_relaxed(addr, base + L2X0_CLEAN_LINE_PA);
        cache_wait(base + L2X0_INV_LINE_PA, 1);
-       writel(addr, base + L2X0_INV_LINE_PA);
+       writel_relaxed(addr, base + L2X0_INV_LINE_PA);
 }
 #else
 
@@ -90,7 +90,7 @@ static inline void l2x0_flush_line(unsigned long addr)
 {
        void __iomem *base = l2x0_base;
        cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1);
-       writel(addr, base + L2X0_CLEAN_INV_LINE_PA);
+       writel_relaxed(addr, base + L2X0_CLEAN_INV_LINE_PA);
 }
 #endif
 
@@ -109,7 +109,7 @@ static inline void l2x0_inv_all(void)
 
        /* invalidate all ways */
        spin_lock_irqsave(&l2x0_lock, flags);
-       writel(l2x0_way_mask, l2x0_base + L2X0_INV_WAY);
+       writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_INV_WAY);
        cache_wait(l2x0_base + L2X0_INV_WAY, l2x0_way_mask);
        cache_sync();
        spin_unlock_irqrestore(&l2x0_lock, flags);
@@ -215,8 +215,8 @@ void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask)
 
        l2x0_base = base;
 
-       cache_id = readl(l2x0_base + L2X0_CACHE_ID);
-       aux = readl(l2x0_base + L2X0_AUX_CTRL);
+       cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID);
+       aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);
 
        aux &= aux_mask;
        aux |= aux_val;
@@ -248,15 +248,15 @@ void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask)
         * If you are booting from non-secure mode
         * accessing the below registers will fault.
         */
-       if (!(readl(l2x0_base + L2X0_CTRL) & 1)) {
+       if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & 1)) {
 
                /* l2x0 controller is disabled */
-               writel(aux, l2x0_base + L2X0_AUX_CTRL);
+               writel_relaxed(aux, l2x0_base + L2X0_AUX_CTRL);
 
                l2x0_inv_all();
 
                /* enable L2X0 */
-               writel(1, l2x0_base + L2X0_CTRL);
+               writel_relaxed(1, l2x0_base + L2X0_CTRL);
        }
 
        outer_cache.inv_range = l2x0_inv_range;
index 086816b205b8cf25c9d178c9d56c44a654e2ff9f..6ab244062b4ab2f720e84d870011faac64878c81 100644 (file)
@@ -163,19 +163,22 @@ static DEFINE_PER_CPU(int, kmap_high_l1_vipt_depth);
 
 void *kmap_high_l1_vipt(struct page *page, pte_t *saved_pte)
 {
-       unsigned int idx, cpu = smp_processor_id();
-       int *depth = &per_cpu(kmap_high_l1_vipt_depth, cpu);
+       unsigned int idx, cpu;
+       int *depth;
        unsigned long vaddr, flags;
        pte_t pte, *ptep;
 
+       if (!in_interrupt())
+               preempt_disable();
+
+       cpu = smp_processor_id();
+       depth = &per_cpu(kmap_high_l1_vipt_depth, cpu);
+
        idx = KM_L1_CACHE + KM_TYPE_NR * cpu;
        vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
        ptep = TOP_PTE(vaddr);
        pte = mk_pte(page, kmap_prot);
 
-       if (!in_interrupt())
-               preempt_disable();
-
        raw_local_irq_save(flags);
        (*depth)++;
        if (pte_val(*ptep) == pte_val(pte)) {
index 1670734b7e5159d34ada41710c0dc5381fcc4c3a..37fa593884ee64d345add30e2ca9738e238e3511 100644 (file)
@@ -17,8 +17,8 @@
                .macro  addruart, rx
                mrc     p15, 0, \rx, c1, c0
                tst     \rx, #1                                 @ MMU enabled?
-               moveq   \rx, =SPEAR_DBG_UART_BASE               @ Physical base
-               movne   \rx, =VA_SPEAR_DBG_UART_BASE            @ Virtual base
+               moveq   \rx, #SPEAR_DBG_UART_BASE               @ Physical base
+               movne   \rx, #VA_SPEAR_DBG_UART_BASE            @ Virtual base
                .endm
 
                .macro  senduart, rd, rx
index 0cf2c0a4502bd55769edf2166899a2c47d833f8a..e6ac0b6610763e6a51c30d64c607691c4fe869fe 100644 (file)
@@ -54,6 +54,9 @@
 #define TIOCGPTN       _IOR('T',0x30, unsigned int) /* Get Pty Number (of pty-mux device) */
 #define TIOCSPTLCK     _IOW('T',0x31, int)  /* Lock/unlock Pty */
 
+#define TIOCGRS485      0x542E
+#define TIOCSRS485      0x542F
+
 #define FIONCLEX       0x5450
 #define FIOCLEX                0x5451
 #define FIOASYNC       0x5452
index c7f25bb1d068e0de55c8d2eeeb9ea817d5232328..61740201b3115eb7988ff3eca6728195f5d90d8b 100644 (file)
@@ -5,6 +5,7 @@
 #define __ASM_ARCH_BOARD_H
 
 #include <linux/types.h>
+#include <linux/serial.h>
 
 #define GPIO_PIN_NONE  (-1)
 
@@ -35,6 +36,7 @@ struct atmel_uart_data {
        short           use_dma_tx;     /* use transmit DMA? */
        short           use_dma_rx;     /* use receive DMA? */
        void __iomem    *regs;          /* virtual base address, if any */
+       struct serial_rs485     rs485;          /* rs485 settings */
 };
 void at32_map_usart(unsigned int hw_id, unsigned int line, int flags);
 struct platform_device *at32_add_device_usart(unsigned int id);
index 2580e77624d2072b4637b28eb32f5deb5053c1c0..f9e5622ebc95b8e37a42ac96d3556176c44b11c9 100644 (file)
@@ -435,20 +435,21 @@ static struct platform_device *au1xxx_platform_devices[] __initdata = {
 static int __init au1xxx_platform_init(void)
 {
        unsigned int uartclk = get_au1x00_uart_baud_base() * 16;
-       int i;
+       int err, i;
 
        /* Fill up uartclk. */
        for (i = 0; au1x00_uart_data[i].flags; i++)
                au1x00_uart_data[i].uartclk = uartclk;
 
+       err = platform_add_devices(au1xxx_platform_devices,
+                                  ARRAY_SIZE(au1xxx_platform_devices));
 #ifndef CONFIG_SOC_AU1100
        /* Register second MAC if enabled in pinfunc */
-       if (!(au_readl(SYS_PINFUNC) & (u32)SYS_PF_NI2))
+       if (!err && !(au_readl(SYS_PINFUNC) & (u32)SYS_PF_NI2))
                platform_device_register(&au1xxx_eth1_device);
 #endif
 
-       return platform_add_devices(au1xxx_platform_devices,
-                                   ARRAY_SIZE(au1xxx_platform_devices));
+       return err;
 }
 
 arch_initcall(au1xxx_platform_init);
index a9f0336e1f1fd626cca60c57748c1430be64c8f4..52d883d37dd7e4b69a82ab89e724c7b0c2cca7d4 100644 (file)
@@ -67,8 +67,6 @@ static void mtx1_power_off(void)
 
 void __init board_setup(void)
 {
-       alchemy_gpio2_enable();
-
 #if defined(CONFIG_USB_OHCI_HCD) || defined(CONFIG_USB_OHCI_HCD_MODULE)
        /* Enable USB power switch */
        alchemy_gpio_direction_output(204, 0);
@@ -117,11 +115,11 @@ mtx1_pci_idsel(unsigned int devsel, int assert)
 
        if (assert && devsel != 0)
                /* Suppress signal to Cardbus */
-               gpio_set_value(1, 0);   /* set EXT_IO3 OFF */
+               alchemy_gpio_set_value(1, 0);   /* set EXT_IO3 OFF */
        else
-               gpio_set_value(1, 1);   /* set EXT_IO3 ON */
+               alchemy_gpio_set_value(1, 1);   /* set EXT_IO3 ON */
 
-       au_sync_udelay(1);
+       udelay(1);
        return 1;
 }
 
index 9f544badd0b4d5dddc25701a690bb63b797cf8f8..39c23366c5c7ea4bcbe6c272fe0279aca4d82aba 100644 (file)
@@ -104,6 +104,9 @@ int __init bcm63xx_enet_register(int unit,
        if (unit > 1)
                return -ENODEV;
 
+       if (unit == 1 && BCMCPU_IS_6338())
+               return -ENODEV;
+
        if (!shared_device_registered) {
                shared_res[0].start = bcm63xx_regset_address(RSET_ENETDMA);
                shared_res[0].end = shared_res[0].start;
index 59dc0c7ef7334b2e28e4b508e03f84e8f2ea2aca..c63c56bfd18461b558e0cba7380d6297fc47115e 100644 (file)
@@ -434,7 +434,7 @@ static __inline__ void atomic64_add(long i, atomic64_t * v)
                __asm__ __volatile__(
                "       .set    mips3                                   \n"
                "1:     lld     %0, %1          # atomic64_add          \n"
-               "       addu    %0, %2                                  \n"
+               "       daddu   %0, %2                                  \n"
                "       scd     %0, %1                                  \n"
                "       beqzl   %0, 1b                                  \n"
                "       .set    mips0                                   \n"
@@ -446,7 +446,7 @@ static __inline__ void atomic64_add(long i, atomic64_t * v)
                __asm__ __volatile__(
                "       .set    mips3                                   \n"
                "1:     lld     %0, %1          # atomic64_add          \n"
-               "       addu    %0, %2                                  \n"
+               "       daddu   %0, %2                                  \n"
                "       scd     %0, %1                                  \n"
                "       beqz    %0, 2f                                  \n"
                "       .subsection 2                                   \n"
@@ -479,7 +479,7 @@ static __inline__ void atomic64_sub(long i, atomic64_t * v)
                __asm__ __volatile__(
                "       .set    mips3                                   \n"
                "1:     lld     %0, %1          # atomic64_sub          \n"
-               "       subu    %0, %2                                  \n"
+               "       dsubu   %0, %2                                  \n"
                "       scd     %0, %1                                  \n"
                "       beqzl   %0, 1b                                  \n"
                "       .set    mips0                                   \n"
@@ -491,7 +491,7 @@ static __inline__ void atomic64_sub(long i, atomic64_t * v)
                __asm__ __volatile__(
                "       .set    mips3                                   \n"
                "1:     lld     %0, %1          # atomic64_sub          \n"
-               "       subu    %0, %2                                  \n"
+               "       dsubu   %0, %2                                  \n"
                "       scd     %0, %1                                  \n"
                "       beqz    %0, 2f                                  \n"
                "       .subsection 2                                   \n"
@@ -524,10 +524,10 @@ static __inline__ long atomic64_add_return(long i, atomic64_t * v)
                __asm__ __volatile__(
                "       .set    mips3                                   \n"
                "1:     lld     %1, %2          # atomic64_add_return   \n"
-               "       addu    %0, %1, %3                              \n"
+               "       daddu   %0, %1, %3                              \n"
                "       scd     %0, %2                                  \n"
                "       beqzl   %0, 1b                                  \n"
-               "       addu    %0, %1, %3                              \n"
+               "       daddu   %0, %1, %3                              \n"
                "       .set    mips0                                   \n"
                : "=&r" (result), "=&r" (temp), "=m" (v->counter)
                : "Ir" (i), "m" (v->counter)
@@ -538,10 +538,10 @@ static __inline__ long atomic64_add_return(long i, atomic64_t * v)
                __asm__ __volatile__(
                "       .set    mips3                                   \n"
                "1:     lld     %1, %2          # atomic64_add_return   \n"
-               "       addu    %0, %1, %3                              \n"
+               "       daddu   %0, %1, %3                              \n"
                "       scd     %0, %2                                  \n"
                "       beqz    %0, 2f                                  \n"
-               "       addu    %0, %1, %3                              \n"
+               "       daddu   %0, %1, %3                              \n"
                "       .subsection 2                                   \n"
                "2:     b       1b                                      \n"
                "       .previous                                       \n"
@@ -576,10 +576,10 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
                __asm__ __volatile__(
                "       .set    mips3                                   \n"
                "1:     lld     %1, %2          # atomic64_sub_return   \n"
-               "       subu    %0, %1, %3                              \n"
+               "       dsubu   %0, %1, %3                              \n"
                "       scd     %0, %2                                  \n"
                "       beqzl   %0, 1b                                  \n"
-               "       subu    %0, %1, %3                              \n"
+               "       dsubu   %0, %1, %3                              \n"
                "       .set    mips0                                   \n"
                : "=&r" (result), "=&r" (temp), "=m" (v->counter)
                : "Ir" (i), "m" (v->counter)
@@ -590,10 +590,10 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
                __asm__ __volatile__(
                "       .set    mips3                                   \n"
                "1:     lld     %1, %2          # atomic64_sub_return   \n"
-               "       subu    %0, %1, %3                              \n"
+               "       dsubu   %0, %1, %3                              \n"
                "       scd     %0, %2                                  \n"
                "       beqz    %0, 2f                                  \n"
-               "       subu    %0, %1, %3                              \n"
+               "       dsubu   %0, %1, %3                              \n"
                "       .subsection 2                                   \n"
                "2:     b       1b                                      \n"
                "       .previous                                       \n"
index 1b5a6648eb86dda1598877acf8fd4a9ac9b301c1..baa318a59c97f8c2791842df809c1218796c7492 100644 (file)
 #define __NR_perf_event_open           (__NR_Linux + 296)
 #define __NR_accept4                   (__NR_Linux + 297)
 #define __NR_recvmmsg                  (__NR_Linux + 298)
+#define __NR_getdents64                        (__NR_Linux + 299)
 
 /*
  * Offset of the last N32 flavoured syscall
  */
-#define __NR_Linux_syscalls            298
+#define __NR_Linux_syscalls            299
 
 #endif /* _MIPS_SIM == _MIPS_SIM_NABI32 */
 
 #define __NR_N32_Linux                 6000
-#define __NR_N32_Linux_syscalls                298
+#define __NR_N32_Linux_syscalls                299
 
 #ifdef __KERNEL__
 
index a5297e2a353aede2279664446611de8100bf1f98..a4faceea9d885043a0628afcf649d6b957c9b7a3 100644 (file)
@@ -419,4 +419,5 @@ EXPORT(sysn32_call_table)
        PTR     sys_perf_event_open
        PTR     sys_accept4
        PTR     compat_sys_recvmmsg
+       PTR     sys_getdents
        .size   sysn32_call_table,.-sysn32_call_table
index b773c1112b140f0f976139a3f57a9e69b2ee4bde..e5cdfd603f8fb6dabbdf22051b043604c8ad6496 100644 (file)
@@ -61,11 +61,9 @@ static int __init init_vdso(void)
 
        vunmap(vdso);
 
-       pr_notice("init_vdso successfull\n");
-
        return 0;
 }
-device_initcall(init_vdso);
+subsys_initcall(init_vdso);
 
 static unsigned long vdso_addr(unsigned long start)
 {
index 2fbfa1a8c3a9a96a4a2bc159a9eb8c986bc4297e..bf80921f2f56c0b145e698fa2c673ff49c7c0636 100644 (file)
@@ -247,6 +247,8 @@ void __init mips_pcibios_init(void)
        iomem_resource.end &= 0xfffffffffULL;                   /* 64 GB */
        ioport_resource.end = controller->io_resource->end;
 
+       controller->io_map_base = mips_io_port_base;
+
        register_pci_controller(controller);
 }
 
index eee4f3dfc410e248d5caf1e625b14dcbbb273ff3..98e86ddb86ccd6e07766d53a04998d7f3a413cc4 100644 (file)
@@ -44,6 +44,7 @@ extern struct pci_ops pnx8550_pci_ops;
 
 static struct pci_controller pnx8550_controller = {
        .pci_ops        = &pnx8550_pci_ops,
+       .io_map_base    = PNX8550_PORT_BASE,
        .io_resource    = &pci_io_resource,
        .mem_resource   = &pci_mem_resource,
 };
index 2aed50fef10ff8b800a58c8cdc0551d654b8fbf8..64246c9c875c51d09e5c3861ca0e6f1096d50ac5 100644 (file)
@@ -113,7 +113,7 @@ void __init plat_mem_setup(void)
        PNX8550_GLB2_ENAB_INTA_O = 0;
 
        /* IO/MEM resources. */
-       set_io_port_base(KSEG1);
+       set_io_port_base(PNX8550_PORT_BASE);
        ioport_resource.start = 0;
        ioport_resource.end = ~0;
        iomem_resource.start = 0;
index 04b31478a6d7d6631ad3faa684be6f41e66e2a09..b7c03d80c88c80e1a54af4254d42522b95e9f76a 100644 (file)
@@ -944,6 +944,7 @@ static struct pci_controller msp_pci_controller = {
        .pci_ops        = &msp_pci_ops,
        .mem_resource   = &pci_mem_resource,
        .mem_offset     = 0,
+       .io_map_base    = MSP_PCI_IOSPACE_BASE,
        .io_resource    = &pci_io_resource,
        .io_offset      = 0
 };
index 0357946f30e6ec5d2719446eaf68a9b86e980b98..cf5e1a25cb7d7fb942c3d0ebe53e9b3c6e5e14c1 100644 (file)
@@ -54,6 +54,7 @@ static int __init pmc_yosemite_setup(void)
                panic(ioremap_failed);
 
        set_io_port_base(io_v_base);
+       py_controller.io_map_base = io_v_base;
        TITAN_WRITE(RM9000x2_OCD_LKM7, TITAN_READ(RM9000x2_OCD_LKM7) | 1);
 
        ioport_resource.end = TITAN_IO_SIZE - 1;
index 8ee77887306a24986a54b27af1b3dc009011273a..9ec523e4dd060afb93ebd5fea93c07450daf933f 100644 (file)
@@ -472,6 +472,9 @@ void __init configure_platform(void)
                 * it*/
                platform_features = FFS_CAPABLE | DISPLAY_CAPABLE;
 
+               /* Cronus and Cronus Lite have the same register map */
+               set_register_map(CRONUS_IO_BASE, &cronus_register_map);
+
                /* ASIC version will determine if this is a real CronusLite or
                 * Castrati(Cronus) */
                chipversion  = asic_read(chipver3) << 24;
@@ -484,8 +487,6 @@ void __init configure_platform(void)
                else
                        asic = ASIC_CRONUSLITE;
 
-               /* Cronus and Cronus Lite have the same register map */
-               set_register_map(CRONUS_IO_BASE, &cronus_register_map);
                gp_resources = non_dvr_cronuslite_resources;
                pr_info("Platform: 4600 - %s, NON_DVR_CAPABLE, "
                        "chipversion=0x%08X\n",
index 2a9cd74a841e3e234b6d1a4bb6beb8e5cd1b207b..076327f2eff777b2cdbd4e15de8336ce7b03faa6 100644 (file)
@@ -8,9 +8,9 @@
  * On FSL-BookE we setup a 1:1 mapping which covers the first 2GiB of memory
  * and therefore we can only deal with memory within this range
  */
-#define KEXEC_SOURCE_MEMORY_LIMIT      (2 * 1024 * 1024 * 1024UL)
-#define KEXEC_DESTINATION_MEMORY_LIMIT (2 * 1024 * 1024 * 1024UL)
-#define KEXEC_CONTROL_MEMORY_LIMIT     (2 * 1024 * 1024 * 1024UL)
+#define KEXEC_SOURCE_MEMORY_LIMIT      (2 * 1024 * 1024 * 1024UL - 1)
+#define KEXEC_DESTINATION_MEMORY_LIMIT (2 * 1024 * 1024 * 1024UL - 1)
+#define KEXEC_CONTROL_MEMORY_LIMIT     (2 * 1024 * 1024 * 1024UL - 1)
 
 #else
 
index 2102b214a87c9ab49b7540962c2faef15251f55c..0e398cfee2c82826b6abad1eb8ece6330df6b0b4 100644 (file)
@@ -250,7 +250,9 @@ extern int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
 int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
                     pte_t *ptep, unsigned long trap, int local, int ssize,
                     unsigned int shift, unsigned int mmu_psize);
-
+extern void hash_failure_debug(unsigned long ea, unsigned long access,
+                              unsigned long vsid, unsigned long trap,
+                              int ssize, int psize, unsigned long pte);
 extern int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
                             unsigned long pstart, unsigned long prot,
                             int psize, int ssize);
index 369872f6cf78aa139564275b714af2f6901af85a..babcceecd2eab6335217d2434c6e91e5c7fe5884 100644 (file)
@@ -566,9 +566,9 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
         * Finally record data if requested.
         */
        if (record) {
-               struct perf_sample_data data = {
-                       .period = event->hw.last_period,
-               };
+               struct perf_sample_data data;
+
+               perf_sample_data_init(&data, 0);
 
                if (perf_event_overflow(event, nmi, &data, regs)) {
                        /*
index 9d3953983fb74759a2ab2bfbb2a5f7dd316e62ca..fed9bf6187d1a514e88677b144c5ad1273be8505 100644 (file)
@@ -414,7 +414,7 @@ static int __init early_init_dt_scan_drconf_memory(unsigned long node)
        u64 base, size, memblock_size;
        unsigned int is_kexec_kdump = 0, rngs;
 
-       ls = of_get_flat_dt_prop(node, "ibm,memblock-size", &l);
+       ls = of_get_flat_dt_prop(node, "ibm,lmb-size", &l);
        if (ls == NULL || l < dt_root_size_cells * sizeof(__be32))
                return 0;
        memblock_size = dt_mem_next_cell(dt_root_size_cells, &ls);
index a719f53921a57c390503b708c38cb4e1146a176b..3079f6b44cf531e5f1dafc8fa9e9a9e38e59dc15 100644 (file)
@@ -68,9 +68,6 @@ _GLOBAL(__hash_page_4K)
        std     r8,STK_PARM(r8)(r1)
        std     r9,STK_PARM(r9)(r1)
        
-       /* Add _PAGE_PRESENT to access */
-       ori     r4,r4,_PAGE_PRESENT
-
        /* Save non-volatile registers.
         * r31 will hold "old PTE"
         * r30 is "new PTE"
@@ -347,9 +344,6 @@ _GLOBAL(__hash_page_4K)
        std     r8,STK_PARM(r8)(r1)
        std     r9,STK_PARM(r9)(r1)
 
-       /* Add _PAGE_PRESENT to access */
-       ori     r4,r4,_PAGE_PRESENT
-
        /* Save non-volatile registers.
         * r31 will hold "old PTE"
         * r30 is "new PTE"
@@ -687,9 +681,6 @@ _GLOBAL(__hash_page_64K)
        std     r8,STK_PARM(r8)(r1)
        std     r9,STK_PARM(r9)(r1)
 
-       /* Add _PAGE_PRESENT to access */
-       ori     r4,r4,_PAGE_PRESENT
-
        /* Save non-volatile registers.
         * r31 will hold "old PTE"
         * r30 is "new PTE"
index 98f262de558526899fa5c58fe6802654bb0a5833..09dffe6efa46aeae84d62a922215f87fe0a8ca2d 100644 (file)
@@ -871,6 +871,18 @@ static inline int subpage_protection(struct mm_struct *mm, unsigned long ea)
 }
 #endif
 
+void hash_failure_debug(unsigned long ea, unsigned long access,
+                       unsigned long vsid, unsigned long trap,
+                       int ssize, int psize, unsigned long pte)
+{
+       if (!printk_ratelimit())
+               return;
+       pr_info("mm: Hashing failure ! EA=0x%lx access=0x%lx current=%s\n",
+               ea, access, current->comm);
+       pr_info("    trap=0x%lx vsid=0x%lx ssize=%d psize=%d pte=0x%lx\n",
+               trap, vsid, ssize, psize, pte);
+}
+
 /* Result code is:
  *  0 - handled
  *  1 - normal page fault
@@ -955,6 +967,17 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
                return 1;
        }
 
+       /* Add _PAGE_PRESENT to the required access perm */
+       access |= _PAGE_PRESENT;
+
+       /* Pre-check access permissions (will be re-checked atomically
+        * in __hash_page_XX but this pre-check is a fast path
+        */
+       if (access & ~pte_val(*ptep)) {
+               DBG_LOW(" no access !\n");
+               return 1;
+       }
+
 #ifdef CONFIG_HUGETLB_PAGE
        if (hugeshift)
                return __hash_page_huge(ea, access, vsid, ptep, trap, local,
@@ -967,14 +990,6 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
        DBG_LOW(" i-pte: %016lx %016lx\n", pte_val(*ptep),
                pte_val(*(ptep + PTRS_PER_PTE)));
 #endif
-       /* Pre-check access permissions (will be re-checked atomically
-        * in __hash_page_XX but this pre-check is a fast path
-        */
-       if (access & ~pte_val(*ptep)) {
-               DBG_LOW(" no access !\n");
-               return 1;
-       }
-
        /* Do actual hashing */
 #ifdef CONFIG_PPC_64K_PAGES
        /* If _PAGE_4K_PFN is set, make sure this is a 4k segment */
@@ -1033,6 +1048,12 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
                                            local, ssize, spp);
        }
 
+       /* Dump some info in case of hash insertion failure, they should
+        * never happen so it is really useful to know if/when they do
+        */
+       if (rc == -1)
+               hash_failure_debug(ea, access, vsid, trap, ssize, psize,
+                                  pte_val(*ptep));
 #ifndef CONFIG_PPC_64K_PAGES
        DBG_LOW(" o-pte: %016lx\n", pte_val(*ptep));
 #else
@@ -1051,8 +1072,7 @@ void hash_preload(struct mm_struct *mm, unsigned long ea,
        void *pgdir;
        pte_t *ptep;
        unsigned long flags;
-       int local = 0;
-       int ssize;
+       int rc, ssize, local = 0;
 
        BUG_ON(REGION_ID(ea) != USER_REGION_ID);
 
@@ -1098,11 +1118,18 @@ void hash_preload(struct mm_struct *mm, unsigned long ea,
        /* Hash it in */
 #ifdef CONFIG_PPC_HAS_HASH_64K
        if (mm->context.user_psize == MMU_PAGE_64K)
-               __hash_page_64K(ea, access, vsid, ptep, trap, local, ssize);
+               rc = __hash_page_64K(ea, access, vsid, ptep, trap, local, ssize);
        else
 #endif /* CONFIG_PPC_HAS_HASH_64K */
-               __hash_page_4K(ea, access, vsid, ptep, trap, local, ssize,
-                              subpage_protection(pgdir, ea));
+               rc = __hash_page_4K(ea, access, vsid, ptep, trap, local, ssize,
+                                   subpage_protection(pgdir, ea));
+
+       /* Dump some info in case of hash insertion failure, they should
+        * never happen so it is really useful to know if/when they do
+        */
+       if (rc == -1)
+               hash_failure_debug(ea, access, vsid, trap, ssize,
+                                  mm->context.user_psize, pte_val(*ptep));
 
        local_irq_restore(flags);
 }
index 199539882f927620d6a435b92241e1f08f9aa4a1..cc5c273086cf833e28bc89a91bb814b09c16a4e4 100644 (file)
@@ -21,21 +21,13 @@ int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
        unsigned long old_pte, new_pte;
        unsigned long va, rflags, pa, sz;
        long slot;
-       int err = 1;
 
        BUG_ON(shift != mmu_psize_defs[mmu_psize].shift);
 
        /* Search the Linux page table for a match with va */
        va = hpt_va(ea, vsid, ssize);
 
-       /*
-        * Check the user's access rights to the page.  If access should be
-        * prevented then send the problem up to do_page_fault.
-        */
-       if (unlikely(access & ~pte_val(*ptep)))
-               goto out;
-       /*
-        * At this point, we have a pte (old_pte) which can be used to build
+       /* At this point, we have a pte (old_pte) which can be used to build
         * or update an HPTE. There are 2 cases:
         *
         * 1. There is a valid (present) pte with no associated HPTE (this is
@@ -49,9 +41,17 @@ int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
 
        do {
                old_pte = pte_val(*ptep);
-               if (old_pte & _PAGE_BUSY)
-                       goto out;
+               /* If PTE busy, retry the access */
+               if (unlikely(old_pte & _PAGE_BUSY))
+                       return 0;
+               /* If PTE permissions don't match, take page fault */
+               if (unlikely(access & ~old_pte))
+                       return 1;
+               /* Try to lock the PTE, add ACCESSED and DIRTY if it was
+                * a write access */
                new_pte = old_pte | _PAGE_BUSY | _PAGE_ACCESSED;
+               if (access & _PAGE_RW)
+                       new_pte |= _PAGE_DIRTY;
        } while(old_pte != __cmpxchg_u64((unsigned long *)ptep,
                                         old_pte, new_pte));
 
@@ -121,8 +121,16 @@ repeat:
                         }
                }
 
-               if (unlikely(slot == -2))
-                       panic("hash_huge_page: pte_insert failed\n");
+               /*
+                * Hypervisor failure. Restore old pte and return -1
+                * similar to __hash_page_*
+                */
+               if (unlikely(slot == -2)) {
+                       *ptep = __pte(old_pte);
+                       hash_failure_debug(ea, access, vsid, trap, ssize,
+                                          mmu_psize, old_pte);
+                       return -1;
+               }
 
                new_pte |= (slot << 12) & (_PAGE_F_SECOND | _PAGE_F_GIX);
        }
@@ -131,9 +139,5 @@ repeat:
         * No need to use ldarx/stdcx here
         */
        *ptep = __pte(new_pte & ~_PAGE_BUSY);
-
-       err = 0;
-
- out:
-       return err;
+       return 0;
 }
index f47364585ecd0bce3df61a5397fa3f9d0ccf7feb..aa731af720c04e4816f0e9826d7f05109631bccb 100644 (file)
@@ -398,15 +398,15 @@ static int of_get_drconf_memory(struct device_node *memory, const u32 **dm)
 }
 
 /*
- * Retreive and validate the ibm,memblock-size property for drconf memory
+ * Retreive and validate the ibm,lmb-size property for drconf memory
  * from the device tree.
  */
-static u64 of_get_memblock_size(struct device_node *memory)
+static u64 of_get_lmb_size(struct device_node *memory)
 {
        const u32 *prop;
        u32 len;
 
-       prop = of_get_property(memory, "ibm,memblock-size", &len);
+       prop = of_get_property(memory, "ibm,lmb-size", &len);
        if (!prop || len < sizeof(unsigned int))
                return 0;
 
@@ -562,7 +562,7 @@ static unsigned long __init numa_enforce_memory_limit(unsigned long start,
 static inline int __init read_usm_ranges(const u32 **usm)
 {
        /*
-        * For each memblock in ibm,dynamic-memory a corresponding
+        * For each lmb in ibm,dynamic-memory a corresponding
         * entry in linux,drconf-usable-memory property contains
         * a counter followed by that many (base, size) duples.
         * Read the counter from linux,drconf-usable-memory
@@ -578,7 +578,7 @@ static void __init parse_drconf_memory(struct device_node *memory)
 {
        const u32 *dm, *usm;
        unsigned int n, rc, ranges, is_kexec_kdump = 0;
-       unsigned long memblock_size, base, size, sz;
+       unsigned long lmb_size, base, size, sz;
        int nid;
        struct assoc_arrays aa;
 
@@ -586,8 +586,8 @@ static void __init parse_drconf_memory(struct device_node *memory)
        if (!n)
                return;
 
-       memblock_size = of_get_memblock_size(memory);
-       if (!memblock_size)
+       lmb_size = of_get_lmb_size(memory);
+       if (!lmb_size)
                return;
 
        rc = of_get_assoc_arrays(memory, &aa);
@@ -611,7 +611,7 @@ static void __init parse_drconf_memory(struct device_node *memory)
                        continue;
 
                base = drmem.base_addr;
-               size = memblock_size;
+               size = lmb_size;
                ranges = 1;
 
                if (is_kexec_kdump) {
@@ -1072,7 +1072,7 @@ static int hot_add_drconf_scn_to_nid(struct device_node *memory,
 {
        const u32 *dm;
        unsigned int drconf_cell_cnt, rc;
-       unsigned long memblock_size;
+       unsigned long lmb_size;
        struct assoc_arrays aa;
        int nid = -1;
 
@@ -1080,8 +1080,8 @@ static int hot_add_drconf_scn_to_nid(struct device_node *memory,
        if (!drconf_cell_cnt)
                return -1;
 
-       memblock_size = of_get_memblock_size(memory);
-       if (!memblock_size)
+       lmb_size = of_get_lmb_size(memory);
+       if (!lmb_size)
                return -1;
 
        rc = of_get_assoc_arrays(memory, &aa);
@@ -1100,7 +1100,7 @@ static int hot_add_drconf_scn_to_nid(struct device_node *memory,
                        continue;
 
                if ((scn_addr < drmem.base_addr)
-                   || (scn_addr >= (drmem.base_addr + memblock_size)))
+                   || (scn_addr >= (drmem.base_addr + lmb_size)))
                        continue;
 
                nid = of_drconf_to_nid_single(&drmem, &aa);
index deab5f9460907891a3134a1d7f6bcdb459796dcf..bc88036641406bf283aa96f8c3b4609a3ecef85a 100644 (file)
@@ -69,7 +69,7 @@ static int pseries_remove_memory(struct device_node *np)
        const char *type;
        const unsigned int *regs;
        unsigned long base;
-       unsigned int memblock_size;
+       unsigned int lmb_size;
        int ret = -EINVAL;
 
        /*
@@ -87,9 +87,9 @@ static int pseries_remove_memory(struct device_node *np)
                return ret;
 
        base = *(unsigned long *)regs;
-       memblock_size = regs[3];
+       lmb_size = regs[3];
 
-       ret = pseries_remove_memblock(base, memblock_size);
+       ret = pseries_remove_memblock(base, lmb_size);
        return ret;
 }
 
@@ -98,7 +98,7 @@ static int pseries_add_memory(struct device_node *np)
        const char *type;
        const unsigned int *regs;
        unsigned long base;
-       unsigned int memblock_size;
+       unsigned int lmb_size;
        int ret = -EINVAL;
 
        /*
@@ -116,36 +116,36 @@ static int pseries_add_memory(struct device_node *np)
                return ret;
 
        base = *(unsigned long *)regs;
-       memblock_size = regs[3];
+       lmb_size = regs[3];
 
        /*
         * Update memory region to represent the memory add
         */
-       ret = memblock_add(base, memblock_size);
+       ret = memblock_add(base, lmb_size);
        return (ret < 0) ? -EINVAL : 0;
 }
 
 static int pseries_drconf_memory(unsigned long *base, unsigned int action)
 {
        struct device_node *np;
-       const unsigned long *memblock_size;
+       const unsigned long *lmb_size;
        int rc;
 
        np = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
        if (!np)
                return -EINVAL;
 
-       memblock_size = of_get_property(np, "ibm,memblock-size", NULL);
-       if (!memblock_size) {
+       lmb_size = of_get_property(np, "ibm,lmb-size", NULL);
+       if (!lmb_size) {
                of_node_put(np);
                return -EINVAL;
        }
 
        if (action == PSERIES_DRCONF_MEM_ADD) {
-               rc = memblock_add(*base, *memblock_size);
+               rc = memblock_add(*base, *lmb_size);
                rc = (rc < 0) ? -EINVAL : 0;
        } else if (action == PSERIES_DRCONF_MEM_REMOVE) {
-               rc = pseries_remove_memblock(*base, *memblock_size);
+               rc = pseries_remove_memblock(*base, *lmb_size);
        } else {
                rc = -EINVAL;
        }
index d5e3e60074472f8824f599d503c40744ad622ab0..bea9ee37ac9d2fc78cb9c5c7da4454bf529cb955 100644 (file)
@@ -535,8 +535,16 @@ pgm_no_vtime2:
        l       %r3,__LC_PGM_ILC        # load program interruption code
        la      %r8,0x7f
        nr      %r8,%r3                 # clear per-event-bit and ilc
-       be      BASED(pgm_exit)         # only per or per+check ?
-       b       BASED(pgm_do_call)
+       be      BASED(pgm_exit2)        # only per or per+check ?
+       l       %r7,BASED(.Ljump_table)
+       sll     %r8,2
+       l       %r7,0(%r8,%r7)          # load address of handler routine
+       la      %r2,SP_PTREGS(%r15)     # address of register-save area
+       basr    %r14,%r7                # branch to interrupt-handler
+pgm_exit2:
+       TRACE_IRQS_ON
+       stosm   __SF_EMPTY(%r15),0x03   # reenable interrupts
+       b       BASED(sysc_return)
 
 #
 # it was a single stepped SVC that is causing all the trouble
index e7192e1cb6785b24db347b2bbe2c22a469bdeb7d..8bccec15ea90086a557de590f90977d1e018cb4f 100644 (file)
@@ -544,8 +544,16 @@ pgm_no_vtime2:
        lgf     %r3,__LC_PGM_ILC        # load program interruption code
        lghi    %r8,0x7f
        ngr     %r8,%r3                 # clear per-event-bit and ilc
-       je      pgm_exit
-       j       pgm_do_call
+       je      pgm_exit2
+       sll     %r8,3
+       larl    %r1,pgm_check_table
+       lg      %r1,0(%r8,%r1)          # load address of handler routine
+       la      %r2,SP_PTREGS(%r15)     # address of register-save area
+       basr    %r14,%r1                # branch to interrupt-handler
+pgm_exit2:
+       TRACE_IRQS_ON
+       stosm   __SF_EMPTY(%r15),0x03   # reenable interrupts
+       j       sysc_return
 
 #
 # it was a single stepped SVC that is causing all the trouble
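
Both the 31-bit and 64-bit hunks above replace the branch to pgm_do_call with an inline dispatch: the program interruption code is masked to 0x7f, scaled to pointer width, and used to index pgm_check_table before calling the handler with the saved register area. In C terms this is a plain function-pointer jump table; a hedged sketch with made-up handler names (only the table-indexing idea is taken from the patch):

    #include <stdio.h>

    struct pt_regs { unsigned long psw, gprs[16]; };

    static void default_trap(struct pt_regs *regs)   { (void)regs; puts("default trap"); }
    static void illegal_op(struct pt_regs *regs)     { (void)regs; puts("illegal operation"); }
    static void addressing_exc(struct pt_regs *regs) { (void)regs; puts("addressing exception"); }

    /* 128 entries: one per (ilc-stripped) program interruption code. */
    static void (*pgm_check_table[128])(struct pt_regs *) = {
            [0 ... 127] = default_trap,      /* GCC range initializer, as the kernel uses */
            [0x01]      = illegal_op,
            [0x05]      = addressing_exc,
    };

    static void do_program_check(unsigned int pgm_int_code, struct pt_regs *regs)
    {
            unsigned int idx = pgm_int_code & 0x7f;   /* clear per-event bit and ilc */
            pgm_check_table[idx](regs);               /* branch to interrupt handler */
    }

    int main(void)
    {
            struct pt_regs regs = { 0 };
            do_program_check(0x05, &regs);
            return 0;
    }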
index a2163c95eb9845ffac908bf09b7af1bb5084cf3c..15a7536452d508b7998d37d9b679f4545d670dc1 100644 (file)
@@ -524,8 +524,11 @@ void etr_switch_to_local(void)
        if (!etr_eacr.sl)
                return;
        disable_sync_clock(NULL);
-       set_bit(ETR_EVENT_SWITCH_LOCAL, &etr_events);
-       queue_work(time_sync_wq, &etr_work);
+       if (!test_and_set_bit(ETR_EVENT_SWITCH_LOCAL, &etr_events)) {
+               etr_eacr.es = etr_eacr.sl = 0;
+               etr_setr(&etr_eacr);
+               queue_work(time_sync_wq, &etr_work);
+       }
 }
 
 /*
@@ -539,8 +542,11 @@ void etr_sync_check(void)
        if (!etr_eacr.es)
                return;
        disable_sync_clock(NULL);
-       set_bit(ETR_EVENT_SYNC_CHECK, &etr_events);
-       queue_work(time_sync_wq, &etr_work);
+       if (!test_and_set_bit(ETR_EVENT_SYNC_CHECK, &etr_events)) {
+               etr_eacr.es = 0;
+               etr_setr(&etr_eacr);
+               queue_work(time_sync_wq, &etr_work);
+       }
 }
 
 /*
@@ -902,7 +908,7 @@ static struct etr_eacr etr_handle_update(struct etr_aib *aib,
         * Do not try to get the alternate port aib if the clock
         * is not in sync yet.
         */
-       if (!check_sync_clock())
+       if (!eacr.es || !check_sync_clock())
                return eacr;
 
        /*
@@ -1064,7 +1070,7 @@ static void etr_work_fn(struct work_struct *work)
         * If the clock is in sync just update the eacr and return.
         * If there is no valid sync port wait for a port update.
         */
-       if (check_sync_clock() || sync_port < 0) {
+       if ((eacr.es && check_sync_clock()) || sync_port < 0) {
                etr_update_eacr(eacr);
                etr_set_tolec_timeout(now);
                goto out_unlock;
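
The ETR hunks above switch from an unconditional set_bit + queue_work to test_and_set_bit, so the port is disabled immediately in the interrupt path and the expensive sync handling is queued only by the first caller; later events coalesce until the worker runs. A small userspace sketch of that "only the first caller queues the work" idiom (the flag and function names are illustrative, not from the patch):

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_flag sync_check_pending = ATOMIC_FLAG_INIT;

    static void queue_sync_work(void)
    {
            puts("sync work queued");
    }

    /* May fire many times from interrupt context; queues the work once. */
    static void on_sync_check_event(void)
    {
            if (!atomic_flag_test_and_set(&sync_check_pending))
                    queue_sync_work();      /* first caller wins */
            /* later callers just return; the worker clears the flag when done */
    }

    static void sync_worker(void)
    {
            /* ... handle the event ... */
            atomic_flag_clear(&sync_check_pending);
    }

    int main(void)
    {
            on_sync_check_event();
            on_sync_check_event();          /* coalesces with the first event */
            sync_worker();
            on_sync_check_event();          /* queued again after the worker ran */
            return 0;
    }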
index 2e837f5080fe56d3ad0c87d308564cf157147624..fb7a5f052e2b8766d11115e3f7fc174fadf6ac2f 100644 (file)
@@ -145,6 +145,15 @@ int acpi_processor_ffh_cstate_probe(unsigned int cpu,
                percpu_entry->states[cx->index].eax = cx->address;
                percpu_entry->states[cx->index].ecx = MWAIT_ECX_INTERRUPT_BREAK;
        }
+
+       /*
+        * For _CST FFH on Intel, if GAS.access_size bit 1 is cleared,
+        * then we should skip checking BM_STS for this C-state.
+        * ref: "Intel Processor Vendor-Specific ACPI Interface Specification"
+        */
+       if ((c->x86_vendor == X86_VENDOR_INTEL) && !(reg->access_size & 0x2))
+               cx->bm_sts_skip = 1;
+
        return retval;
 }
 EXPORT_SYMBOL_GPL(acpi_processor_ffh_cstate_probe);
index 82e508677b91116ec8428e434ae164a8390084bf..fcc3c61fdecc0a8e73859868f5f863c9fdb315e3 100644 (file)
@@ -157,9 +157,14 @@ static int __init acpi_sleep_setup(char *str)
 #ifdef CONFIG_HIBERNATION
                if (strncmp(str, "s4_nohwsig", 10) == 0)
                        acpi_no_s4_hw_signature();
-               if (strncmp(str, "s4_nonvs", 8) == 0)
-                       acpi_s4_no_nvs();
+               if (strncmp(str, "s4_nonvs", 8) == 0) {
+                       pr_warning("ACPI: acpi_sleep=s4_nonvs is deprecated, "
+                                       "please use acpi_sleep=nonvs instead");
+                       acpi_nvs_nosave();
+               }
 #endif
+               if (strncmp(str, "nonvs", 5) == 0)
+                       acpi_nvs_nosave();
                if (strncmp(str, "old_ordering", 12) == 0)
                        acpi_old_suspend_ordering();
                str = strchr(str, ',');
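
The hunk above deprecates acpi_sleep=s4_nonvs in favour of a suspend-type-independent nonvs option, keeping the old spelling as a warning alias. The surrounding parser simply walks a comma-separated option string with strncmp and strchr; a standalone sketch of that style of parsing (option handlers here are stand-ins, not the kernel functions):

    #include <stdio.h>
    #include <string.h>

    static void handle_nonvs(void)        { puts("NVS saving disabled"); }
    static void handle_old_ordering(void) { puts("old suspend ordering"); }

    static void parse_sleep_options(const char *str)
    {
            while (str && *str) {
                    if (strncmp(str, "s4_nonvs", 8) == 0) {
                            puts("s4_nonvs is deprecated, please use nonvs instead");
                            handle_nonvs();
                    }
                    if (strncmp(str, "nonvs", 5) == 0)
                            handle_nonvs();
                    if (strncmp(str, "old_ordering", 12) == 0)
                            handle_old_ordering();
                    str = strchr(str, ',');     /* advance to the next option */
                    if (str)
                            str++;              /* skip the comma itself */
            }
    }

    int main(void)
    {
            parse_sleep_options("s4_nonvs,old_ordering");
            return 0;
    }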
index ce7cde713e7174e8293b552b332f968946a46904..a36de5bbb6222e953986ee4f4b34fe4caee1388d 100644 (file)
@@ -368,22 +368,16 @@ static int __init pcc_cpufreq_do_osc(acpi_handle *handle)
                return -ENODEV;
 
        out_obj = output.pointer;
-       if (out_obj->type != ACPI_TYPE_BUFFER) {
-               ret = -ENODEV;
-               goto out_free;
-       }
+       if (out_obj->type != ACPI_TYPE_BUFFER)
+               return -ENODEV;
 
        errors = *((u32 *)out_obj->buffer.pointer) & ~(1 << 0);
-       if (errors) {
-               ret = -ENODEV;
-               goto out_free;
-       }
+       if (errors)
+               return -ENODEV;
 
        supported = *((u32 *)(out_obj->buffer.pointer + 4));
-       if (!(supported & 0x1)) {
-               ret = -ENODEV;
-               goto out_free;
-       }
+       if (!(supported & 0x1))
+               return -ENODEV;
 
 out_free:
        kfree(output.pointer);
@@ -397,13 +391,17 @@ static int __init pcc_cpufreq_probe(void)
        struct pcc_memory_resource *mem_resource;
        struct pcc_register_resource *reg_resource;
        union acpi_object *out_obj, *member;
-       acpi_handle handle, osc_handle;
+       acpi_handle handle, osc_handle, pcch_handle;
        int ret = 0;
 
        status = acpi_get_handle(NULL, "\\_SB", &handle);
        if (ACPI_FAILURE(status))
                return -ENODEV;
 
+       status = acpi_get_handle(handle, "PCCH", &pcch_handle);
+       if (ACPI_FAILURE(status))
+               return -ENODEV;
+
        status = acpi_get_handle(handle, "_OSC", &osc_handle);
        if (ACPI_SUCCESS(status)) {
                ret = pcc_cpufreq_do_osc(&osc_handle);
@@ -543,13 +541,13 @@ static int pcc_cpufreq_cpu_init(struct cpufreq_policy *policy)
 
        if (!pcch_virt_addr) {
                result = -1;
-               goto pcch_null;
+               goto out;
        }
 
        result = pcc_get_offset(cpu);
        if (result) {
                dprintk("init: PCCP evaluation failed\n");
-               goto free;
+               goto out;
        }
 
        policy->max = policy->cpuinfo.max_freq =
@@ -558,14 +556,15 @@ static int pcc_cpufreq_cpu_init(struct cpufreq_policy *policy)
                ioread32(&pcch_hdr->minimum_frequency) * 1000;
        policy->cur = pcc_get_freq(cpu);
 
+       if (!policy->cur) {
+               dprintk("init: Unable to get current CPU frequency\n");
+               result = -EINVAL;
+               goto out;
+       }
+
        dprintk("init: policy->max is %d, policy->min is %d\n",
                policy->max, policy->min);
-
-       return 0;
-free:
-       pcc_clear_mapping();
-       free_percpu(pcc_cpu_info);
-pcch_null:
+out:
        return result;
 }
 
index 7ec2123838e6f70f5af02e6e5804143bcc29090c..3e90cce3dc8bfe50fa245e8febe2aaf2625df25e 100644 (file)
@@ -1023,13 +1023,12 @@ static int get_transition_latency(struct powernow_k8_data *data)
        }
        if (max_latency == 0) {
                /*
-                * Fam 11h always returns 0 as transition latency.
-                * This is intended and means "very fast". While cpufreq core
-                * and governors currently can handle that gracefully, better
-                * set it to 1 to avoid problems in the future.
-                * For all others it's a BIOS bug.
+                * Fam 11h and later may return 0 as transition latency. This
+                * is intended and means "very fast". While cpufreq core and
+                * governors currently can handle that gracefully, better set it
+                * to 1 to avoid problems in the future.
                 */
-               if (boot_cpu_data.x86 != 0x11)
+               if (boot_cpu_data.x86 < 0x11)
                        printk(KERN_ERR FW_WARN PFX "Invalid zero transition "
                                "latency\n");
                max_latency = 1;
index a198b7c87a123d2f80c6313261e9b80e8346cad1..ba390d731175a492686643dd873786eafda9521b 100644 (file)
@@ -964,7 +964,7 @@ fs_initcall(hpet_late_init);
 
 void hpet_disable(void)
 {
-       if (is_hpet_capable()) {
+       if (is_hpet_capable() && hpet_virt_address) {
                unsigned int cfg = hpet_readl(HPET_CFG);
 
                if (hpet_legacy_int_enabled) {
index 7c9f02c130f3ca9167e81205f39c968174a8e9ef..cafa7c80ac95714e7097ba06ff195cd6f285417f 100644 (file)
@@ -276,16 +276,6 @@ static struct sys_device device_i8259A = {
        .cls    = &i8259_sysdev_class,
 };
 
-static int __init i8259A_init_sysfs(void)
-{
-       int error = sysdev_class_register(&i8259_sysdev_class);
-       if (!error)
-               error = sysdev_register(&device_i8259A);
-       return error;
-}
-
-device_initcall(i8259A_init_sysfs);
-
 static void mask_8259A(void)
 {
        unsigned long flags;
@@ -407,3 +397,18 @@ struct legacy_pic default_legacy_pic = {
 };
 
 struct legacy_pic *legacy_pic = &default_legacy_pic;
+
+static int __init i8259A_init_sysfs(void)
+{
+       int error;
+
+       if (legacy_pic != &default_legacy_pic)
+               return 0;
+
+       error = sysdev_class_register(&i8259_sysdev_class);
+       if (!error)
+               error = sysdev_register(&device_i8259A);
+       return error;
+}
+
+device_initcall(i8259A_init_sysfs);
index 4f4af75b94828af47b811b6b5c6a45e7c4ae650e..01ab17ae2ae72c7bdf2d2c4de2a51f19f59755bf 100644 (file)
@@ -572,7 +572,6 @@ static int __kgdb_notify(struct die_args *args, unsigned long cmd)
        return NOTIFY_STOP;
 }
 
-#ifdef CONFIG_KGDB_LOW_LEVEL_TRAP
 int kgdb_ll_trap(int cmd, const char *str,
                 struct pt_regs *regs, long err, int trap, int sig)
 {
@@ -590,7 +589,6 @@ int kgdb_ll_trap(int cmd, const char *str,
 
        return __kgdb_notify(&args, cmd);
 }
-#endif /* CONFIG_KGDB_LOW_LEVEL_TRAP */
 
 static int
 kgdb_notify(struct notifier_block *self, unsigned long cmd, void *ptr)
@@ -625,6 +623,12 @@ int kgdb_arch_init(void)
        return register_die_notifier(&kgdb_notifier);
 }
 
+static void kgdb_hw_overflow_handler(struct perf_event *event, int nmi,
+               struct perf_sample_data *data, struct pt_regs *regs)
+{
+       kgdb_ll_trap(DIE_DEBUG, "debug", regs, 0, 0, SIGTRAP);
+}
+
 void kgdb_arch_late(void)
 {
        int i, cpu;
@@ -655,6 +659,7 @@ void kgdb_arch_late(void)
                for_each_online_cpu(cpu) {
                        pevent = per_cpu_ptr(breakinfo[i].pev, cpu);
                        pevent[0]->hw.sample_period = 1;
+                       pevent[0]->overflow_handler = kgdb_hw_overflow_handler;
                        if (pevent[0]->destroy != NULL) {
                                pevent[0]->destroy = NULL;
                                release_bp_slot(*pevent);
index 89d66ca4d87cb2982ae48c5877580e733ab0cce8..2331bdc2b549569e56f7dcec275c72bf06275928 100644 (file)
@@ -342,6 +342,7 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
                        /* advance table_gfn when emulating 1gb pages with 4k */
                        if (delta == 0)
                                table_gfn += PT_INDEX(addr, level);
+                       access &= gw->pte_access;
                } else {
                        direct = 0;
                        table_gfn = gw->table_gfn[level - 2];
index 05d571f6f19615cab8a90dec413e0a39537e914a..7fa89c39c64f67df0a62804cfa242ae6a2d6f638 100644 (file)
@@ -1562,7 +1562,7 @@ static int msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs __user *user_msrs,
 
        r = -ENOMEM;
        size = sizeof(struct kvm_msr_entry) * msrs.nmsrs;
-       entries = vmalloc(size);
+       entries = kmalloc(size, GFP_KERNEL);
        if (!entries)
                goto out;
 
@@ -1581,7 +1581,7 @@ static int msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs __user *user_msrs,
        r = n;
 
 out_free:
-       vfree(entries);
+       kfree(entries);
 out:
        return r;
 }
index d97b8dce16681ba90886a3b0e5546cf5b7060ce8..18b3f1468b7daeebd94dbe43216a4078951d9972 100644 (file)
@@ -70,6 +70,7 @@ acpi_ev_get_gpe_device(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
 acpi_status acpi_enable(void)
 {
        acpi_status status;
+       int retry;
 
        ACPI_FUNCTION_TRACE(acpi_enable);
 
@@ -98,16 +99,18 @@ acpi_status acpi_enable(void)
 
        /* Sanity check that transition succeeded */
 
-       if (acpi_hw_get_mode() != ACPI_SYS_MODE_ACPI) {
-               ACPI_ERROR((AE_INFO,
-                           "Hardware did not enter ACPI mode"));
-               return_ACPI_STATUS(AE_NO_HARDWARE_RESPONSE);
+       for (retry = 0; retry < 30000; ++retry) {
+               if (acpi_hw_get_mode() == ACPI_SYS_MODE_ACPI) {
+                       if (retry != 0)
+                               ACPI_WARNING((AE_INFO,
+                               "Platform took > %d00 usec to enter ACPI mode", retry));
+                       return_ACPI_STATUS(AE_OK);
+               }
+               acpi_os_stall(100);     /* 100 usec */
        }
 
-       ACPI_DEBUG_PRINT((ACPI_DB_INIT,
-                         "Transition to ACPI mode successful\n"));
-
-       return_ACPI_STATUS(AE_OK);
+       ACPI_ERROR((AE_INFO, "Hardware did not enter ACPI mode"));
+       return_ACPI_STATUS(AE_NO_HARDWARE_RESPONSE);
 }
 
 ACPI_EXPORT_SYMBOL(acpi_enable)
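
The acpi_enable() hunk above replaces a single post-transition check with a bounded polling loop: up to 30000 passes with a 100 usec stall each (roughly 3 seconds), a warning if the platform needed more than one pass, and an error only after the whole budget is exhausted. The generic poll-with-timeout shape, sketched outside ACPICA with made-up helper names:

    #include <stdio.h>
    #include <unistd.h>

    /* Stand-in for acpi_hw_get_mode(): pretend the hardware needs a few polls. */
    static int hardware_in_target_mode(void)
    {
            static int polls;
            return ++polls >= 4;
    }

    /* Poll for a hardware mode change: 100 usec per try, ~3 s total budget. */
    static int wait_for_mode_change(void)
    {
            int retry;

            for (retry = 0; retry < 30000; ++retry) {
                    if (hardware_in_target_mode()) {
                            if (retry != 0)   /* mirrors the kernel's "%d00 usec" trick */
                                    printf("platform took > %d00 usec\n", retry);
                            return 0;
                    }
                    usleep(100);
            }
            fprintf(stderr, "hardware did not enter the requested mode\n");
            return -1;
    }

    int main(void)
    {
            return wait_for_mode_change() ? 1 : 0;
    }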
index 3026e3fa83efc82923daf64e1f00fd2b59f23848..dc58402b0a177a4e03e8dc54e7a094804edc1d36 100644 (file)
@@ -868,9 +868,15 @@ static void acpi_battery_remove_fs(struct acpi_device *device)
 static void acpi_battery_notify(struct acpi_device *device, u32 event)
 {
        struct acpi_battery *battery = acpi_driver_data(device);
+#ifdef CONFIG_ACPI_SYSFS_POWER
+       struct device *old;
+#endif
 
        if (!battery)
                return;
+#ifdef CONFIG_ACPI_SYSFS_POWER
+       old = battery->bat.dev;
+#endif
        acpi_battery_update(battery);
        acpi_bus_generate_proc_event(device, event,
                                     acpi_battery_present(battery));
@@ -879,7 +885,7 @@ static void acpi_battery_notify(struct acpi_device *device, u32 event)
                                        acpi_battery_present(battery));
 #ifdef CONFIG_ACPI_SYSFS_POWER
        /* acpi_battery_update could remove power_supply object */
-       if (battery->bat.dev)
+       if (old && battery->bat.dev)
                power_supply_changed(&battery->bat);
 #endif
 }
index 01381be05e96b3dda60b1d753b81ecf5dc610dcd..2bb28b9d91c4c2643106ed0d63069ae85799d136 100644 (file)
@@ -214,7 +214,7 @@ static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
        .ident = "Sony VGN-SR290J",
        .matches = {
                     DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
-                    DMI_MATCH(DMI_PRODUCT_NAME, "Sony VGN-SR290J"),
+                    DMI_MATCH(DMI_PRODUCT_NAME, "VGN-SR290J"),
                },
        },
        {
index 51284351418f4a5cb0f176e1475d94db1e5e4b83..e9699aaed1092874b0f275d2ca86a8142850db1c 100644 (file)
@@ -223,7 +223,7 @@ static bool processor_physically_present(acpi_handle handle)
        type = (acpi_type == ACPI_TYPE_DEVICE) ? 1 : 0;
        cpuid = acpi_get_cpuid(handle, type, acpi_id);
 
-       if (cpuid == -1)
+       if ((cpuid == -1) && (num_possible_cpus() > 1))
                return false;
 
        return true;
index b1b385692f46e09f05dd55882ed3ea9c73039b74..e9a8026d39f091444ddcf90446f008fc407cc9c8 100644 (file)
@@ -76,14 +76,19 @@ static unsigned int max_cstate __read_mostly = ACPI_PROCESSOR_MAX_POWER;
 module_param(max_cstate, uint, 0000);
 static unsigned int nocst __read_mostly;
 module_param(nocst, uint, 0000);
+static int bm_check_disable __read_mostly;
+module_param(bm_check_disable, uint, 0000);
 
 static unsigned int latency_factor __read_mostly = 2;
 module_param(latency_factor, uint, 0644);
 
+#ifdef CONFIG_ACPI_PROCFS
 static u64 us_to_pm_timer_ticks(s64 t)
 {
        return div64_u64(t * PM_TIMER_FREQUENCY, 1000000);
 }
+#endif
+
 /*
  * IBM ThinkPad R40e crashes mysteriously when going into C2 or C3.
  * For now disable this. Probably a bug somewhere else.
@@ -763,6 +768,9 @@ static int acpi_idle_bm_check(void)
 {
        u32 bm_status = 0;
 
+       if (bm_check_disable)
+               return 0;
+
        acpi_read_bit_register(ACPI_BITREG_BUS_MASTER_STATUS, &bm_status);
        if (bm_status)
                acpi_write_bit_register(ACPI_BITREG_BUS_MASTER_STATUS, 1);
@@ -947,7 +955,7 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
        if (acpi_idle_suspend)
                return(acpi_idle_enter_c1(dev, state));
 
-       if (acpi_idle_bm_check()) {
+       if (!cx->bm_sts_skip && acpi_idle_bm_check()) {
                if (dev->safe_state) {
                        dev->last_state = dev->safe_state;
                        return dev->safe_state->enter(dev, dev->safe_state);
index 5b7c52e4a00f09001becab5df101e7e5fa4dabc8..2862c781b37266aecee8052dac047aa2bcb31ef2 100644 (file)
@@ -81,6 +81,20 @@ static int acpi_sleep_prepare(u32 acpi_state)
 #ifdef CONFIG_ACPI_SLEEP
 static u32 acpi_target_sleep_state = ACPI_STATE_S0;
 
+/*
+ * The ACPI specification wants us to save NVS memory regions during hibernation
+ * and to restore them during the subsequent resume.  Windows does that also for
+ * suspend to RAM.  However, it is known that this mechanism does not work on
+ * all machines, so we allow the user to disable it with the help of the
+ * 'acpi_sleep=nonvs' kernel command line option.
+ */
+static bool nvs_nosave;
+
+void __init acpi_nvs_nosave(void)
+{
+       nvs_nosave = true;
+}
+
 /*
  * ACPI 1.0 wants us to execute _PTS before suspending devices, so we allow the
  * user to request that behavior by using the 'acpi_old_suspend_ordering'
@@ -197,8 +211,7 @@ static int acpi_suspend_begin(suspend_state_t pm_state)
        u32 acpi_state = acpi_suspend_states[pm_state];
        int error = 0;
 
-       error = suspend_nvs_alloc();
-
+       error = nvs_nosave ? 0 : suspend_nvs_alloc();
        if (error)
                return error;
 
@@ -388,20 +401,6 @@ static struct dmi_system_id __initdata acpisleep_dmi_table[] = {
 #endif /* CONFIG_SUSPEND */
 
 #ifdef CONFIG_HIBERNATION
-/*
- * The ACPI specification wants us to save NVS memory regions during hibernation
- * and to restore them during the subsequent resume.  However, it is not certain
- * if this mechanism is going to work on all machines, so we allow the user to
- * disable this mechanism using the 'acpi_sleep=s4_nonvs' kernel command line
- * option.
- */
-static bool s4_no_nvs;
-
-void __init acpi_s4_no_nvs(void)
-{
-       s4_no_nvs = true;
-}
-
 static unsigned long s4_hardware_signature;
 static struct acpi_table_facs *facs;
 static bool nosigcheck;
@@ -415,7 +414,7 @@ static int acpi_hibernation_begin(void)
 {
        int error;
 
-       error = s4_no_nvs ? 0 : suspend_nvs_alloc();
+       error = nvs_nosave ? 0 : suspend_nvs_alloc();
        if (!error) {
                acpi_target_sleep_state = ACPI_STATE_S4;
                acpi_sleep_tts_switch(acpi_target_sleep_state);
@@ -510,7 +509,7 @@ static int acpi_hibernation_begin_old(void)
        error = acpi_sleep_prepare(ACPI_STATE_S4);
 
        if (!error) {
-               if (!s4_no_nvs)
+               if (!nvs_nosave)
                        error = suspend_nvs_alloc();
                if (!error)
                        acpi_target_sleep_state = ACPI_STATE_S4;
index 9630fbdf4e6c210eb09a92f4489bb55f60e3b3a7..9b9d3bd54e3a5359f8f0aa3e833ab5380ea11a47 100644 (file)
@@ -673,7 +673,7 @@ static struct kobject *get_device_parent(struct device *dev,
                 */
                if (parent == NULL)
                        parent_kobj = virtual_device_parent(dev);
-               else if (parent->class)
+               else if (parent->class && !dev->class->ns_type)
                        return &parent->kobj;
                else
                        parent_kobj = &parent->kobj;
index d836a71bf06db79f5df5b1fb1ddbd6b79e660081..5bbc7be203a64202527607bfbb023bae26781552 100644 (file)
@@ -816,9 +816,9 @@ static const struct intel_driver_description {
        { PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG,
            "HD Graphics", NULL, &intel_i965_driver },
        { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_IG,
-           "Sandybridge", NULL, &intel_i965_driver },
+           "Sandybridge", NULL, &intel_gen6_driver },
        { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_IG,
-           "Sandybridge", NULL, &intel_i965_driver },
+           "Sandybridge", NULL, &intel_gen6_driver },
        { 0, 0, NULL, NULL, NULL }
 };
 
index 2547465d46584e15c8993086e2546f78872055de..c05e3e518268ac3cab497970f55371cc2c06acba 100644 (file)
 #define I810_PTE_LOCAL         0x00000002
 #define I810_PTE_VALID         0x00000001
 #define I830_PTE_SYSTEM_CACHED  0x00000006
+/* GT PTE cache control fields */
+#define GEN6_PTE_UNCACHED      0x00000002
+#define GEN6_PTE_LLC           0x00000004
+#define GEN6_PTE_LLC_MLC       0x00000006
+#define GEN6_PTE_GFDT          0x00000008
+
 #define I810_SMRAM_MISCC       0x70
 #define I810_GFX_MEM_WIN_SIZE  0x00010000
 #define I810_GFX_MEM_WIN_32M   0x00010000
index 9344216183a433a84210b5ebaef0efd57e6aa9c7..d22ffb811bf2cea4001a97d485327d668ff9d85a 100644 (file)
 #define USE_PCI_DMA_API 1
 #endif
 
+/* Max amount of stolen space, anything above will be returned to Linux */
+int intel_max_stolen = 32 * 1024 * 1024;
+EXPORT_SYMBOL(intel_max_stolen);
+
 static const struct aper_size_info_fixed intel_i810_sizes[] =
 {
        {64, 16384, 4},
@@ -104,7 +108,7 @@ static int intel_agp_map_memory(struct agp_memory *mem)
        DBG("try mapping %lu pages\n", (unsigned long)mem->page_count);
 
        if (sg_alloc_table(&st, mem->page_count, GFP_KERNEL))
-               return -ENOMEM;
+               goto err;
 
        mem->sg_list = sg = st.sgl;
 
@@ -113,11 +117,14 @@ static int intel_agp_map_memory(struct agp_memory *mem)
 
        mem->num_sg = pci_map_sg(intel_private.pcidev, mem->sg_list,
                                 mem->page_count, PCI_DMA_BIDIRECTIONAL);
-       if (unlikely(!mem->num_sg)) {
-               intel_agp_free_sglist(mem);
-               return -ENOMEM;
-       }
+       if (unlikely(!mem->num_sg))
+               goto err;
+
        return 0;
+
+err:
+       sg_free_table(&st);
+       return -ENOMEM;
 }
 
 static void intel_agp_unmap_memory(struct agp_memory *mem)
@@ -176,7 +183,7 @@ static void intel_agp_insert_sg_entries(struct agp_memory *mem,
        if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB ||
            agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB)
        {
-               cache_bits = I830_PTE_SYSTEM_CACHED;
+               cache_bits = GEN6_PTE_LLC_MLC;
        }
 
        for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
@@ -710,7 +717,12 @@ static void intel_i830_init_gtt_entries(void)
                        break;
                }
        }
-       if (gtt_entries > 0) {
+       if (!local && gtt_entries > intel_max_stolen) {
+               dev_info(&agp_bridge->dev->dev,
+                        "detected %dK stolen memory, trimming to %dK\n",
+                        gtt_entries / KB(1), intel_max_stolen / KB(1));
+               gtt_entries = intel_max_stolen / KB(4);
+       } else if (gtt_entries > 0) {
                dev_info(&agp_bridge->dev->dev, "detected %dK %s memory\n",
                       gtt_entries / KB(1), local ? "local" : "stolen");
                gtt_entries /= KB(4);
@@ -797,6 +809,10 @@ static int intel_i830_create_gatt_table(struct agp_bridge_data *bridge)
 
        /* we have to call this as early as possible after the MMIO base address is known */
        intel_i830_init_gtt_entries();
+       if (intel_private.gtt_entries == 0) {
+               iounmap(intel_private.registers);
+               return -ENOMEM;
+       }
 
        agp_bridge->gatt_table = NULL;
 
@@ -1216,17 +1232,20 @@ static int intel_i915_get_gtt_size(void)
 
                /* G33's GTT size defined in gmch_ctrl */
                pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl);
-               switch (gmch_ctrl & G33_PGETBL_SIZE_MASK) {
-               case G33_PGETBL_SIZE_1M:
+               switch (gmch_ctrl & I830_GMCH_GMS_MASK) {
+               case I830_GMCH_GMS_STOLEN_512:
+                       size = 512;
+                       break;
+               case I830_GMCH_GMS_STOLEN_1024:
                        size = 1024;
                        break;
-               case G33_PGETBL_SIZE_2M:
-                       size = 2048;
+               case I830_GMCH_GMS_STOLEN_8192:
+                       size = 8*1024;
                        break;
                default:
                        dev_info(&agp_bridge->dev->dev,
                                 "unknown page table size 0x%x, assuming 512KB\n",
-                               (gmch_ctrl & G33_PGETBL_SIZE_MASK));
+                               (gmch_ctrl & I830_GMCH_GMS_MASK));
                        size = 512;
                }
        } else {
@@ -1279,6 +1298,11 @@ static int intel_i915_create_gatt_table(struct agp_bridge_data *bridge)
 
        /* we have to call this as early as possible after the MMIO base address is known */
        intel_i830_init_gtt_entries();
+       if (intel_private.gtt_entries == 0) {
+               iounmap(intel_private.gtt);
+               iounmap(intel_private.registers);
+               return -ENOMEM;
+       }
 
        agp_bridge->gatt_table = NULL;
 
@@ -1306,6 +1330,16 @@ static unsigned long intel_i965_mask_memory(struct agp_bridge_data *bridge,
        return addr | bridge->driver->masks[type].mask;
 }
 
+static unsigned long intel_gen6_mask_memory(struct agp_bridge_data *bridge,
+                                           dma_addr_t addr, int type)
+{
+       /* Shift high bits down */
+       addr |= (addr >> 28) & 0xff;
+
+       /* Type checking must be done elsewhere */
+       return addr | bridge->driver->masks[type].mask;
+}
+
 static void intel_i965_get_gtt_range(int *gtt_offset, int *gtt_size)
 {
        u16 snb_gmch_ctl;
@@ -1387,6 +1421,11 @@ static int intel_i965_create_gatt_table(struct agp_bridge_data *bridge)
 
        /* we have to call this as early as possible after the MMIO base address is known */
        intel_i830_init_gtt_entries();
+       if (intel_private.gtt_entries == 0) {
+               iounmap(intel_private.gtt);
+               iounmap(intel_private.registers);
+               return -ENOMEM;
+       }
 
        agp_bridge->gatt_table = NULL;
 
@@ -1514,6 +1553,39 @@ static const struct agp_bridge_driver intel_i965_driver = {
 #endif
 };
 
+static const struct agp_bridge_driver intel_gen6_driver = {
+       .owner                  = THIS_MODULE,
+       .aperture_sizes         = intel_i830_sizes,
+       .size_type              = FIXED_APER_SIZE,
+       .num_aperture_sizes     = 4,
+       .needs_scratch_page     = true,
+       .configure              = intel_i9xx_configure,
+       .fetch_size             = intel_i9xx_fetch_size,
+       .cleanup                = intel_i915_cleanup,
+       .mask_memory            = intel_gen6_mask_memory,
+       .masks                  = intel_i810_masks,
+       .agp_enable             = intel_i810_agp_enable,
+       .cache_flush            = global_cache_flush,
+       .create_gatt_table      = intel_i965_create_gatt_table,
+       .free_gatt_table        = intel_i830_free_gatt_table,
+       .insert_memory          = intel_i915_insert_entries,
+       .remove_memory          = intel_i915_remove_entries,
+       .alloc_by_type          = intel_i830_alloc_by_type,
+       .free_by_type           = intel_i810_free_by_type,
+       .agp_alloc_page         = agp_generic_alloc_page,
+       .agp_alloc_pages        = agp_generic_alloc_pages,
+       .agp_destroy_page       = agp_generic_destroy_page,
+       .agp_destroy_pages      = agp_generic_destroy_pages,
+       .agp_type_to_mask_type  = intel_i830_type_to_mask_type,
+       .chipset_flush          = intel_i915_chipset_flush,
+#ifdef USE_PCI_DMA_API
+       .agp_map_page           = intel_agp_map_page,
+       .agp_unmap_page         = intel_agp_unmap_page,
+       .agp_map_memory         = intel_agp_map_memory,
+       .agp_unmap_memory       = intel_agp_unmap_memory,
+#endif
+};
+
 static const struct agp_bridge_driver intel_g33_driver = {
        .owner                  = THIS_MODULE,
        .aperture_sizes         = intel_i830_sizes,
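
Two related things happen in the AGP hunks above: detected stolen memory is clamped to intel_max_stolen (32 MB by default) so oversized BIOS allocations are handed back to Linux, and Sandybridge gets its own bridge driver whose mask_memory hook folds the physical-address bits above bit 31 into the low byte of the PTE. A hedged sketch of just the clamping logic, with illustrative constants and a printf standing in for dev_info():

    #include <stdio.h>

    #define KB(x)   ((x) * 1024)
    #define MB(x)   (KB(KB(x)))

    static int intel_max_stolen = 32 * 1024 * 1024;   /* bytes */

    /* Convert detected stolen bytes into GTT entries, trimming oversized
     * BIOS allocations so the excess can be given back to the OS. */
    static int stolen_to_gtt_entries(int stolen_bytes, int local)
    {
            int gtt_entries = stolen_bytes;

            if (!local && gtt_entries > intel_max_stolen) {
                    printf("detected %dK stolen memory, trimming to %dK\n",
                           gtt_entries / KB(1), intel_max_stolen / KB(1));
                    gtt_entries = intel_max_stolen / KB(4);
            } else if (gtt_entries > 0) {
                    printf("detected %dK %s memory\n",
                           gtt_entries / KB(1), local ? "local" : "stolen");
                    gtt_entries /= KB(4);       /* one 4K page per entry */
            }
            return gtt_entries;
    }

    int main(void)
    {
            printf("entries: %d\n", stolen_to_gtt_entries(MB(64), 0));
            return 0;
    }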
index 24314a9cffe83875858b4d2b4aeb0ab5a68acf21..1030f8420137adb8ff7363eb259f9578026fe178 100644 (file)
@@ -623,7 +623,14 @@ static int tpm_tis_pnp_suspend(struct pnp_dev *dev, pm_message_t msg)
 
 static int tpm_tis_pnp_resume(struct pnp_dev *dev)
 {
-       return tpm_pm_resume(&dev->dev);
+       struct tpm_chip *chip = pnp_get_drvdata(dev);
+       int ret;
+
+       ret = tpm_pm_resume(&dev->dev);
+       if (!ret)
+               tpm_continue_selftest(chip);
+
+       return ret;
 }
 
 static struct pnp_device_id tpm_pnp_tbl[] __devinitdata = {
index 063b2184caf5506fc1e1a83e1cd6c8dc6f22b6c3..938b74ea9ffbee8e6c195baa2eae97520c7a07cb 100644 (file)
@@ -1077,6 +1077,7 @@ err_out_unregister:
 
 err_unlock_policy:
        unlock_policy_rwsem_write(cpu);
+       free_cpumask_var(policy->related_cpus);
 err_free_cpumask:
        free_cpumask_var(policy->cpus);
 err_free_policy:
@@ -1762,17 +1763,8 @@ static int __cpufreq_set_policy(struct cpufreq_policy *data,
                        dprintk("governor switch\n");
 
                        /* end old governor */
-                       if (data->governor) {
-                               /*
-                                * Need to release the rwsem around governor
-                                * stop due to lock dependency between
-                                * cancel_delayed_work_sync and the read lock
-                                * taken in the delayed work handler.
-                                */
-                               unlock_policy_rwsem_write(data->cpu);
+                       if (data->governor)
                                __cpufreq_governor(data, CPUFREQ_GOV_STOP);
-                               lock_policy_rwsem_write(data->cpu);
-                       }
 
                        /* start new governor */
                        data->governor = policy->governor;
index cc9357da0e3442ad9666a415f80b3dd29321b6a0..e0187d16dd7c53fd240b58d62005e4c17df14bc4 100644 (file)
@@ -1300,7 +1300,7 @@ int i7core_get_onedevice(struct pci_dev **prev, int devno,
                if (devno == 0)
                        return -ENODEV;
 
-               i7core_printk(KERN_ERR,
+               i7core_printk(KERN_INFO,
                        "Device not found: dev %02x.%d PCI ID %04x:%04x\n",
                        dev_descr->dev, dev_descr->func,
                        PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
index f39b00a46eda36c1d6e6d265e7aa02245d012556..1052340e6802658db085627f501f3c6102347e67 100644 (file)
@@ -336,6 +336,7 @@ static struct of_device_id mpc85xx_pci_err_of_match[] = {
        },
        {},
 };
+MODULE_DEVICE_TABLE(of, mpc85xx_pci_err_of_match);
 
 static struct of_platform_driver mpc85xx_pci_err_driver = {
        .probe = mpc85xx_pci_err_probe,
@@ -650,6 +651,7 @@ static struct of_device_id mpc85xx_l2_err_of_match[] = {
        { .compatible = "fsl,p2020-l2-cache-controller", },
        {},
 };
+MODULE_DEVICE_TABLE(of, mpc85xx_l2_err_of_match);
 
 static struct of_platform_driver mpc85xx_l2_err_driver = {
        .probe = mpc85xx_l2_err_probe,
@@ -1126,6 +1128,7 @@ static struct of_device_id mpc85xx_mc_err_of_match[] = {
        { .compatible = "fsl,p2020-memory-controller", },
        {},
 };
+MODULE_DEVICE_TABLE(of, mpc85xx_mc_err_of_match);
 
 static struct of_platform_driver mpc85xx_mc_err_driver = {
        .probe = mpc85xx_mc_err_probe,
index 3ca36542e3385b0c62a33b703e637fac31884c93..4e51fe3c1fc4f97594d96fcb9a4e719fa40b8edc 100644 (file)
@@ -893,10 +893,12 @@ EXPORT_SYMBOL_GPL(gpio_sysfs_set_active_low);
 void gpio_unexport(unsigned gpio)
 {
        struct gpio_desc        *desc;
-       int                     status = -EINVAL;
+       int                     status = 0;
 
-       if (!gpio_is_valid(gpio))
+       if (!gpio_is_valid(gpio)) {
+               status = -EINVAL;
                goto done;
+       }
 
        mutex_lock(&sysfs_lock);
 
@@ -911,7 +913,6 @@ void gpio_unexport(unsigned gpio)
                        clear_bit(FLAG_EXPORT, &desc->flags);
                        put_device(dev);
                        device_unregister(dev);
-                       status = 0;
                } else
                        status = -ENODEV;
        }
index 83d8072066cb80b809b1eb0f9791b834cd0e7329..ea1d57291b0e85d72259d6d699c9ce581029a25b 100644 (file)
@@ -864,8 +864,8 @@ drm_mode_std(struct drm_connector *connector, struct edid *edid,
                mode = drm_cvt_mode(dev, 1366, 768, vrefresh_rate, 0, 0,
                                    false);
                mode->hdisplay = 1366;
-               mode->vsync_start = mode->vsync_start - 1;
-               mode->vsync_end = mode->vsync_end - 1;
+               mode->hsync_start = mode->hsync_start - 1;
+               mode->hsync_end = mode->hsync_end - 1;
                return mode;
        }
 
index aee83fa178f6a6dc60c7273eaaae29b43dbe000d..9214119c01540fb5244d3ca383adb1c5ee04722c 100644 (file)
@@ -605,6 +605,9 @@ static int i915_fbc_status(struct seq_file *m, void *unused)
                case FBC_NOT_TILED:
                        seq_printf(m, "scanout buffer not tiled");
                        break;
+               case FBC_MULTIPLE_PIPES:
+                       seq_printf(m, "multiple pipes are enabled");
+                       break;
                default:
                        seq_printf(m, "unknown reason");
                }
index 92898035845d606e134865b6d9348d187909b979..f19ffe87af3c3fbcef67e03a25a6e4a22e5fe133 100644 (file)
@@ -41,6 +41,8 @@
 #include <linux/vga_switcheroo.h>
 #include <linux/slab.h>
 
+extern int intel_max_stolen; /* from AGP driver */
+
 /**
  * Sets up the hardware status page for devices that need a physical address
  * in the register.
@@ -1257,7 +1259,7 @@ static void i915_setup_compression(struct drm_device *dev, int size)
                drm_mm_put_block(compressed_fb);
        }
 
-       if (!IS_GM45(dev)) {
+       if (!(IS_GM45(dev) || IS_IRONLAKE_M(dev))) {
                compressed_llb = drm_mm_search_free(&dev_priv->vram, 4096,
                                                    4096, 0);
                if (!compressed_llb) {
@@ -1283,8 +1285,9 @@ static void i915_setup_compression(struct drm_device *dev, int size)
 
        intel_disable_fbc(dev);
        dev_priv->compressed_fb = compressed_fb;
-
-       if (IS_GM45(dev)) {
+       if (IS_IRONLAKE_M(dev))
+               I915_WRITE(ILK_DPFC_CB_BASE, compressed_fb->start);
+       else if (IS_GM45(dev)) {
                I915_WRITE(DPFC_CB_BASE, compressed_fb->start);
        } else {
                I915_WRITE(FBC_CFB_BASE, cfb_base);
@@ -1292,7 +1295,7 @@ static void i915_setup_compression(struct drm_device *dev, int size)
                dev_priv->compressed_llb = compressed_llb;
        }
 
-       DRM_DEBUG("FBC base 0x%08lx, ll base 0x%08lx, size %dM\n", cfb_base,
+       DRM_DEBUG_KMS("FBC base 0x%08lx, ll base 0x%08lx, size %dM\n", cfb_base,
                  ll_base, size >> 20);
 }
 
@@ -1301,7 +1304,7 @@ static void i915_cleanup_compression(struct drm_device *dev)
        struct drm_i915_private *dev_priv = dev->dev_private;
 
        drm_mm_put_block(dev_priv->compressed_fb);
-       if (!IS_GM45(dev))
+       if (dev_priv->compressed_llb)
                drm_mm_put_block(dev_priv->compressed_llb);
 }
 
@@ -2105,6 +2108,12 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
        if (ret)
                goto out_iomapfree;
 
+       if (prealloc_size > intel_max_stolen) {
+               DRM_INFO("detected %dM stolen memory, trimming to %dM\n",
+                        prealloc_size >> 20, intel_max_stolen >> 20);
+               prealloc_size = intel_max_stolen;
+       }
+
        dev_priv->wq = create_singlethread_workqueue("i915");
        if (dev_priv->wq == NULL) {
                DRM_ERROR("Failed to create our workqueue.\n");
index 65d3f3e8475b2e5a81bc0f3f14e20ad28b86dc6b..5044f653e8ea981d46729360b5dc609836a319ab 100644 (file)
@@ -93,11 +93,11 @@ static const struct intel_device_info intel_i945gm_info = {
 };
 
 static const struct intel_device_info intel_i965g_info = {
-       .is_i965g = 1, .is_i9xx = 1, .has_hotplug = 1,
+       .is_broadwater = 1, .is_i965g = 1, .is_i9xx = 1, .has_hotplug = 1,
 };
 
 static const struct intel_device_info intel_i965gm_info = {
-       .is_i965g = 1, .is_mobile = 1, .is_i965gm = 1, .is_i9xx = 1,
+       .is_crestline = 1, .is_i965g = 1, .is_i965gm = 1, .is_i9xx = 1,
        .is_mobile = 1, .has_fbc = 1, .has_rc6 = 1,
        .has_hotplug = 1,
 };
@@ -114,7 +114,7 @@ static const struct intel_device_info intel_g45_info = {
 };
 
 static const struct intel_device_info intel_gm45_info = {
-       .is_i965g = 1, .is_mobile = 1, .is_g4x = 1, .is_i9xx = 1,
+       .is_i965g = 1, .is_g4x = 1, .is_i9xx = 1,
        .is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1, .has_rc6 = 1,
        .has_pipe_cxsr = 1,
        .has_hotplug = 1,
@@ -134,7 +134,7 @@ static const struct intel_device_info intel_ironlake_d_info = {
 
 static const struct intel_device_info intel_ironlake_m_info = {
        .is_ironlake = 1, .is_mobile = 1, .is_i965g = 1, .is_i9xx = 1,
-       .need_gfx_hws = 1, .has_rc6 = 1,
+       .need_gfx_hws = 1, .has_fbc = 1, .has_rc6 = 1,
        .has_hotplug = 1,
 };
 
@@ -148,33 +148,33 @@ static const struct intel_device_info intel_sandybridge_m_info = {
        .has_hotplug = 1, .is_gen6 = 1,
 };
 
-static const struct pci_device_id pciidlist[] = {
-       INTEL_VGA_DEVICE(0x3577, &intel_i830_info),
-       INTEL_VGA_DEVICE(0x2562, &intel_845g_info),
-       INTEL_VGA_DEVICE(0x3582, &intel_i85x_info),
+static const struct pci_device_id pciidlist[] = {              /* aka */
+       INTEL_VGA_DEVICE(0x3577, &intel_i830_info),             /* I830_M */
+       INTEL_VGA_DEVICE(0x2562, &intel_845g_info),             /* 845_G */
+       INTEL_VGA_DEVICE(0x3582, &intel_i85x_info),             /* I855_GM */
        INTEL_VGA_DEVICE(0x358e, &intel_i85x_info),
-       INTEL_VGA_DEVICE(0x2572, &intel_i865g_info),
-       INTEL_VGA_DEVICE(0x2582, &intel_i915g_info),
-       INTEL_VGA_DEVICE(0x258a, &intel_i915g_info),
-       INTEL_VGA_DEVICE(0x2592, &intel_i915gm_info),
-       INTEL_VGA_DEVICE(0x2772, &intel_i945g_info),
-       INTEL_VGA_DEVICE(0x27a2, &intel_i945gm_info),
-       INTEL_VGA_DEVICE(0x27ae, &intel_i945gm_info),
-       INTEL_VGA_DEVICE(0x2972, &intel_i965g_info),
-       INTEL_VGA_DEVICE(0x2982, &intel_i965g_info),
-       INTEL_VGA_DEVICE(0x2992, &intel_i965g_info),
-       INTEL_VGA_DEVICE(0x29a2, &intel_i965g_info),
-       INTEL_VGA_DEVICE(0x29b2, &intel_g33_info),
-       INTEL_VGA_DEVICE(0x29c2, &intel_g33_info),
-       INTEL_VGA_DEVICE(0x29d2, &intel_g33_info),
-       INTEL_VGA_DEVICE(0x2a02, &intel_i965gm_info),
-       INTEL_VGA_DEVICE(0x2a12, &intel_i965gm_info),
-       INTEL_VGA_DEVICE(0x2a42, &intel_gm45_info),
-       INTEL_VGA_DEVICE(0x2e02, &intel_g45_info),
-       INTEL_VGA_DEVICE(0x2e12, &intel_g45_info),
-       INTEL_VGA_DEVICE(0x2e22, &intel_g45_info),
-       INTEL_VGA_DEVICE(0x2e32, &intel_g45_info),
-       INTEL_VGA_DEVICE(0x2e42, &intel_g45_info),
+       INTEL_VGA_DEVICE(0x2572, &intel_i865g_info),            /* I865_G */
+       INTEL_VGA_DEVICE(0x2582, &intel_i915g_info),            /* I915_G */
+       INTEL_VGA_DEVICE(0x258a, &intel_i915g_info),            /* E7221_G */
+       INTEL_VGA_DEVICE(0x2592, &intel_i915gm_info),           /* I915_GM */
+       INTEL_VGA_DEVICE(0x2772, &intel_i945g_info),            /* I945_G */
+       INTEL_VGA_DEVICE(0x27a2, &intel_i945gm_info),           /* I945_GM */
+       INTEL_VGA_DEVICE(0x27ae, &intel_i945gm_info),           /* I945_GME */
+       INTEL_VGA_DEVICE(0x2972, &intel_i965g_info),            /* I946_GZ */
+       INTEL_VGA_DEVICE(0x2982, &intel_i965g_info),            /* G35_G */
+       INTEL_VGA_DEVICE(0x2992, &intel_i965g_info),            /* I965_Q */
+       INTEL_VGA_DEVICE(0x29a2, &intel_i965g_info),            /* I965_G */
+       INTEL_VGA_DEVICE(0x29b2, &intel_g33_info),              /* Q35_G */
+       INTEL_VGA_DEVICE(0x29c2, &intel_g33_info),              /* G33_G */
+       INTEL_VGA_DEVICE(0x29d2, &intel_g33_info),              /* Q33_G */
+       INTEL_VGA_DEVICE(0x2a02, &intel_i965gm_info),           /* I965_GM */
+       INTEL_VGA_DEVICE(0x2a12, &intel_i965gm_info),           /* I965_GME */
+       INTEL_VGA_DEVICE(0x2a42, &intel_gm45_info),             /* GM45_G */
+       INTEL_VGA_DEVICE(0x2e02, &intel_g45_info),              /* IGD_E_G */
+       INTEL_VGA_DEVICE(0x2e12, &intel_g45_info),              /* Q45_G */
+       INTEL_VGA_DEVICE(0x2e22, &intel_g45_info),              /* G45_G */
+       INTEL_VGA_DEVICE(0x2e32, &intel_g45_info),              /* G41_G */
+       INTEL_VGA_DEVICE(0x2e42, &intel_g45_info),              /* B43_G */
        INTEL_VGA_DEVICE(0xa001, &intel_pineview_info),
        INTEL_VGA_DEVICE(0xa011, &intel_pineview_info),
        INTEL_VGA_DEVICE(0x0042, &intel_ironlake_d_info),
@@ -340,7 +340,7 @@ int i965_reset(struct drm_device *dev, u8 flags)
        /*
         * Clear request list
         */
-       i915_gem_retire_requests(dev, &dev_priv->render_ring);
+       i915_gem_retire_requests(dev);
 
        if (need_display)
                i915_save_display(dev);
@@ -482,7 +482,7 @@ static int i915_pm_poweroff(struct device *dev)
        return i915_drm_freeze(drm_dev);
 }
 
-const struct dev_pm_ops i915_pm_ops = {
+static const struct dev_pm_ops i915_pm_ops = {
      .suspend = i915_pm_suspend,
      .resume = i915_pm_resume,
      .freeze = i915_pm_freeze,
index d147ab2f5bfcabfc8bc82cd5ccdea93064fa4cf0..906663b9929e7cf6b951bf3d91b135b0219b38fc 100644 (file)
@@ -176,7 +176,8 @@ struct drm_i915_display_funcs {
        int (*get_display_clock_speed)(struct drm_device *dev);
        int (*get_fifo_size)(struct drm_device *dev, int plane);
        void (*update_wm)(struct drm_device *dev, int planea_clock,
-                         int planeb_clock, int sr_hdisplay, int pixel_size);
+                         int planeb_clock, int sr_hdisplay, int sr_htotal,
+                         int pixel_size);
        /* clock updates for mode set */
        /* cursor updates */
        /* render clock increase/decrease */
@@ -200,6 +201,8 @@ struct intel_device_info {
        u8 need_gfx_hws : 1;
        u8 is_g4x : 1;
        u8 is_pineview : 1;
+       u8 is_broadwater : 1;
+       u8 is_crestline : 1;
        u8 is_ironlake : 1;
        u8 is_gen6 : 1;
        u8 has_fbc : 1;
@@ -215,6 +218,7 @@ enum no_fbc_reason {
        FBC_MODE_TOO_LARGE, /* mode too large for compression */
        FBC_BAD_PLANE, /* fbc not supported on plane */
        FBC_NOT_TILED, /* buffer not tiled */
+       FBC_MULTIPLE_PIPES, /* more than one pipe active */
 };
 
 enum intel_pch {
@@ -222,6 +226,8 @@ enum intel_pch {
        PCH_CPT,        /* Cougarpoint PCH */
 };
 
+#define QUIRK_PIPEA_FORCE (1<<0)
+
 struct intel_fbdev;
 
 typedef struct drm_i915_private {
@@ -285,6 +291,8 @@ typedef struct drm_i915_private {
        struct timer_list hangcheck_timer;
        int hangcheck_count;
        uint32_t last_acthd;
+       uint32_t last_instdone;
+       uint32_t last_instdone1;
 
        struct drm_mm vram;
 
@@ -337,6 +345,8 @@ typedef struct drm_i915_private {
        /* PCH chipset type */
        enum intel_pch pch_type;
 
+       unsigned long quirks;
+
        /* Register state */
        bool modeset_on_lid;
        u8 saveLBB;
@@ -541,6 +551,14 @@ typedef struct drm_i915_private {
                /** LRU list of objects with fence regs on them. */
                struct list_head fence_list;
 
+               /**
+                * List of objects currently pending being freed.
+                *
+                * These objects are no longer in use, but due to a signal
+                * we were prevented from freeing them at the appointed time.
+                */
+               struct list_head deferred_free_list;
+
                /**
                 * We leave the user IRQ off as much as possible,
                 * but this means that requests will finish and never
@@ -672,7 +690,7 @@ struct drm_i915_gem_object {
         *
         * Size: 4 bits for 16 fences + sign (for FENCE_REG_NONE)
         */
-       int fence_reg : 5;
+       signed int fence_reg : 5;
 
        /**
         * Used for checking the object doesn't appear more than once
@@ -708,7 +726,7 @@ struct drm_i915_gem_object {
         *
         * In the worst case this is 1 + 1 + 1 + 2*2 = 7. That would fit into 3
         * bits with absolutely no headroom. So use 4 bits. */
-       int pin_count : 4;
+       unsigned int pin_count : 4;
 #define DRM_I915_GEM_OBJECT_MAX_PIN_COUNT 0xf
 
        /** AGP memory structure for our GTT binding. */
@@ -738,7 +756,7 @@ struct drm_i915_gem_object {
        uint32_t stride;
 
        /** Record of address bit 17 of each page at last unbind. */
-       long *bit_17;
+       unsigned long *bit_17;
 
        /** AGP mapping type (AGP_USER_MEMORY or AGP_USER_CACHED_MEMORY */
        uint32_t agp_type;
@@ -950,8 +968,7 @@ uint32_t i915_get_gem_seqno(struct drm_device *dev,
 bool i915_seqno_passed(uint32_t seq1, uint32_t seq2);
 int i915_gem_object_get_fence_reg(struct drm_gem_object *obj);
 int i915_gem_object_put_fence_reg(struct drm_gem_object *obj);
-void i915_gem_retire_requests(struct drm_device *dev,
-                struct intel_ring_buffer *ring);
+void i915_gem_retire_requests(struct drm_device *dev);
 void i915_gem_retire_work_handler(struct work_struct *work);
 void i915_gem_clflush_object(struct drm_gem_object *obj);
 int i915_gem_object_set_domain(struct drm_gem_object *obj,
@@ -981,7 +998,7 @@ void i915_gem_free_all_phys_object(struct drm_device *dev);
 int i915_gem_object_get_pages(struct drm_gem_object *obj, gfp_t gfpmask);
 void i915_gem_object_put_pages(struct drm_gem_object *obj);
 void i915_gem_release(struct drm_device * dev, struct drm_file *file_priv);
-void i915_gem_object_flush_write_domain(struct drm_gem_object *obj);
+int i915_gem_object_flush_write_domain(struct drm_gem_object *obj);
 
 void i915_gem_shrinker_init(void);
 void i915_gem_shrinker_exit(void);
@@ -1041,6 +1058,7 @@ extern void intel_modeset_cleanup(struct drm_device *dev);
 extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state);
 extern void i8xx_disable_fbc(struct drm_device *dev);
 extern void g4x_disable_fbc(struct drm_device *dev);
+extern void ironlake_disable_fbc(struct drm_device *dev);
 extern void intel_disable_fbc(struct drm_device *dev);
 extern void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval);
 extern bool intel_fbc_enabled(struct drm_device *dev);
@@ -1130,6 +1148,8 @@ extern int intel_trans_dp_port_sel (struct drm_crtc *crtc);
 #define IS_I945GM(dev)         (INTEL_INFO(dev)->is_i945gm)
 #define IS_I965G(dev)          (INTEL_INFO(dev)->is_i965g)
 #define IS_I965GM(dev)         (INTEL_INFO(dev)->is_i965gm)
+#define IS_BROADWATER(dev)     (INTEL_INFO(dev)->is_broadwater)
+#define IS_CRESTLINE(dev)      (INTEL_INFO(dev)->is_crestline)
 #define IS_GM45(dev)           ((dev)->pci_device == 0x2A42)
 #define IS_G4X(dev)            (INTEL_INFO(dev)->is_g4x)
 #define IS_PINEVIEW_G(dev)     ((dev)->pci_device == 0xa001)
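
Among the header changes above, two bit-fields gain explicit signedness: fence_reg becomes "signed int ... : 5" so it can really hold FENCE_REG_NONE (-1), and pin_count becomes unsigned. Plain "int" bit-fields have implementation-defined signedness in C, so relying on them to hold a negative sentinel is fragile. A short demonstration (struct names are made up):

    #include <stdio.h>

    #define FENCE_REG_NONE (-1)

    struct obj_plain  { int        fence_reg : 5; };  /* signedness is implementation-defined */
    struct obj_signed { signed int fence_reg : 5; };  /* guaranteed to hold -16..15 */

    int main(void)
    {
            struct obj_plain  p = { .fence_reg = FENCE_REG_NONE };
            struct obj_signed s = { .fence_reg = FENCE_REG_NONE };

            /* If the compiler treats plain bit-fields as unsigned,
             * this prints 31 rather than -1. */
            printf("plain:  %d\n", (int)p.fence_reg);
            printf("signed: %d\n", (int)s.fence_reg);
            return 0;
    }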
index 15d2d93aaca997f551bd82cc6222a5963b9bd607..4efd4fd3b340e8f185b407ddf9764ab446ecd9ba 100644 (file)
@@ -35,7 +35,7 @@
 #include <linux/swap.h>
 #include <linux/pci.h>
 
-static void i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj);
+static int i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj);
 static void i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj);
 static void i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj);
 static int i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj,
@@ -53,6 +53,7 @@ static int i915_gem_evict_from_inactive_list(struct drm_device *dev);
 static int i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
                                struct drm_i915_gem_pwrite *args,
                                struct drm_file *file_priv);
+static void i915_gem_free_object_tail(struct drm_gem_object *obj);
 
 static LIST_HEAD(shrink_list);
 static DEFINE_SPINLOCK(shrink_list_lock);
@@ -127,8 +128,7 @@ i915_gem_create_ioctl(struct drm_device *dev, void *data,
                return -ENOMEM;
 
        ret = drm_gem_handle_create(file_priv, obj, &handle);
-       drm_gem_object_handle_unreference_unlocked(obj);
-
+       drm_gem_object_unreference_unlocked(obj);
        if (ret)
                return ret;
 
@@ -1709,9 +1709,9 @@ i915_get_gem_seqno(struct drm_device *dev,
 /**
  * This function clears the request list as sequence numbers are passed.
  */
-void
-i915_gem_retire_requests(struct drm_device *dev,
-               struct intel_ring_buffer *ring)
+static void
+i915_gem_retire_requests_ring(struct drm_device *dev,
+                             struct intel_ring_buffer *ring)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
        uint32_t seqno;
@@ -1750,6 +1750,30 @@ i915_gem_retire_requests(struct drm_device *dev,
        }
 }
 
+void
+i915_gem_retire_requests(struct drm_device *dev)
+{
+       drm_i915_private_t *dev_priv = dev->dev_private;
+
+       if (!list_empty(&dev_priv->mm.deferred_free_list)) {
+           struct drm_i915_gem_object *obj_priv, *tmp;
+
+           /* We must be careful that during unbind() we do not
+            * accidentally infinitely recurse into retire requests.
+            * Currently:
+            *   retire -> free -> unbind -> wait -> retire_ring
+            */
+           list_for_each_entry_safe(obj_priv, tmp,
+                                    &dev_priv->mm.deferred_free_list,
+                                    list)
+                   i915_gem_free_object_tail(&obj_priv->base);
+       }
+
+       i915_gem_retire_requests_ring(dev, &dev_priv->render_ring);
+       if (HAS_BSD(dev))
+               i915_gem_retire_requests_ring(dev, &dev_priv->bsd_ring);
+}
+
 void
 i915_gem_retire_work_handler(struct work_struct *work)
 {
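
The deferred_free_list handling above follows a simple defer-and-retry pattern: an object whose teardown is interrupted is parked on a list and destroyed the next time requests are retired, avoiding the recursion noted in the comment. A minimal, self-contained userspace sketch of that pattern (the obj type, fake_unbind() and the locally re-defined ERESTARTSYS value are stand-ins, not driver code):

#include <stdio.h>
#include <stdlib.h>

#define ERESTARTSYS 512              /* kernel-internal value, redefined for the sketch */

struct obj {
        int id;
        int interrupt_once;          /* pretend the first unbind is interrupted */
        struct obj *next;
};

static struct obj *deferred_free_list;

static int fake_unbind(struct obj *o)
{
        if (o->interrupt_once) {
                o->interrupt_once = 0;        /* the retry will succeed */
                return -ERESTARTSYS;
        }
        return 0;
}

static void free_object_tail(struct obj *o)
{
        if (fake_unbind(o) == -ERESTARTSYS) {
                o->next = deferred_free_list; /* park it for later */
                deferred_free_list = o;
                return;
        }
        printf("object %d freed\n", o->id);
        free(o);
}

static void retire_requests(void)
{
        struct obj *o = deferred_free_list;

        deferred_free_list = NULL;            /* finish interrupted teardowns */
        while (o) {
                struct obj *next = o->next;
                free_object_tail(o);
                o = next;
        }
}

int main(void)
{
        struct obj *o = calloc(1, sizeof(*o));

        if (!o)
                return 1;
        o->id = 1;
        o->interrupt_once = 1;
        free_object_tail(o);          /* interrupted: object is deferred, not freed */
        retire_requests();            /* retried here: object is actually freed */
        return 0;
}
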
@@ -1761,10 +1785,7 @@ i915_gem_retire_work_handler(struct work_struct *work)
        dev = dev_priv->dev;
 
        mutex_lock(&dev->struct_mutex);
-       i915_gem_retire_requests(dev, &dev_priv->render_ring);
-
-       if (HAS_BSD(dev))
-               i915_gem_retire_requests(dev, &dev_priv->bsd_ring);
+       i915_gem_retire_requests(dev);
 
        if (!dev_priv->mm.suspended &&
                (!list_empty(&dev_priv->render_ring.request_list) ||
@@ -1832,7 +1853,7 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno,
         * a separate wait queue to handle that.
         */
        if (ret == 0)
-               i915_gem_retire_requests(dev, ring);
+               i915_gem_retire_requests_ring(dev, ring);
 
        return ret;
 }
@@ -1945,11 +1966,12 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
         * before we unbind.
         */
        ret = i915_gem_object_set_to_cpu_domain(obj, 1);
-       if (ret) {
-               if (ret != -ERESTARTSYS)
-                       DRM_ERROR("set_domain failed: %d\n", ret);
+       if (ret == -ERESTARTSYS)
                return ret;
-       }
+       /* Continue on if we fail due to EIO: the GPU is hung, so we
+        * should be safe, and we need to clean up or else we might
+        * cause memory corruption through use-after-free.
+        */
 
        BUG_ON(obj_priv->active);
 
@@ -1985,7 +2007,7 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
 
        trace_i915_gem_object_unbind(obj);
 
-       return 0;
+       return ret;
 }
 
 static struct drm_gem_object *
@@ -2107,10 +2129,7 @@ i915_gem_evict_something(struct drm_device *dev, int min_size)
        struct intel_ring_buffer *render_ring = &dev_priv->render_ring;
        struct intel_ring_buffer *bsd_ring = &dev_priv->bsd_ring;
        for (;;) {
-               i915_gem_retire_requests(dev, render_ring);
-
-               if (HAS_BSD(dev))
-                       i915_gem_retire_requests(dev, bsd_ring);
+               i915_gem_retire_requests(dev);
 
                /* If there's an inactive buffer available now, grab it
                 * and be done.
@@ -2583,7 +2602,10 @@ i915_gem_object_put_fence_reg(struct drm_gem_object *obj)
        if (!IS_I965G(dev)) {
                int ret;
 
-               i915_gem_object_flush_gpu_write_domain(obj);
+               ret = i915_gem_object_flush_gpu_write_domain(obj);
+               if (ret != 0)
+                       return ret;
+
                ret = i915_gem_object_wait_rendering(obj);
                if (ret != 0)
                        return ret;
@@ -2731,7 +2753,7 @@ i915_gem_clflush_object(struct drm_gem_object *obj)
 }
 
 /** Flushes any GPU write domain for the object if it's dirty. */
-static void
+static int
 i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj)
 {
        struct drm_device *dev = obj->dev;
@@ -2739,17 +2761,18 @@ i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj)
        struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 
        if ((obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
-               return;
+               return 0;
 
        /* Queue the GPU write cache flushing we need. */
        old_write_domain = obj->write_domain;
        i915_gem_flush(dev, 0, obj->write_domain);
-       (void) i915_add_request(dev, NULL, obj->write_domain, obj_priv->ring);
-       BUG_ON(obj->write_domain);
+       if (i915_add_request(dev, NULL, obj->write_domain, obj_priv->ring) == 0)
+               return -ENOMEM;
 
        trace_i915_gem_object_change_domain(obj,
                                            obj->read_domains,
                                            old_write_domain);
+       return 0;
 }
 
 /** Flushes the GTT write domain for the object if it's dirty. */
@@ -2793,9 +2816,11 @@ i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj)
                                            old_write_domain);
 }
 
-void
+int
 i915_gem_object_flush_write_domain(struct drm_gem_object *obj)
 {
+       int ret = 0;
+
        switch (obj->write_domain) {
        case I915_GEM_DOMAIN_GTT:
                i915_gem_object_flush_gtt_write_domain(obj);
@@ -2804,9 +2829,11 @@ i915_gem_object_flush_write_domain(struct drm_gem_object *obj)
                i915_gem_object_flush_cpu_write_domain(obj);
                break;
        default:
-               i915_gem_object_flush_gpu_write_domain(obj);
+               ret = i915_gem_object_flush_gpu_write_domain(obj);
                break;
        }
+
+       return ret;
 }
 
 /**
@@ -2826,7 +2853,10 @@ i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
        if (obj_priv->gtt_space == NULL)
                return -EINVAL;
 
-       i915_gem_object_flush_gpu_write_domain(obj);
+       ret = i915_gem_object_flush_gpu_write_domain(obj);
+       if (ret != 0)
+               return ret;
+
        /* Wait on any GPU rendering and flushing to occur. */
        ret = i915_gem_object_wait_rendering(obj);
        if (ret != 0)
@@ -2876,7 +2906,9 @@ i915_gem_object_set_to_display_plane(struct drm_gem_object *obj)
        if (obj_priv->gtt_space == NULL)
                return -EINVAL;
 
-       i915_gem_object_flush_gpu_write_domain(obj);
+       ret = i915_gem_object_flush_gpu_write_domain(obj);
+       if (ret)
+               return ret;
 
        /* Wait on any GPU rendering and flushing to occur. */
        if (obj_priv->active) {
@@ -2924,7 +2956,10 @@ i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
        uint32_t old_write_domain, old_read_domains;
        int ret;
 
-       i915_gem_object_flush_gpu_write_domain(obj);
+       ret = i915_gem_object_flush_gpu_write_domain(obj);
+       if (ret)
+               return ret;
+
        /* Wait on any GPU rendering and flushing to occur. */
        ret = i915_gem_object_wait_rendering(obj);
        if (ret != 0)
@@ -3214,7 +3249,10 @@ i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
        if (offset == 0 && size == obj->size)
                return i915_gem_object_set_to_cpu_domain(obj, 0);
 
-       i915_gem_object_flush_gpu_write_domain(obj);
+       ret = i915_gem_object_flush_gpu_write_domain(obj);
+       if (ret)
+               return ret;
+
        /* Wait on any GPU rendering and flushing to occur. */
        ret = i915_gem_object_wait_rendering(obj);
        if (ret != 0)
@@ -3645,6 +3683,7 @@ i915_gem_wait_for_pending_flip(struct drm_device *dev,
        return ret;
 }
 
+
 int
 i915_gem_do_execbuffer(struct drm_device *dev, void *data,
                       struct drm_file *file_priv,
@@ -3792,7 +3831,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
                                unsigned long long total_size = 0;
                                int num_fences = 0;
                                for (i = 0; i < args->buffer_count; i++) {
-                                       obj_priv = object_list[i]->driver_private;
+                                       obj_priv = to_intel_bo(object_list[i]);
 
                                        total_size += object_list[i]->size;
                                        num_fences +=
@@ -4310,7 +4349,6 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
        struct drm_i915_gem_busy *args = data;
        struct drm_gem_object *obj;
        struct drm_i915_gem_object *obj_priv;
-       drm_i915_private_t *dev_priv = dev->dev_private;
 
        obj = drm_gem_object_lookup(dev, file_priv, args->handle);
        if (obj == NULL) {
@@ -4325,10 +4363,7 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
         * actually unmasked, and our working set ends up being larger than
         * required.
         */
-       i915_gem_retire_requests(dev, &dev_priv->render_ring);
-
-       if (HAS_BSD(dev))
-               i915_gem_retire_requests(dev, &dev_priv->bsd_ring);
+       i915_gem_retire_requests(dev);
 
        obj_priv = to_intel_bo(obj);
        /* Don't count being on the flushing list against the object being
@@ -4438,20 +4473,19 @@ int i915_gem_init_object(struct drm_gem_object *obj)
        return 0;
 }
 
-void i915_gem_free_object(struct drm_gem_object *obj)
+static void i915_gem_free_object_tail(struct drm_gem_object *obj)
 {
        struct drm_device *dev = obj->dev;
+       drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
+       int ret;
 
-       trace_i915_gem_object_destroy(obj);
-
-       while (obj_priv->pin_count > 0)
-               i915_gem_object_unpin(obj);
-
-       if (obj_priv->phys_obj)
-               i915_gem_detach_phys_object(dev, obj);
-
-       i915_gem_object_unbind(obj);
+       ret = i915_gem_object_unbind(obj);
+       if (ret == -ERESTARTSYS) {
+               list_move(&obj_priv->list,
+                         &dev_priv->mm.deferred_free_list);
+               return;
+       }
 
        if (obj_priv->mmap_offset)
                i915_gem_free_mmap_offset(obj);
@@ -4463,6 +4497,22 @@ void i915_gem_free_object(struct drm_gem_object *obj)
        kfree(obj_priv);
 }
 
+void i915_gem_free_object(struct drm_gem_object *obj)
+{
+       struct drm_device *dev = obj->dev;
+       struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
+
+       trace_i915_gem_object_destroy(obj);
+
+       while (obj_priv->pin_count > 0)
+               i915_gem_object_unpin(obj);
+
+       if (obj_priv->phys_obj)
+               i915_gem_detach_phys_object(dev, obj);
+
+       i915_gem_free_object_tail(obj);
+}
+
 /** Unbinds all inactive objects. */
 static int
 i915_gem_evict_from_inactive_list(struct drm_device *dev)
@@ -4686,9 +4736,19 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
        BUG_ON(HAS_BSD(dev) && !list_empty(&dev_priv->bsd_ring.request_list));
        mutex_unlock(&dev->struct_mutex);
 
-       drm_irq_install(dev);
+       ret = drm_irq_install(dev);
+       if (ret)
+               goto cleanup_ringbuffer;
 
        return 0;
+
+cleanup_ringbuffer:
+       mutex_lock(&dev->struct_mutex);
+       i915_gem_cleanup_ringbuffer(dev);
+       dev_priv->mm.suspended = 1;
+       mutex_unlock(&dev->struct_mutex);
+
+       return ret;
 }
 
 int
@@ -4726,6 +4786,7 @@ i915_gem_load(struct drm_device *dev)
        INIT_LIST_HEAD(&dev_priv->mm.gpu_write_list);
        INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
        INIT_LIST_HEAD(&dev_priv->mm.fence_list);
+       INIT_LIST_HEAD(&dev_priv->mm.deferred_free_list);
        INIT_LIST_HEAD(&dev_priv->render_ring.active_list);
        INIT_LIST_HEAD(&dev_priv->render_ring.request_list);
        if (HAS_BSD(dev)) {
@@ -5024,10 +5085,7 @@ rescan:
                        continue;
 
                spin_unlock(&shrink_list_lock);
-               i915_gem_retire_requests(dev, &dev_priv->render_ring);
-
-               if (HAS_BSD(dev))
-                       i915_gem_retire_requests(dev, &dev_priv->bsd_ring);
+               i915_gem_retire_requests(dev);
 
                list_for_each_entry_safe(obj_priv, next_obj,
                                         &dev_priv->mm.inactive_list,
index 4b7c49d4257d3de8d938680280762e1b2e1e4ab6..155719e4d16fa899745bbf2ee757c9fdd65cc827 100644 (file)
@@ -333,8 +333,6 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
                        i915_gem_release_mmap(obj);
 
                if (ret != 0) {
-                       WARN(ret != -ERESTARTSYS,
-                            "failed to reset object for tiling switch");
                        args->tiling_mode = obj_priv->tiling_mode;
                        args->stride = obj_priv->stride;
                        goto err;
index dba53d4b9fb3a11b7b76194e0fa6aec97211e966..85785a8844ed5e422c99ca96a4a88e5d6a404a1a 100644 (file)
@@ -171,10 +171,10 @@ void intel_enable_asle (struct drm_device *dev)
                ironlake_enable_display_irq(dev_priv, DE_GSE);
        else {
                i915_enable_pipestat(dev_priv, 1,
-                                    I915_LEGACY_BLC_EVENT_ENABLE);
+                                    PIPE_LEGACY_BLC_EVENT_ENABLE);
                if (IS_I965G(dev))
                        i915_enable_pipestat(dev_priv, 0,
-                                            I915_LEGACY_BLC_EVENT_ENABLE);
+                                            PIPE_LEGACY_BLC_EVENT_ENABLE);
        }
 }
 
@@ -842,7 +842,6 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
        u32 iir, new_iir;
        u32 pipea_stats, pipeb_stats;
        u32 vblank_status;
-       u32 vblank_enable;
        int vblank = 0;
        unsigned long irqflags;
        int irq_received;
@@ -856,13 +855,10 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
 
        iir = I915_READ(IIR);
 
-       if (IS_I965G(dev)) {
-               vblank_status = I915_START_VBLANK_INTERRUPT_STATUS;
-               vblank_enable = PIPE_START_VBLANK_INTERRUPT_ENABLE;
-       } else {
-               vblank_status = I915_VBLANK_INTERRUPT_STATUS;
-               vblank_enable = I915_VBLANK_INTERRUPT_ENABLE;
-       }
+       if (IS_I965G(dev))
+               vblank_status = PIPE_START_VBLANK_INTERRUPT_STATUS;
+       else
+               vblank_status = PIPE_VBLANK_INTERRUPT_STATUS;
 
        for (;;) {
                irq_received = iir != 0;
@@ -966,8 +962,8 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
                                intel_finish_page_flip(dev, 1);
                }
 
-               if ((pipea_stats & I915_LEGACY_BLC_EVENT_STATUS) ||
-                   (pipeb_stats & I915_LEGACY_BLC_EVENT_STATUS) ||
+               if ((pipea_stats & PIPE_LEGACY_BLC_EVENT_STATUS) ||
+                   (pipeb_stats & PIPE_LEGACY_BLC_EVENT_STATUS) ||
                    (iir & I915_ASLE_INTERRUPT))
                        opregion_asle_intr(dev);
 
@@ -1233,16 +1229,21 @@ void i915_hangcheck_elapsed(unsigned long data)
 {
        struct drm_device *dev = (struct drm_device *)data;
        drm_i915_private_t *dev_priv = dev->dev_private;
-       uint32_t acthd;
+       uint32_t acthd, instdone, instdone1;
 
        /* No reset support on this chip yet. */
        if (IS_GEN6(dev))
                return;
 
-       if (!IS_I965G(dev))
+       if (!IS_I965G(dev)) {
                acthd = I915_READ(ACTHD);
-       else
+               instdone = I915_READ(INSTDONE);
+               instdone1 = 0;
+       } else {
                acthd = I915_READ(ACTHD_I965);
+               instdone = I915_READ(INSTDONE_I965);
+               instdone1 = I915_READ(INSTDONE1);
+       }
 
        /* If all work is done then ACTHD clearly hasn't advanced. */
        if (list_empty(&dev_priv->render_ring.request_list) ||
@@ -1253,21 +1254,24 @@ void i915_hangcheck_elapsed(unsigned long data)
                return;
        }
 
-       if (dev_priv->last_acthd == acthd && dev_priv->hangcheck_count > 0) {
-               DRM_ERROR("Hangcheck timer elapsed... GPU hung\n");
-               i915_handle_error(dev, true);
-               return;
-       } 
+       if (dev_priv->last_acthd == acthd &&
+           dev_priv->last_instdone == instdone &&
+           dev_priv->last_instdone1 == instdone1) {
+               if (dev_priv->hangcheck_count++ > 1) {
+                       DRM_ERROR("Hangcheck timer elapsed... GPU hung\n");
+                       i915_handle_error(dev, true);
+                       return;
+               }
+       } else {
+               dev_priv->hangcheck_count = 0;
+
+               dev_priv->last_acthd = acthd;
+               dev_priv->last_instdone = instdone;
+               dev_priv->last_instdone1 = instdone1;
+       }
 
        /* Reset timer case chip hangs without another request being added */
        mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD);
-
-       if (acthd != dev_priv->last_acthd)
-               dev_priv->hangcheck_count = 0;
-       else
-               dev_priv->hangcheck_count++;
-
-       dev_priv->last_acthd = acthd;
 }
 
 /* drm_dma.h hooks
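
The reworked hangcheck above only flags a hang once ACTHD and both INSTDONE values have stayed identical across more than one check period, and resets the counter otherwise. A rough userspace sketch of that state machine (the sampled values are plain integers here, not real MMIO reads):

#include <stdio.h>

struct hangcheck {
        unsigned int last_acthd, last_instdone, last_instdone1;
        int count;
};

/* returns 1 when the GPU should be considered hung */
static int hangcheck_elapsed(struct hangcheck *hc, unsigned int acthd,
                             unsigned int instdone, unsigned int instdone1)
{
        if (hc->last_acthd == acthd &&
            hc->last_instdone == instdone &&
            hc->last_instdone1 == instdone1) {
                if (hc->count++ > 1)
                        return 1;
        } else {
                hc->count = 0;
                hc->last_acthd = acthd;
                hc->last_instdone = instdone;
                hc->last_instdone1 = instdone1;
        }
        return 0;
}

int main(void)
{
        struct hangcheck hc = { 0 };
        int i;

        /* identical samples every period: hung on the third stuck check */
        for (i = 0; i < 4; i++)
                printf("check %d: %s\n", i,
                       hangcheck_elapsed(&hc, 0x1234, 0x5678, 0x9abc) ?
                       "GPU hung" : "ok");
        return 0;
}
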
index 6d9b0288272a7e0c4f424bd66de1964af60e9a22..281db6e5403ad1d9ed024e2924cfbc0344c1951a 100644 (file)
 #define GEN6_RENDER_IMR                0x20a8
 #define   GEN6_RENDER_CONTEXT_SWITCH_INTERRUPT         (1 << 8)
 #define   GEN6_RENDER_PPGTT_PAGE_FAULT                 (1 << 7)
-#define   GEN6_RENDER TIMEOUT_COUNTER_EXPIRED          (1 << 6)
+#define   GEN6_RENDER_TIMEOUT_COUNTER_EXPIRED          (1 << 6)
 #define   GEN6_RENDER_L3_PARITY_ERROR                  (1 << 5)
 #define   GEN6_RENDER_PIPE_CONTROL_NOTIFY_INTERRUPT    (1 << 4)
 #define   GEN6_RENDER_COMMAND_PARSER_MASTER_ERROR      (1 << 3)
 #define DPFC_CHICKEN           0x3224
 #define   DPFC_HT_MODIFY       (1<<31)
 
+/* Framebuffer compression for Ironlake */
+#define ILK_DPFC_CB_BASE       0x43200
+#define ILK_DPFC_CONTROL       0x43208
+/* Bits 28-8 are reserved */
+#define   DPFC_RESERVED                (0x1FFFFF00)
+#define ILK_DPFC_RECOMP_CTL    0x4320c
+#define ILK_DPFC_STATUS                0x43210
+#define ILK_DPFC_FENCE_YOFF    0x43218
+#define ILK_DPFC_CHICKEN       0x43224
+#define ILK_FBC_RT_BASE                0x2128
+#define   ILK_FBC_RT_VALID     (1<<0)
+
+#define ILK_DISPLAY_CHICKEN1   0x42000
+#define   ILK_FBCQ_DIS         (1<<22)
+
 /*
  * GPIO regs
  */
 #define   DPLL_FPA01_P1_POST_DIV_MASK  0x00ff0000 /* i915 */
 #define   DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW 0x00ff8000 /* Pineview */
 
-#define I915_FIFO_UNDERRUN_STATUS              (1UL<<31)
-#define I915_CRC_ERROR_ENABLE                  (1UL<<29)
-#define I915_CRC_DONE_ENABLE                   (1UL<<28)
-#define I915_GMBUS_EVENT_ENABLE                        (1UL<<27)
-#define I915_VSYNC_INTERRUPT_ENABLE            (1UL<<25)
-#define I915_DISPLAY_LINE_COMPARE_ENABLE       (1UL<<24)
-#define I915_DPST_EVENT_ENABLE                 (1UL<<23)
-#define I915_LEGACY_BLC_EVENT_ENABLE           (1UL<<22)
-#define I915_ODD_FIELD_INTERRUPT_ENABLE                (1UL<<21)
-#define I915_EVEN_FIELD_INTERRUPT_ENABLE       (1UL<<20)
-#define I915_START_VBLANK_INTERRUPT_ENABLE     (1UL<<18)       /* 965 or later */
-#define I915_VBLANK_INTERRUPT_ENABLE           (1UL<<17)
-#define I915_OVERLAY_UPDATED_ENABLE            (1UL<<16)
-#define I915_CRC_ERROR_INTERRUPT_STATUS                (1UL<<13)
-#define I915_CRC_DONE_INTERRUPT_STATUS         (1UL<<12)
-#define I915_GMBUS_INTERRUPT_STATUS            (1UL<<11)
-#define I915_VSYNC_INTERRUPT_STATUS            (1UL<<9)
-#define I915_DISPLAY_LINE_COMPARE_STATUS       (1UL<<8)
-#define I915_DPST_EVENT_STATUS                 (1UL<<7)
-#define I915_LEGACY_BLC_EVENT_STATUS           (1UL<<6)
-#define I915_ODD_FIELD_INTERRUPT_STATUS                (1UL<<5)
-#define I915_EVEN_FIELD_INTERRUPT_STATUS       (1UL<<4)
-#define I915_START_VBLANK_INTERRUPT_STATUS     (1UL<<2)        /* 965 or later */
-#define I915_VBLANK_INTERRUPT_STATUS           (1UL<<1)
-#define I915_OVERLAY_UPDATED_STATUS            (1UL<<0)
-
 #define SRX_INDEX              0x3c4
 #define SRX_DATA               0x3c5
 #define SR01                   1
 #define I830_FIFO_LINE_SIZE    32
 
 #define G4X_FIFO_SIZE          127
-#define I945_FIFO_SIZE         127 /* 945 & 965 */
+#define I965_FIFO_SIZE         512
+#define I945_FIFO_SIZE         127
 #define I915_FIFO_SIZE         95
 #define I855GM_FIFO_SIZE       127 /* In cachelines */
 #define I830_FIFO_SIZE         95
 #define PINEVIEW_CURSOR_DFT_WM 0
 #define PINEVIEW_CURSOR_GUARD_WM       5
 
+#define I965_CURSOR_FIFO       64
+#define I965_CURSOR_MAX_WM     32
+#define I965_CURSOR_DFT_WM     8
 
 /* define the Watermark register on Ironlake */
 #define WM0_PIPEA_ILK          0x45100
 #define ILK_DISPLAY_FIFO       128
 #define ILK_DISPLAY_MAXWM      64
 #define ILK_DISPLAY_DFTWM      8
+#define ILK_CURSOR_FIFO                32
+#define ILK_CURSOR_MAXWM       16
+#define ILK_CURSOR_DFTWM       8
 
 #define ILK_DISPLAY_SR_FIFO    512
 #define ILK_DISPLAY_MAX_SRWM   0x1ff
 #define  ILK_VSDPFD_FULL       (1<<21)
 #define ILK_DSPCLK_GATE                0x42020
 #define  ILK_DPARB_CLK_GATE    (1<<5)
+/* According to the spec, bits 7/8/9 of 0x42020 should be set to enable FBC */
+#define   ILK_CLK_FBC          (1<<7)
+#define   ILK_DPFC_DIS1                (1<<8)
+#define   ILK_DPFC_DIS2                (1<<9)
 
 #define DISP_ARB_CTL   0x45000
 #define  DISP_TILE_SURFACE_SWIZZLING   (1<<13)
 
 #define PCH_PP_STATUS          0xc7200
 #define PCH_PP_CONTROL         0xc7204
+#define  PANEL_UNLOCK_REGS     (0xabcd << 16)
 #define  EDP_FORCE_VDD         (1 << 3)
 #define  EDP_BLC_ENABLE                (1 << 2)
 #define  PANEL_POWER_RESET     (1 << 1)
index 60a5800fba6e33c6638dc7fc455a33b041a3df8d..6e2025274db500e9d11c5ace422000196e457dd6 100644 (file)
@@ -602,7 +602,9 @@ void i915_save_display(struct drm_device *dev)
 
        /* Only save FBC state on the platform that supports FBC */
        if (I915_HAS_FBC(dev)) {
-               if (IS_GM45(dev)) {
+               if (IS_IRONLAKE_M(dev)) {
+                       dev_priv->saveDPFC_CB_BASE = I915_READ(ILK_DPFC_CB_BASE);
+               } else if (IS_GM45(dev)) {
                        dev_priv->saveDPFC_CB_BASE = I915_READ(DPFC_CB_BASE);
                } else {
                        dev_priv->saveFBC_CFB_BASE = I915_READ(FBC_CFB_BASE);
@@ -706,7 +708,10 @@ void i915_restore_display(struct drm_device *dev)
 
        /* only restore FBC info on the platform that supports FBC*/
        if (I915_HAS_FBC(dev)) {
-               if (IS_GM45(dev)) {
+               if (IS_IRONLAKE_M(dev)) {
+                       ironlake_disable_fbc(dev);
+                       I915_WRITE(ILK_DPFC_CB_BASE, dev_priv->saveDPFC_CB_BASE);
+               } else if (IS_GM45(dev)) {
                        g4x_disable_fbc(dev);
                        I915_WRITE(DPFC_CB_BASE, dev_priv->saveDPFC_CB_BASE);
                } else {
index f879589bead10796c523f3ea1d79135a3105d1c9..ae1718549eecf548663cafa8de9a6772088e3c89 100644 (file)
@@ -43,6 +43,7 @@
 bool intel_pipe_has_type (struct drm_crtc *crtc, int type);
 static void intel_update_watermarks(struct drm_device *dev);
 static void intel_increase_pllclock(struct drm_crtc *crtc, bool schedule);
+static void intel_crtc_update_cursor(struct drm_crtc *crtc);
 
 typedef struct {
     /* given values */
@@ -323,6 +324,9 @@ struct intel_limit {
 #define IRONLAKE_DP_P1_MIN             1
 #define IRONLAKE_DP_P1_MAX             2
 
+/* FDI */
+#define IRONLAKE_FDI_FREQ              2700000 /* in kHz for mode->clock */
+
 static bool
 intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
                    int target, int refclk, intel_clock_t *best_clock);
@@ -863,8 +867,8 @@ intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
        intel_clock_t clock;
        int max_n;
        bool found;
-       /* approximately equals target * 0.00488 */
-       int err_most = (target >> 8) + (target >> 10);
+       /* approximately equals target * 0.00585 */
+       int err_most = (target >> 8) + (target >> 9);
        found = false;
 
        if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
@@ -1123,6 +1127,67 @@ static bool g4x_fbc_enabled(struct drm_device *dev)
        return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN;
 }
 
+static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
+{
+       struct drm_device *dev = crtc->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_framebuffer *fb = crtc->fb;
+       struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
+       struct drm_i915_gem_object *obj_priv = to_intel_bo(intel_fb->obj);
+       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+       int plane = (intel_crtc->plane == 0) ? DPFC_CTL_PLANEA :
+                                              DPFC_CTL_PLANEB;
+       unsigned long stall_watermark = 200;
+       u32 dpfc_ctl;
+
+       dev_priv->cfb_pitch = (dev_priv->cfb_pitch / 64) - 1;
+       dev_priv->cfb_fence = obj_priv->fence_reg;
+       dev_priv->cfb_plane = intel_crtc->plane;
+
+       dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
+       dpfc_ctl &= DPFC_RESERVED;
+       dpfc_ctl |= (plane | DPFC_CTL_LIMIT_1X);
+       if (obj_priv->tiling_mode != I915_TILING_NONE) {
+               dpfc_ctl |= (DPFC_CTL_FENCE_EN | dev_priv->cfb_fence);
+               I915_WRITE(ILK_DPFC_CHICKEN, DPFC_HT_MODIFY);
+       } else {
+               I915_WRITE(ILK_DPFC_CHICKEN, ~DPFC_HT_MODIFY);
+       }
+
+       I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl);
+       I915_WRITE(ILK_DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
+                  (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
+                  (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
+       I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y);
+       I915_WRITE(ILK_FBC_RT_BASE, obj_priv->gtt_offset | ILK_FBC_RT_VALID);
+       /* enable it... */
+       I915_WRITE(ILK_DPFC_CONTROL, I915_READ(ILK_DPFC_CONTROL) |
+                  DPFC_CTL_EN);
+
+       DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
+}
+
+void ironlake_disable_fbc(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       u32 dpfc_ctl;
+
+       /* Disable compression */
+       dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
+       dpfc_ctl &= ~DPFC_CTL_EN;
+       I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl);
+       intel_wait_for_vblank(dev);
+
+       DRM_DEBUG_KMS("disabled FBC\n");
+}
+
+static bool ironlake_fbc_enabled(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+
+       return I915_READ(ILK_DPFC_CONTROL) & DPFC_CTL_EN;
+}
+
 bool intel_fbc_enabled(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1181,8 +1246,12 @@ static void intel_update_fbc(struct drm_crtc *crtc,
        struct drm_framebuffer *fb = crtc->fb;
        struct intel_framebuffer *intel_fb;
        struct drm_i915_gem_object *obj_priv;
+       struct drm_crtc *tmp_crtc;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        int plane = intel_crtc->plane;
+       int crtcs_enabled = 0;
+
+       DRM_DEBUG_KMS("\n");
 
        if (!i915_powersave)
                return;
@@ -1200,10 +1269,21 @@ static void intel_update_fbc(struct drm_crtc *crtc,
         * If FBC is already on, we just have to verify that we can
         * keep it that way...
         * Need to disable if:
+        *   - more than one pipe is active
         *   - changing FBC params (stride, fence, mode)
         *   - new fb is too large to fit in compressed buffer
         *   - going to an unsupported config (interlace, pixel multiply, etc.)
         */
+       list_for_each_entry(tmp_crtc, &dev->mode_config.crtc_list, head) {
+               if (tmp_crtc->enabled)
+                       crtcs_enabled++;
+       }
+       DRM_DEBUG_KMS("%d pipes active\n", crtcs_enabled);
+       if (crtcs_enabled > 1) {
+               DRM_DEBUG_KMS("more than one pipe active, disabling compression\n");
+               dev_priv->no_fbc_reason = FBC_MULTIPLE_PIPES;
+               goto out_disable;
+       }
        if (intel_fb->obj->size > dev_priv->cfb_size) {
                DRM_DEBUG_KMS("framebuffer too large, disabling "
                                "compression\n");
@@ -1256,7 +1336,7 @@ out_disable:
        }
 }
 
-static int
+int
 intel_pin_and_fence_fb_obj(struct drm_device *dev, struct drm_gem_object *obj)
 {
        struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
@@ -1265,7 +1345,12 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev, struct drm_gem_object *obj)
 
        switch (obj_priv->tiling_mode) {
        case I915_TILING_NONE:
-               alignment = 64 * 1024;
+               if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
+                       alignment = 128 * 1024;
+               else if (IS_I965G(dev))
+                       alignment = 4 * 1024;
+               else
+                       alignment = 64 * 1024;
                break;
        case I915_TILING_X:
                /* pin() will align the object as required by fence */
@@ -1540,6 +1625,15 @@ static void ironlake_fdi_link_train(struct drm_crtc *crtc)
        int fdi_rx_imr_reg = (pipe == 0) ? FDI_RXA_IMR : FDI_RXB_IMR;
        u32 temp, tries = 0;
 
+       /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
+          for train result */
+       temp = I915_READ(fdi_rx_imr_reg);
+       temp &= ~FDI_RX_SYMBOL_LOCK;
+       temp &= ~FDI_RX_BIT_LOCK;
+       I915_WRITE(fdi_rx_imr_reg, temp);
+       I915_READ(fdi_rx_imr_reg);
+       udelay(150);
+
        /* enable CPU FDI TX and PCH FDI RX */
        temp = I915_READ(fdi_tx_reg);
        temp |= FDI_TX_ENABLE;
@@ -1557,16 +1651,7 @@ static void ironlake_fdi_link_train(struct drm_crtc *crtc)
        I915_READ(fdi_rx_reg);
        udelay(150);
 
-       /* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
-          for train result */
-       temp = I915_READ(fdi_rx_imr_reg);
-       temp &= ~FDI_RX_SYMBOL_LOCK;
-       temp &= ~FDI_RX_BIT_LOCK;
-       I915_WRITE(fdi_rx_imr_reg, temp);
-       I915_READ(fdi_rx_imr_reg);
-       udelay(150);
-
-       for (;;) {
+       for (tries = 0; tries < 5; tries++) {
                temp = I915_READ(fdi_rx_iir_reg);
                DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
 
@@ -1576,14 +1661,9 @@ static void ironlake_fdi_link_train(struct drm_crtc *crtc)
                                   temp | FDI_RX_BIT_LOCK);
                        break;
                }
-
-               tries++;
-
-               if (tries > 5) {
-                       DRM_DEBUG_KMS("FDI train 1 fail!\n");
-                       break;
-               }
        }
+       if (tries == 5)
+               DRM_DEBUG_KMS("FDI train 1 fail!\n");
 
        /* Train 2 */
        temp = I915_READ(fdi_tx_reg);
@@ -1599,7 +1679,7 @@ static void ironlake_fdi_link_train(struct drm_crtc *crtc)
 
        tries = 0;
 
-       for (;;) {
+       for (tries = 0; tries < 5; tries++) {
                temp = I915_READ(fdi_rx_iir_reg);
                DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
 
@@ -1609,14 +1689,9 @@ static void ironlake_fdi_link_train(struct drm_crtc *crtc)
                        DRM_DEBUG_KMS("FDI train 2 done.\n");
                        break;
                }
-
-               tries++;
-
-               if (tries > 5) {
-                       DRM_DEBUG_KMS("FDI train 2 fail!\n");
-                       break;
-               }
        }
+       if (tries == 5)
+               DRM_DEBUG_KMS("FDI train 2 fail!\n");
 
        DRM_DEBUG_KMS("FDI train done\n");
 }
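
The unbounded for (;;) polling loops in the link-training code are replaced above by loops bounded at five tries, with the failure message moved after the loop. The same idiom, reduced to a tiny standalone example (read_status() stands in for the FDI_RX_IIR read and BIT_LOCK for the lock bit):

#include <stdio.h>

#define BIT_LOCK 0x100u

static unsigned int read_status(void)
{
        static int calls;
        return ++calls >= 3 ? BIT_LOCK : 0u;   /* "lock" bit appears on the 3rd poll */
}

int main(void)
{
        int tries;

        for (tries = 0; tries < 5; tries++) {
                if (read_status() & BIT_LOCK) {
                        printf("trained after %d tries\n", tries + 1);
                        break;
                }
        }
        if (tries == 5)
                printf("train fail!\n");
        return 0;
}
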
@@ -1641,6 +1716,15 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc)
        int fdi_rx_imr_reg = (pipe == 0) ? FDI_RXA_IMR : FDI_RXB_IMR;
        u32 temp, i;
 
+       /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
+          for train result */
+       temp = I915_READ(fdi_rx_imr_reg);
+       temp &= ~FDI_RX_SYMBOL_LOCK;
+       temp &= ~FDI_RX_BIT_LOCK;
+       I915_WRITE(fdi_rx_imr_reg, temp);
+       I915_READ(fdi_rx_imr_reg);
+       udelay(150);
+
        /* enable CPU FDI TX and PCH FDI RX */
        temp = I915_READ(fdi_tx_reg);
        temp |= FDI_TX_ENABLE;
@@ -1666,15 +1750,6 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc)
        I915_READ(fdi_rx_reg);
        udelay(150);
 
-       /* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
-          for train result */
-       temp = I915_READ(fdi_rx_imr_reg);
-       temp &= ~FDI_RX_SYMBOL_LOCK;
-       temp &= ~FDI_RX_BIT_LOCK;
-       I915_WRITE(fdi_rx_imr_reg, temp);
-       I915_READ(fdi_rx_imr_reg);
-       udelay(150);
-
        for (i = 0; i < 4; i++ ) {
                temp = I915_READ(fdi_tx_reg);
                temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
@@ -1829,7 +1904,8 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
                }
 
                /* Enable panel fitting for LVDS */
-               if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
+               if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)
+                   || HAS_eDP || intel_pch_has_edp(crtc)) {
                        temp = I915_READ(pf_ctl_reg);
                        I915_WRITE(pf_ctl_reg, temp | PF_ENABLE | PF_FILTER_MED_3x3);
 
@@ -1924,9 +2000,12 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
                                reg = I915_READ(trans_dp_ctl);
                                reg &= ~TRANS_DP_PORT_SEL_MASK;
                                reg = TRANS_DP_OUTPUT_ENABLE |
-                                     TRANS_DP_ENH_FRAMING |
-                                     TRANS_DP_VSYNC_ACTIVE_HIGH |
-                                     TRANS_DP_HSYNC_ACTIVE_HIGH;
+                                     TRANS_DP_ENH_FRAMING;
+
+                               if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC)
+                                     reg |= TRANS_DP_HSYNC_ACTIVE_HIGH;
+                               if (crtc->mode.flags & DRM_MODE_FLAG_PVSYNC)
+                                     reg |= TRANS_DP_VSYNC_ACTIVE_HIGH;
 
                                switch (intel_trans_dp_port_sel(crtc)) {
                                case PCH_DP_B:
@@ -1966,6 +2045,8 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
 
                intel_crtc_load_lut(crtc);
 
+               intel_update_fbc(crtc, &crtc->mode);
+
        break;
        case DRM_MODE_DPMS_OFF:
                DRM_DEBUG_KMS("crtc %d dpms off\n", pipe);
@@ -1980,6 +2061,10 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
                        I915_READ(dspbase_reg);
                }
 
+               if (dev_priv->cfb_plane == plane &&
+                   dev_priv->display.disable_fbc)
+                       dev_priv->display.disable_fbc(dev);
+
                i915_disable_vga(dev);
 
                /* disable cpu pipe, disable after all planes disabled */
@@ -2256,6 +2341,11 @@ static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode)
                        intel_wait_for_vblank(dev);
                }
 
+               /* Don't disable pipe A or its PLLs when the
+                * PIPEA_FORCE quirk requires them to stay on */
+               if (pipeconf_reg == PIPEACONF &&
+                   (dev_priv->quirks & QUIRK_PIPEA_FORCE))
+                       goto skip_pipe_off;
+
                /* Next, disable display pipes */
                temp = I915_READ(pipeconf_reg);
                if ((temp & PIPEACONF_ENABLE) != 0) {
@@ -2271,7 +2361,7 @@ static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode)
                        I915_WRITE(dpll_reg, temp & ~DPLL_VCO_ENABLE);
                        I915_READ(dpll_reg);
                }
-
+       skip_pipe_off:
                /* Wait for the clocks to turn off. */
                udelay(150);
                break;
@@ -2354,11 +2444,9 @@ static bool intel_crtc_mode_fixup(struct drm_crtc *crtc,
        struct drm_device *dev = crtc->dev;
        if (HAS_PCH_SPLIT(dev)) {
                /* FDI link clock is fixed at 2.7G */
-               if (mode->clock * 3 > 27000 * 4)
-                       return MODE_CLOCK_HIGH;
+               if (mode->clock * 3 > IRONLAKE_FDI_FREQ * 4)
+                       return false;
        }
-
-       drm_mode_set_crtcinfo(adjusted_mode, 0);
        return true;
 }
 
@@ -2539,6 +2627,20 @@ static struct intel_watermark_params g4x_wm_info = {
        2,
        G4X_FIFO_LINE_SIZE,
 };
+static struct intel_watermark_params g4x_cursor_wm_info = {
+       I965_CURSOR_FIFO,
+       I965_CURSOR_MAX_WM,
+       I965_CURSOR_DFT_WM,
+       2,
+       G4X_FIFO_LINE_SIZE,
+};
+static struct intel_watermark_params i965_cursor_wm_info = {
+       I965_CURSOR_FIFO,
+       I965_CURSOR_MAX_WM,
+       I965_CURSOR_DFT_WM,
+       2,
+       I915_FIFO_LINE_SIZE,
+};
 static struct intel_watermark_params i945_wm_info = {
        I945_FIFO_SIZE,
        I915_MAX_WM,
@@ -2576,6 +2678,14 @@ static struct intel_watermark_params ironlake_display_wm_info = {
        ILK_FIFO_LINE_SIZE
 };
 
+static struct intel_watermark_params ironlake_cursor_wm_info = {
+       ILK_CURSOR_FIFO,
+       ILK_CURSOR_MAXWM,
+       ILK_CURSOR_DFTWM,
+       2,
+       ILK_FIFO_LINE_SIZE
+};
+
 static struct intel_watermark_params ironlake_display_srwm_info = {
        ILK_DISPLAY_SR_FIFO,
        ILK_DISPLAY_MAX_SRWM,
@@ -2625,7 +2735,7 @@ static unsigned long intel_calculate_wm(unsigned long clock_in_khz,
         */
        entries_required = ((clock_in_khz / 1000) * pixel_size * latency_ns) /
                1000;
-       entries_required /= wm->cacheline_size;
+       entries_required = DIV_ROUND_UP(entries_required, wm->cacheline_size);
 
        DRM_DEBUG_KMS("FIFO entries required for mode: %d\n", entries_required);
 
@@ -2636,8 +2746,14 @@ static unsigned long intel_calculate_wm(unsigned long clock_in_khz,
        /* Don't promote wm_size to unsigned... */
        if (wm_size > (long)wm->max_wm)
                wm_size = wm->max_wm;
-       if (wm_size <= 0)
+       if (wm_size <= 0) {
                wm_size = wm->default_wm;
+               DRM_ERROR("Insufficient FIFO for plane, expect flickering:"
+                         " entries required = %ld, available = %lu.\n",
+                         entries_required + wm->guard_size,
+                         wm->fifo_size);
+       }
+
        return wm_size;
 }
 
@@ -2746,11 +2862,9 @@ static int i9xx_get_fifo_size(struct drm_device *dev, int plane)
        uint32_t dsparb = I915_READ(DSPARB);
        int size;
 
-       if (plane == 0)
-               size = dsparb & 0x7f;
-       else
-               size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) -
-                       (dsparb & 0x7f);
+       size = dsparb & 0x7f;
+       if (plane)
+               size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size;
 
        DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
                        plane ? "B" : "A", size);
@@ -2764,11 +2878,9 @@ static int i85x_get_fifo_size(struct drm_device *dev, int plane)
        uint32_t dsparb = I915_READ(DSPARB);
        int size;
 
-       if (plane == 0)
-               size = dsparb & 0x1ff;
-       else
-               size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) -
-                       (dsparb & 0x1ff);
+       size = dsparb & 0x1ff;
+       if (plane)
+               size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - size;
        size >>= 1; /* Convert to cachelines */
 
        DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
@@ -2809,7 +2921,8 @@ static int i830_get_fifo_size(struct drm_device *dev, int plane)
 }
 
 static void pineview_update_wm(struct drm_device *dev,  int planea_clock,
-                         int planeb_clock, int sr_hdisplay, int pixel_size)
+                         int planeb_clock, int sr_hdisplay, int unused,
+                         int pixel_size)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 reg;
@@ -2874,7 +2987,8 @@ static void pineview_update_wm(struct drm_device *dev,  int planea_clock,
 }
 
 static void g4x_update_wm(struct drm_device *dev,  int planea_clock,
-                         int planeb_clock, int sr_hdisplay, int pixel_size)
+                         int planeb_clock, int sr_hdisplay, int sr_htotal,
+                         int pixel_size)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        int total_size, cacheline_size;
@@ -2898,12 +3012,12 @@ static void g4x_update_wm(struct drm_device *dev,  int planea_clock,
         */
        entries_required = ((planea_clock / 1000) * pixel_size * latency_ns) /
                1000;
-       entries_required /= G4X_FIFO_LINE_SIZE;
+       entries_required = DIV_ROUND_UP(entries_required, G4X_FIFO_LINE_SIZE);
        planea_wm = entries_required + planea_params.guard_size;
 
        entries_required = ((planeb_clock / 1000) * pixel_size * latency_ns) /
                1000;
-       entries_required /= G4X_FIFO_LINE_SIZE;
+       entries_required = DIV_ROUND_UP(entries_required, G4X_FIFO_LINE_SIZE);
        planeb_wm = entries_required + planeb_params.guard_size;
 
        cursora_wm = cursorb_wm = 16;
@@ -2917,13 +3031,24 @@ static void g4x_update_wm(struct drm_device *dev,  int planea_clock,
                static const int sr_latency_ns = 12000;
 
                sr_clock = planea_clock ? planea_clock : planeb_clock;
-               line_time_us = ((sr_hdisplay * 1000) / sr_clock);
+               line_time_us = ((sr_htotal * 1000) / sr_clock);
 
                /* Use ns/us then divide to preserve precision */
-               sr_entries = (((sr_latency_ns / line_time_us) + 1) *
-                             pixel_size * sr_hdisplay) / 1000;
-               sr_entries = roundup(sr_entries / cacheline_size, 1);
-               DRM_DEBUG("self-refresh entries: %d\n", sr_entries);
+               sr_entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
+                             pixel_size * sr_hdisplay;
+               sr_entries = DIV_ROUND_UP(sr_entries, cacheline_size);
+
+               entries_required = (((sr_latency_ns / line_time_us) +
+                                    1000) / 1000) * pixel_size * 64;
+               entries_required = DIV_ROUND_UP(entries_required,
+                                          g4x_cursor_wm_info.cacheline_size);
+               cursor_sr = entries_required + g4x_cursor_wm_info.guard_size;
+
+               if (cursor_sr > g4x_cursor_wm_info.max_wm)
+                       cursor_sr = g4x_cursor_wm_info.max_wm;
+               DRM_DEBUG_KMS("self-refresh watermark: display plane %d "
+                             "cursor %d\n", sr_entries, cursor_sr);
+
                I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
        } else {
                /* Turn off self refresh if both pipes are enabled */
@@ -2948,11 +3073,13 @@ static void g4x_update_wm(struct drm_device *dev,  int planea_clock,
 }
 
 static void i965_update_wm(struct drm_device *dev, int planea_clock,
-                          int planeb_clock, int sr_hdisplay, int pixel_size)
+                          int planeb_clock, int sr_hdisplay, int sr_htotal,
+                          int pixel_size)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        unsigned long line_time_us;
        int sr_clock, sr_entries, srwm = 1;
+       int cursor_sr = 16;
 
        /* Calc sr entries for one plane configs */
        if (sr_hdisplay && (!planea_clock || !planeb_clock)) {
@@ -2960,17 +3087,31 @@ static void i965_update_wm(struct drm_device *dev, int planea_clock,
                static const int sr_latency_ns = 12000;
 
                sr_clock = planea_clock ? planea_clock : planeb_clock;
-               line_time_us = ((sr_hdisplay * 1000) / sr_clock);
+               line_time_us = ((sr_htotal * 1000) / sr_clock);
 
                /* Use ns/us then divide to preserve precision */
-               sr_entries = (((sr_latency_ns / line_time_us) + 1) *
-                             pixel_size * sr_hdisplay) / 1000;
-               sr_entries = roundup(sr_entries / I915_FIFO_LINE_SIZE, 1);
+               sr_entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
+                             pixel_size * sr_hdisplay;
+               sr_entries = DIV_ROUND_UP(sr_entries, I915_FIFO_LINE_SIZE);
                DRM_DEBUG("self-refresh entries: %d\n", sr_entries);
-               srwm = I945_FIFO_SIZE - sr_entries;
+               srwm = I965_FIFO_SIZE - sr_entries;
                if (srwm < 0)
                        srwm = 1;
-               srwm &= 0x3f;
+               srwm &= 0x1ff;
+
+               sr_entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
+                            pixel_size * 64;
+               sr_entries = DIV_ROUND_UP(sr_entries,
+                                         i965_cursor_wm_info.cacheline_size);
+               cursor_sr = i965_cursor_wm_info.fifo_size -
+                           (sr_entries + i965_cursor_wm_info.guard_size);
+
+               if (cursor_sr > i965_cursor_wm_info.max_wm)
+                       cursor_sr = i965_cursor_wm_info.max_wm;
+
+               DRM_DEBUG_KMS("self-refresh watermark: display plane %d "
+                             "cursor %d\n", srwm, cursor_sr);
+
                if (IS_I965GM(dev))
                        I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
        } else {
@@ -2987,10 +3128,13 @@ static void i965_update_wm(struct drm_device *dev, int planea_clock,
        I915_WRITE(DSPFW1, (srwm << DSPFW_SR_SHIFT) | (8 << 16) | (8 << 8) |
                   (8 << 0));
        I915_WRITE(DSPFW2, (8 << 8) | (8 << 0));
+       /* update cursor SR watermark */
+       I915_WRITE(DSPFW3, (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
 }
 
 static void i9xx_update_wm(struct drm_device *dev, int planea_clock,
-                          int planeb_clock, int sr_hdisplay, int pixel_size)
+                          int planeb_clock, int sr_hdisplay, int sr_htotal,
+                          int pixel_size)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        uint32_t fwater_lo;
@@ -3035,12 +3179,12 @@ static void i9xx_update_wm(struct drm_device *dev, int planea_clock,
                static const int sr_latency_ns = 6000;
 
                sr_clock = planea_clock ? planea_clock : planeb_clock;
-               line_time_us = ((sr_hdisplay * 1000) / sr_clock);
+               line_time_us = ((sr_htotal * 1000) / sr_clock);
 
                /* Use ns/us then divide to preserve precision */
-               sr_entries = (((sr_latency_ns / line_time_us) + 1) *
-                             pixel_size * sr_hdisplay) / 1000;
-               sr_entries = roundup(sr_entries / cacheline_size, 1);
+               sr_entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
+                             pixel_size * sr_hdisplay;
+               sr_entries = DIV_ROUND_UP(sr_entries, cacheline_size);
                DRM_DEBUG_KMS("self-refresh entries: %d\n", sr_entries);
                srwm = total_size - sr_entries;
                if (srwm < 0)
@@ -3078,7 +3222,7 @@ static void i9xx_update_wm(struct drm_device *dev, int planea_clock,
 }
 
 static void i830_update_wm(struct drm_device *dev, int planea_clock, int unused,
-                          int unused2, int pixel_size)
+                          int unused2, int unused3, int pixel_size)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        uint32_t fwater_lo = I915_READ(FW_BLC) & ~0xfff;
@@ -3096,9 +3240,11 @@ static void i830_update_wm(struct drm_device *dev, int planea_clock, int unused,
 }
 
 #define ILK_LP0_PLANE_LATENCY          700
+#define ILK_LP0_CURSOR_LATENCY         1300
 
 static void ironlake_update_wm(struct drm_device *dev,  int planea_clock,
-                      int planeb_clock, int sr_hdisplay, int pixel_size)
+                      int planeb_clock, int sr_hdisplay, int sr_htotal,
+                      int pixel_size)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
@@ -3106,20 +3252,48 @@ static void ironlake_update_wm(struct drm_device *dev,  int planea_clock,
        unsigned long line_time_us;
        int sr_clock, entries_required;
        u32 reg_value;
+       int line_count;
+       int planea_htotal = 0, planeb_htotal = 0;
+       struct drm_crtc *crtc;
+       struct intel_crtc *intel_crtc;
+
+       /* Need htotal for all active display planes */
+       list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+               intel_crtc = to_intel_crtc(crtc);
+               if (crtc->enabled) {
+                       if (intel_crtc->plane == 0)
+                               planea_htotal = crtc->mode.htotal;
+                       else
+                               planeb_htotal = crtc->mode.htotal;
+               }
+       }
 
        /* Calculate and update the watermark for plane A */
        if (planea_clock) {
                entries_required = ((planea_clock / 1000) * pixel_size *
                                     ILK_LP0_PLANE_LATENCY) / 1000;
                entries_required = DIV_ROUND_UP(entries_required,
-                                  ironlake_display_wm_info.cacheline_size);
+                                               ironlake_display_wm_info.cacheline_size);
                planea_wm = entries_required +
                            ironlake_display_wm_info.guard_size;
 
                if (planea_wm > (int)ironlake_display_wm_info.max_wm)
                        planea_wm = ironlake_display_wm_info.max_wm;
 
-               cursora_wm = 16;
+               /* Use the large buffer method to calculate cursor watermark */
+               line_time_us = (planea_htotal * 1000) / planea_clock;
+
+               /* Use ns/us then divide to preserve precision */
+               line_count = (ILK_LP0_CURSOR_LATENCY / line_time_us + 1000) / 1000;
+
+               /* calculate the cursor watermark for cursor A */
+               entries_required = line_count * 64 * pixel_size;
+               entries_required = DIV_ROUND_UP(entries_required,
+                                               ironlake_cursor_wm_info.cacheline_size);
+               cursora_wm = entries_required + ironlake_cursor_wm_info.guard_size;
+               if (cursora_wm > ironlake_cursor_wm_info.max_wm)
+                       cursora_wm = ironlake_cursor_wm_info.max_wm;
+
                reg_value = I915_READ(WM0_PIPEA_ILK);
                reg_value &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
                reg_value |= (planea_wm << WM0_PIPE_PLANE_SHIFT) |
@@ -3133,14 +3307,27 @@ static void ironlake_update_wm(struct drm_device *dev,  int planea_clock,
                entries_required = ((planeb_clock / 1000) * pixel_size *
                                     ILK_LP0_PLANE_LATENCY) / 1000;
                entries_required = DIV_ROUND_UP(entries_required,
-                                  ironlake_display_wm_info.cacheline_size);
+                                               ironlake_display_wm_info.cacheline_size);
                planeb_wm = entries_required +
                            ironlake_display_wm_info.guard_size;
 
                if (planeb_wm > (int)ironlake_display_wm_info.max_wm)
                        planeb_wm = ironlake_display_wm_info.max_wm;
 
-               cursorb_wm = 16;
+               /* Use the large buffer method to calculate cursor watermark */
+               line_time_us = (planeb_htotal * 1000) / planeb_clock;
+
+               /* Use ns/us then divide to preserve precision */
+               line_count = (ILK_LP0_CURSOR_LATENCY / line_time_us + 1000) / 1000;
+
+               /* calculate the cursor watermark for cursor B */
+               entries_required = line_count * 64 * pixel_size;
+               entries_required = DIV_ROUND_UP(entries_required,
+                                               ironlake_cursor_wm_info.cacheline_size);
+               cursorb_wm = entries_required + ironlake_cursor_wm_info.guard_size;
+               if (cursorb_wm > ironlake_cursor_wm_info.max_wm)
+                       cursorb_wm = ironlake_cursor_wm_info.max_wm;
+
                reg_value = I915_READ(WM0_PIPEB_ILK);
                reg_value &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
                reg_value |= (planeb_wm << WM0_PIPE_PLANE_SHIFT) |
@@ -3155,12 +3342,12 @@ static void ironlake_update_wm(struct drm_device *dev,  int planea_clock,
         * display plane is used.
         */
        if (!planea_clock || !planeb_clock) {
-               int line_count;
+
                /* Read the self-refresh latency. The unit is 0.5us */
                int ilk_sr_latency = I915_READ(MLTR_ILK) & ILK_SRLT_MASK;
 
                sr_clock = planea_clock ? planea_clock : planeb_clock;
-               line_time_us = ((sr_hdisplay * 1000) / sr_clock);
+               line_time_us = ((sr_htotal * 1000) / sr_clock);
 
                /* Use ns/us then divide to preserve precision */
                line_count = ((ilk_sr_latency * 500) / line_time_us + 1000)
@@ -3169,14 +3356,14 @@ static void ironlake_update_wm(struct drm_device *dev,  int planea_clock,
                /* calculate the self-refresh watermark for display plane */
                entries_required = line_count * sr_hdisplay * pixel_size;
                entries_required = DIV_ROUND_UP(entries_required,
-                                  ironlake_display_srwm_info.cacheline_size);
+                                               ironlake_display_srwm_info.cacheline_size);
                sr_wm = entries_required +
                        ironlake_display_srwm_info.guard_size;
 
                /* calculate the self-refresh watermark for display cursor */
                entries_required = line_count * pixel_size * 64;
                entries_required = DIV_ROUND_UP(entries_required,
-                                  ironlake_cursor_srwm_info.cacheline_size);
+                                               ironlake_cursor_srwm_info.cacheline_size);
                cursor_wm = entries_required +
                            ironlake_cursor_srwm_info.guard_size;
 
@@ -3220,6 +3407,7 @@ static void ironlake_update_wm(struct drm_device *dev,  int planea_clock,
  *       bytes per pixel
  *   where
  *     line time = htotal / dotclock
+ *     surface width = hdisplay for normal plane and 64 for cursor
  *   and latency is assumed to be high, as above.
  *
  * The final value programmed to the register should always be rounded up,
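
As a rough worked example of the large-buffer method described in the comment above, applied to the cursor case (surface width fixed at 64 pixels) and mirroring the integer arithmetic used in the ironlake_update_wm() hunks; the mode timings and latency below are invented sample values:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
        unsigned long htotal = 2200, clock_khz = 148500;  /* sample mode */
        unsigned long latency_ns = 1300;                  /* LP0 cursor latency */
        unsigned long pixel_size = 4, cacheline = 64, guard = 2;

        /* line time in us, then how many lines fit in the latency window */
        unsigned long line_time_us = (htotal * 1000) / clock_khz;
        unsigned long line_count = (latency_ns / line_time_us + 1000) / 1000;

        /* cursor is 64 pixels wide; round the bytes up to whole cachelines */
        unsigned long entries = DIV_ROUND_UP(line_count * 64 * pixel_size,
                                             cacheline);

        printf("line_time=%luus lines=%lu cursor wm=%lu\n",
               line_time_us, line_count, entries + guard);
        return 0;
}
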
@@ -3236,6 +3424,7 @@ static void intel_update_watermarks(struct drm_device *dev)
        int sr_hdisplay = 0;
        unsigned long planea_clock = 0, planeb_clock = 0, sr_clock = 0;
        int enabled = 0, pixel_size = 0;
+       int sr_htotal = 0;
 
        if (!dev_priv->display.update_wm)
                return;
@@ -3256,6 +3445,7 @@ static void intel_update_watermarks(struct drm_device *dev)
                        }
                        sr_hdisplay = crtc->mode.hdisplay;
                        sr_clock = crtc->mode.clock;
+                       sr_htotal = crtc->mode.htotal;
                        if (crtc->fb)
                                pixel_size = crtc->fb->bits_per_pixel / 8;
                        else
@@ -3267,7 +3457,7 @@ static void intel_update_watermarks(struct drm_device *dev)
                return;
 
        dev_priv->display.update_wm(dev, planea_clock, planeb_clock,
-                                   sr_hdisplay, pixel_size);
+                                   sr_hdisplay, sr_htotal, pixel_size);
 }
 
 static int intel_crtc_mode_set(struct drm_crtc *crtc,
@@ -3386,6 +3576,9 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
                return -EINVAL;
        }
 
+       /* Ensure that the cursor is valid for the new mode before changing... */
+       intel_crtc_update_cursor(crtc);
+
        if (is_lvds && dev_priv->lvds_downclock_avail) {
                has_reduced_clock = limit->find_pll(limit, crtc,
                                                            dev_priv->lvds_downclock,
@@ -3452,7 +3645,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
                                temp |= PIPE_8BPC;
                        else
                                temp |= PIPE_6BPC;
-               } else if (is_edp) {
+               } else if (is_edp || (is_dp && intel_pch_has_edp(crtc))) {
                        switch (dev_priv->edp_bpp/3) {
                        case 8:
                                temp |= PIPE_8BPC;
@@ -3695,6 +3888,11 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
                udelay(150);
        }
 
+       if (HAS_PCH_SPLIT(dev)) {
+               pipeconf &= ~PIPE_ENABLE_DITHER;
+               pipeconf &= ~PIPE_DITHER_TYPE_MASK;
+       }
+
        /* The LVDS pin pair needs to be on before the DPLLs are enabled.
         * This is an exception to the general rule that mode_set doesn't turn
         * things on.
@@ -3741,11 +3939,9 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
                                } else
                                        lvds |= LVDS_ENABLE_DITHER;
                        } else {
-                               if (HAS_PCH_SPLIT(dev)) {
-                                       pipeconf &= ~PIPE_ENABLE_DITHER;
-                                       pipeconf &= ~PIPE_DITHER_TYPE_MASK;
-                               } else
+                               if (!HAS_PCH_SPLIT(dev)) {
                                        lvds &= ~LVDS_ENABLE_DITHER;
+                               }
                        }
                }
                I915_WRITE(lvds_reg, lvds);
@@ -3921,6 +4117,85 @@ void intel_crtc_load_lut(struct drm_crtc *crtc)
        }
 }
 
+/* If no part of the cursor is visible on the framebuffer, then the GPU may hang... */
+static void intel_crtc_update_cursor(struct drm_crtc *crtc)
+{
+       struct drm_device *dev = crtc->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+       int pipe = intel_crtc->pipe;
+       int x = intel_crtc->cursor_x;
+       int y = intel_crtc->cursor_y;
+       uint32_t base, pos;
+       bool visible;
+
+       pos = 0;
+
+       if (crtc->fb) {
+               base = intel_crtc->cursor_addr;
+               if (x > (int) crtc->fb->width)
+                       base = 0;
+
+               if (y > (int) crtc->fb->height)
+                       base = 0;
+       } else
+               base = 0;
+
+       if (x < 0) {
+               if (x + intel_crtc->cursor_width < 0)
+                       base = 0;
+
+               pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
+               x = -x;
+       }
+       pos |= x << CURSOR_X_SHIFT;
+
+       if (y < 0) {
+               if (y + intel_crtc->cursor_height < 0)
+                       base = 0;
+
+               pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
+               y = -y;
+       }
+       pos |= y << CURSOR_Y_SHIFT;
+
+       visible = base != 0;
+       if (!visible && !intel_crtc->cursor_visible)
+               return;
+
+       I915_WRITE(pipe == 0 ? CURAPOS : CURBPOS, pos);
+       if (intel_crtc->cursor_visible != visible) {
+               uint32_t cntl = I915_READ(pipe == 0 ? CURACNTR : CURBCNTR);
+               if (base) {
+                       /* Hooray for CUR*CNTR differences */
+                       if (IS_MOBILE(dev) || IS_I9XX(dev)) {
+                               cntl &= ~(CURSOR_MODE | MCURSOR_PIPE_SELECT);
+                               cntl |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;
+                               cntl |= pipe << 28; /* Connect to correct pipe */
+                       } else {
+                               cntl &= ~(CURSOR_FORMAT_MASK);
+                               cntl |= CURSOR_ENABLE;
+                               cntl |= CURSOR_FORMAT_ARGB | CURSOR_GAMMA_ENABLE;
+                       }
+               } else {
+                       if (IS_MOBILE(dev) || IS_I9XX(dev)) {
+                               cntl &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE);
+                               cntl |= CURSOR_MODE_DISABLE;
+                       } else {
+                               cntl &= ~(CURSOR_ENABLE | CURSOR_GAMMA_ENABLE);
+                       }
+               }
+               I915_WRITE(pipe == 0 ? CURACNTR : CURBCNTR, cntl);
+
+               intel_crtc->cursor_visible = visible;
+       }
+       /* and commit changes on next vblank */
+       I915_WRITE(pipe == 0 ? CURABASE : CURBBASE, base);
+
+       if (visible)
+               intel_mark_busy(dev, to_intel_framebuffer(crtc->fb)->obj);
+}
+
 static int intel_crtc_cursor_set(struct drm_crtc *crtc,
                                 struct drm_file *file_priv,
                                 uint32_t handle,
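
The intel_crtc_update_cursor() helper added above encodes each cursor
coordinate as a magnitude plus a sign flag rather than as two's complement,
which is why negative values are flipped and a sign bit is OR'd in.  A small
self-contained sketch of that encoding, assuming the same field layout as
the CURSOR_POS_SIGN / CURSOR_*_SHIFT macros (the helper names here are made
up):

#include <stdint.h>

#define CUR_POS_SIGN  (1u << 15)        /* assumed per-coordinate sign bit */
#define CUR_X_SHIFT   0
#define CUR_Y_SHIFT   16

/* Encode one signed coordinate as |value| plus a sign flag, then shift. */
static uint32_t encode_cursor_coord(int v, unsigned int shift)
{
        uint32_t field = 0;

        if (v < 0) {
                field |= CUR_POS_SIGN;
                v = -v;
        }
        field |= (uint32_t)v;

        return field << shift;
}

static uint32_t encode_cursor_pos(int x, int y)
{
        return encode_cursor_coord(x, CUR_X_SHIFT) |
               encode_cursor_coord(y, CUR_Y_SHIFT);
}
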
@@ -3931,11 +4206,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        struct drm_gem_object *bo;
        struct drm_i915_gem_object *obj_priv;
-       int pipe = intel_crtc->pipe;
-       uint32_t control = (pipe == 0) ? CURACNTR : CURBCNTR;
-       uint32_t base = (pipe == 0) ? CURABASE : CURBBASE;
-       uint32_t temp = I915_READ(control);
-       size_t addr;
+       uint32_t addr;
        int ret;
 
        DRM_DEBUG_KMS("\n");
@@ -3943,12 +4214,6 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
        /* if we want to turn off the cursor ignore width and height */
        if (!handle) {
                DRM_DEBUG_KMS("cursor off\n");
-               if (IS_MOBILE(dev) || IS_I9XX(dev)) {
-                       temp &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE);
-                       temp |= CURSOR_MODE_DISABLE;
-               } else {
-                       temp &= ~(CURSOR_ENABLE | CURSOR_GAMMA_ENABLE);
-               }
                addr = 0;
                bo = NULL;
                mutex_lock(&dev->struct_mutex);
@@ -3990,7 +4255,8 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
 
                addr = obj_priv->gtt_offset;
        } else {
-               ret = i915_gem_attach_phys_object(dev, bo, (pipe == 0) ? I915_GEM_PHYS_CURSOR_0 : I915_GEM_PHYS_CURSOR_1);
+               ret = i915_gem_attach_phys_object(dev, bo,
+                                                 (intel_crtc->pipe == 0) ? I915_GEM_PHYS_CURSOR_0 : I915_GEM_PHYS_CURSOR_1);
                if (ret) {
                        DRM_ERROR("failed to attach phys object\n");
                        goto fail_locked;
@@ -4001,21 +4267,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
        if (!IS_I9XX(dev))
                I915_WRITE(CURSIZE, (height << 12) | width);
 
-       /* Hooray for CUR*CNTR differences */
-       if (IS_MOBILE(dev) || IS_I9XX(dev)) {
-               temp &= ~(CURSOR_MODE | MCURSOR_PIPE_SELECT);
-               temp |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;
-               temp |= (pipe << 28); /* Connect to correct pipe */
-       } else {
-               temp &= ~(CURSOR_FORMAT_MASK);
-               temp |= CURSOR_ENABLE;
-               temp |= CURSOR_FORMAT_ARGB | CURSOR_GAMMA_ENABLE;
-       }
-
  finish:
-       I915_WRITE(control, temp);
-       I915_WRITE(base, addr);
-
        if (intel_crtc->cursor_bo) {
                if (dev_priv->info->cursor_needs_physical) {
                        if (intel_crtc->cursor_bo != bo)
@@ -4029,6 +4281,10 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
 
        intel_crtc->cursor_addr = addr;
        intel_crtc->cursor_bo = bo;
+       intel_crtc->cursor_width = width;
+       intel_crtc->cursor_height = height;
+
+       intel_crtc_update_cursor(crtc);
 
        return 0;
 fail_unpin:
@@ -4042,34 +4298,12 @@ fail:
 
 static int intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
 {
-       struct drm_device *dev = crtc->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-       struct intel_framebuffer *intel_fb;
-       int pipe = intel_crtc->pipe;
-       uint32_t temp = 0;
-       uint32_t adder;
-
-       if (crtc->fb) {
-               intel_fb = to_intel_framebuffer(crtc->fb);
-               intel_mark_busy(dev, intel_fb->obj);
-       }
-
-       if (x < 0) {
-               temp |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
-               x = -x;
-       }
-       if (y < 0) {
-               temp |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
-               y = -y;
-       }
 
-       temp |= x << CURSOR_X_SHIFT;
-       temp |= y << CURSOR_Y_SHIFT;
+       intel_crtc->cursor_x = x;
+       intel_crtc->cursor_y = y;
 
-       adder = intel_crtc->cursor_addr;
-       I915_WRITE((pipe == 0) ? CURAPOS : CURBPOS, temp);
-       I915_WRITE((pipe == 0) ? CURABASE : CURBBASE, adder);
+       intel_crtc_update_cursor(crtc);
 
        return 0;
 }
@@ -4413,7 +4647,8 @@ static void intel_increase_pllclock(struct drm_crtc *crtc, bool schedule)
                DRM_DEBUG_DRIVER("upclocking LVDS\n");
 
                /* Unlock panel regs */
-               I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) | (0xabcd << 16));
+               I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) |
+                          PANEL_UNLOCK_REGS);
 
                dpll &= ~DISPLAY_RATE_SELECT_FPA1;
                I915_WRITE(dpll_reg, dpll);
@@ -4456,7 +4691,8 @@ static void intel_decrease_pllclock(struct drm_crtc *crtc)
                DRM_DEBUG_DRIVER("downclocking LVDS\n");
 
                /* Unlock panel regs */
-               I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) | (0xabcd << 16));
+               I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) |
+                          PANEL_UNLOCK_REGS);
 
                dpll |= DISPLAY_RATE_SELECT_FPA1;
                I915_WRITE(dpll_reg, dpll);
@@ -4698,7 +4934,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
        struct drm_gem_object *obj;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        struct intel_unpin_work *work;
-       unsigned long flags;
+       unsigned long flags, offset;
        int pipesrc_reg = (intel_crtc->pipe == 0) ? PIPEASRC : PIPEBSRC;
        int ret, pipesrc;
        u32 flip_mask;
@@ -4730,27 +4966,22 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 
        mutex_lock(&dev->struct_mutex);
        ret = intel_pin_and_fence_fb_obj(dev, obj);
-       if (ret != 0) {
-               mutex_unlock(&dev->struct_mutex);
-
-               spin_lock_irqsave(&dev->event_lock, flags);
-               intel_crtc->unpin_work = NULL;
-               spin_unlock_irqrestore(&dev->event_lock, flags);
-
-               kfree(work);
-
-               DRM_DEBUG_DRIVER("flip queue: %p pin & fence failed\n",
-                                to_intel_bo(obj));
-               return ret;
-       }
+       if (ret)
+               goto cleanup_work;
 
        /* Reference the objects for the scheduled work. */
        drm_gem_object_reference(work->old_fb_obj);
        drm_gem_object_reference(obj);
 
        crtc->fb = fb;
-       i915_gem_object_flush_write_domain(obj);
-       drm_vblank_get(dev, intel_crtc->pipe);
+       ret = i915_gem_object_flush_write_domain(obj);
+       if (ret)
+               goto cleanup_objs;
+
+       ret = drm_vblank_get(dev, intel_crtc->pipe);
+       if (ret)
+               goto cleanup_objs;
+
        obj_priv = to_intel_bo(obj);
        atomic_inc(&obj_priv->pending_flip);
        work->pending_flip_obj = obj;
@@ -4765,19 +4996,23 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
                while (I915_READ(ISR) & flip_mask)
                        ;
 
+       /* Offset into the new buffer for cases of shared fbs between CRTCs */
+       offset = obj_priv->gtt_offset;
+       offset += (crtc->y * fb->pitch) + (crtc->x * (fb->bits_per_pixel) / 8);
+
        BEGIN_LP_RING(4);
        if (IS_I965G(dev)) {
                OUT_RING(MI_DISPLAY_FLIP |
                         MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
                OUT_RING(fb->pitch);
-               OUT_RING(obj_priv->gtt_offset | obj_priv->tiling_mode);
+               OUT_RING(offset | obj_priv->tiling_mode);
                pipesrc = I915_READ(pipesrc_reg); 
                OUT_RING(pipesrc & 0x0fff0fff);
        } else {
                OUT_RING(MI_DISPLAY_FLIP_I915 |
                         MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
                OUT_RING(fb->pitch);
-               OUT_RING(obj_priv->gtt_offset);
+               OUT_RING(offset);
                OUT_RING(MI_NOOP);
        }
        ADVANCE_LP_RING();
@@ -4787,6 +5022,20 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
        trace_i915_flip_request(intel_crtc->plane, obj);
 
        return 0;
+
+cleanup_objs:
+       drm_gem_object_unreference(work->old_fb_obj);
+       drm_gem_object_unreference(obj);
+cleanup_work:
+       mutex_unlock(&dev->struct_mutex);
+
+       spin_lock_irqsave(&dev->event_lock, flags);
+       intel_crtc->unpin_work = NULL;
+       spin_unlock_irqrestore(&dev->event_lock, flags);
+
+       kfree(work);
+
+       return ret;
 }
 
 static const struct drm_crtc_helper_funcs intel_helper_funcs = {
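
The reworked failure handling above follows the common kernel shape of
unwinding with goto labels in reverse order of acquisition, so each failure
point only undoes the steps that already succeeded.  A self-contained,
purely illustrative sketch of that shape (the step functions are stand-ins,
not driver calls):

#include <stdbool.h>

static bool step_a(void) { return true; }
static bool step_b(void) { return true; }
static bool step_c(void) { return false; }      /* pretend the last step fails */
static void undo_b(void) { }
static void undo_a(void) { }

static int do_flip_like_operation(void)
{
        if (!step_a())
                return -1;
        if (!step_b())
                goto cleanup_a;
        if (!step_c())
                goto cleanup_b;
        return 0;

cleanup_b:
        undo_b();
cleanup_a:
        undo_a();
        return -1;
}
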
@@ -4912,19 +5161,26 @@ static void intel_setup_outputs(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_encoder *encoder;
+       bool dpd_is_edp = false;
 
-       intel_crt_init(dev);
-
-       /* Set up integrated LVDS */
        if (IS_MOBILE(dev) && !IS_I830(dev))
                intel_lvds_init(dev);
 
        if (HAS_PCH_SPLIT(dev)) {
-               int found;
+               dpd_is_edp = intel_dpd_is_edp(dev);
 
                if (IS_MOBILE(dev) && (I915_READ(DP_A) & DP_DETECTED))
                        intel_dp_init(dev, DP_A);
 
+               if (dpd_is_edp && (I915_READ(PCH_DP_D) & DP_DETECTED))
+                       intel_dp_init(dev, PCH_DP_D);
+       }
+
+       intel_crt_init(dev);
+
+       if (HAS_PCH_SPLIT(dev)) {
+               int found;
+
                if (I915_READ(HDMIB) & PORT_DETECTED) {
                        /* PCH SDVOB multiplex with HDMIB */
                        found = intel_sdvo_init(dev, PCH_SDVOB);
@@ -4943,7 +5199,7 @@ static void intel_setup_outputs(struct drm_device *dev)
                if (I915_READ(PCH_DP_C) & DP_DETECTED)
                        intel_dp_init(dev, PCH_DP_C);
 
-               if (I915_READ(PCH_DP_D) & DP_DETECTED)
+               if (!dpd_is_edp && (I915_READ(PCH_DP_D) & DP_DETECTED))
                        intel_dp_init(dev, PCH_DP_D);
 
        } else if (SUPPORTS_DIGITAL_OUTPUTS(dev)) {
@@ -5352,6 +5608,26 @@ void intel_init_clock_gating(struct drm_device *dev)
                                        (I915_READ(DISP_ARB_CTL) |
                                                DISP_FBC_WM_DIS));
                }
+               /*
+                * Per the hardware documentation, the following bits must be
+                * set unconditionally in order to enable FBC:
+                *   bit 22 of 0x42000
+                *   bit 22 of 0x42004
+                *   bits 7, 8 and 9 of 0x42020
+                */
+               if (IS_IRONLAKE_M(dev)) {
+                       I915_WRITE(ILK_DISPLAY_CHICKEN1,
+                                  I915_READ(ILK_DISPLAY_CHICKEN1) |
+                                  ILK_FBCQ_DIS);
+                       I915_WRITE(ILK_DISPLAY_CHICKEN2,
+                                  I915_READ(ILK_DISPLAY_CHICKEN2) |
+                                  ILK_DPARB_GATE);
+                       I915_WRITE(ILK_DSPCLK_GATE,
+                                  I915_READ(ILK_DSPCLK_GATE) |
+                                  ILK_DPFC_DIS1 |
+                                  ILK_DPFC_DIS2 |
+                                  ILK_CLK_FBC);
+               }
                return;
        } else if (IS_G4X(dev)) {
                uint32_t dspclk_gate;
@@ -5430,7 +5706,11 @@ static void intel_init_display(struct drm_device *dev)
                dev_priv->display.dpms = i9xx_crtc_dpms;
 
        if (I915_HAS_FBC(dev)) {
-               if (IS_GM45(dev)) {
+               if (IS_IRONLAKE_M(dev)) {
+                       dev_priv->display.fbc_enabled = ironlake_fbc_enabled;
+                       dev_priv->display.enable_fbc = ironlake_enable_fbc;
+                       dev_priv->display.disable_fbc = ironlake_disable_fbc;
+               } else if (IS_GM45(dev)) {
                        dev_priv->display.fbc_enabled = g4x_fbc_enabled;
                        dev_priv->display.enable_fbc = g4x_enable_fbc;
                        dev_priv->display.disable_fbc = g4x_disable_fbc;
@@ -5511,6 +5791,66 @@ static void intel_init_display(struct drm_device *dev)
        }
 }
 
+/*
+ * Some BIOSes assume that the GPU's pipe A is enabled at suspend, resume,
+ * or other times.  This quirk makes sure pipe A stays enabled on the
+ * affected systems.
+ */
+static void quirk_pipea_force(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+
+       dev_priv->quirks |= QUIRK_PIPEA_FORCE;
+       DRM_DEBUG_DRIVER("applying pipe a force quirk\n");
+}
+
+struct intel_quirk {
+       int device;
+       int subsystem_vendor;
+       int subsystem_device;
+       void (*hook)(struct drm_device *dev);
+};
+
+struct intel_quirk intel_quirks[] = {
+       /* HP Compaq 2730p needs pipe A force quirk (LP: #291555) */
+       { 0x2a42, 0x103c, 0x30eb, quirk_pipea_force },
+       /* HP Mini needs pipe A force quirk (LP: #322104) */
+       { 0x27ae, 0x103c, 0x361a, quirk_pipea_force },
+
+       /* Thinkpad R31 needs pipe A force quirk */
+       { 0x3577, 0x1014, 0x0505, quirk_pipea_force },
+       /* Toshiba Protege R-205, S-209 needs pipe A force quirk */
+       { 0x2592, 0x1179, 0x0001, quirk_pipea_force },
+
+       /* ThinkPad X30 needs pipe A force quirk (LP: #304614) */
+       { 0x3577, 0x1014, 0x0513, quirk_pipea_force },
+       /* ThinkPad X40 needs pipe A force quirk */
+
+       /* ThinkPad T60 needs pipe A force quirk (bug #16494) */
+       { 0x2782, 0x17aa, 0x201a, quirk_pipea_force },
+
+       /* 855 & before need to leave pipe A & dpll A up */
+       { 0x3582, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },
+       { 0x2562, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },
+};
+
+static void intel_init_quirks(struct drm_device *dev)
+{
+       struct pci_dev *d = dev->pdev;
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(intel_quirks); i++) {
+               struct intel_quirk *q = &intel_quirks[i];
+
+               if (d->device == q->device &&
+                   (d->subsystem_vendor == q->subsystem_vendor ||
+                    q->subsystem_vendor == PCI_ANY_ID) &&
+                   (d->subsystem_device == q->subsystem_device ||
+                    q->subsystem_device == PCI_ANY_ID))
+                       q->hook(dev);
+       }
+}
+
 void intel_modeset_init(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
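
The quirk match above is an AND of three tests, with PCI_ANY_ID acting as a
wildcard for either subsystem field.  The same predicate, pulled out as a
hypothetical standalone helper (not part of the patch, and using a stand-in
for PCI_ANY_ID):

#include <stdbool.h>
#include <stdint.h>

#define ANY_ID 0xffffu                  /* stand-in for PCI_ANY_ID */

struct quirk_id {
        uint16_t device;
        uint16_t subsystem_vendor;
        uint16_t subsystem_device;
};

/* True when a quirk entry applies to the given device/subsystem IDs. */
static bool quirk_matches(const struct quirk_id *q, uint16_t device,
                          uint16_t sub_vendor, uint16_t sub_device)
{
        return device == q->device &&
               (q->subsystem_vendor == ANY_ID ||
                sub_vendor == q->subsystem_vendor) &&
               (q->subsystem_device == ANY_ID ||
                sub_device == q->subsystem_device);
}
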
@@ -5523,6 +5863,8 @@ void intel_modeset_init(struct drm_device *dev)
 
        dev->mode_config.funcs = (void *)&intel_mode_funcs;
 
+       intel_init_quirks(dev);
+
        intel_init_display(dev);
 
        if (IS_I965G(dev)) {
index 1aac59e83bff0c45a0da94e1f083d40d9e30b396..40be1fa65be18fa730347b3da0f248a2b35b19e6 100644 (file)
@@ -43,6 +43,7 @@
 #define DP_LINK_CONFIGURATION_SIZE     9
 
 #define IS_eDP(i) ((i)->type == INTEL_OUTPUT_EDP)
+#define IS_PCH_eDP(dp_priv) ((dp_priv)->is_pch_edp)
 
 struct intel_dp_priv {
        uint32_t output_reg;
@@ -56,6 +57,7 @@ struct intel_dp_priv {
        struct intel_encoder *intel_encoder;
        struct i2c_adapter adapter;
        struct i2c_algo_dp_aux_data algo;
+       bool is_pch_edp;
 };
 
 static void
@@ -128,8 +130,9 @@ intel_dp_link_required(struct drm_device *dev,
                       struct intel_encoder *intel_encoder, int pixel_clock)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
 
-       if (IS_eDP(intel_encoder))
+       if (IS_eDP(intel_encoder) || IS_PCH_eDP(dp_priv))
                return (pixel_clock * dev_priv->edp_bpp) / 8;
        else
                return pixel_clock * 3;
@@ -147,9 +150,21 @@ intel_dp_mode_valid(struct drm_connector *connector,
 {
        struct drm_encoder *encoder = intel_attached_encoder(connector);
        struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+       struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
+       struct drm_device *dev = connector->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
        int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_encoder));
        int max_lanes = intel_dp_max_lane_count(intel_encoder);
 
+       if ((IS_eDP(intel_encoder) || IS_PCH_eDP(dp_priv)) &&
+           dev_priv->panel_fixed_mode) {
+               if (mode->hdisplay > dev_priv->panel_fixed_mode->hdisplay)
+                       return MODE_PANEL;
+
+               if (mode->vdisplay > dev_priv->panel_fixed_mode->vdisplay)
+                       return MODE_PANEL;
+       }
+
        /* only refuse the mode on non-eDP since we have seen some weird eDP panels
           which are outside spec tolerances but somehow work by magic */
        if (!IS_eDP(intel_encoder) &&
@@ -508,11 +523,37 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
 {
        struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
        struct intel_dp_priv   *dp_priv = intel_encoder->dev_priv;
+       struct drm_device *dev = encoder->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
        int lane_count, clock;
        int max_lane_count = intel_dp_max_lane_count(intel_encoder);
        int max_clock = intel_dp_max_link_bw(intel_encoder) == DP_LINK_BW_2_7 ? 1 : 0;
        static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 };
 
+       if ((IS_eDP(intel_encoder) || IS_PCH_eDP(dp_priv)) &&
+           dev_priv->panel_fixed_mode) {
+               struct drm_display_mode *fixed_mode = dev_priv->panel_fixed_mode;
+
+               adjusted_mode->hdisplay = fixed_mode->hdisplay;
+               adjusted_mode->hsync_start = fixed_mode->hsync_start;
+               adjusted_mode->hsync_end = fixed_mode->hsync_end;
+               adjusted_mode->htotal = fixed_mode->htotal;
+
+               adjusted_mode->vdisplay = fixed_mode->vdisplay;
+               adjusted_mode->vsync_start = fixed_mode->vsync_start;
+               adjusted_mode->vsync_end = fixed_mode->vsync_end;
+               adjusted_mode->vtotal = fixed_mode->vtotal;
+
+               adjusted_mode->clock = fixed_mode->clock;
+               drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V);
+
+               /*
+                * mode->clock is used to calculate the Data & Link M/N values
+                * for the pipe; for eDP the fixed panel clock should be used.
+                */
+               mode->clock = dev_priv->panel_fixed_mode->clock;
+       }
+
        for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) {
                for (clock = 0; clock <= max_clock; clock++) {
                        int link_avail = intel_dp_max_data_rate(intel_dp_link_clock(bws[clock]), lane_count);
@@ -531,7 +572,7 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
                }
        }
 
-       if (IS_eDP(intel_encoder)) {
+       if (IS_eDP(intel_encoder) || IS_PCH_eDP(dp_priv)) {
                /* okay we failed just pick the highest */
                dp_priv->lane_count = max_lane_count;
                dp_priv->link_bw = bws[max_clock];
@@ -563,14 +604,14 @@ intel_reduce_ratio(uint32_t *num, uint32_t *den)
 }
 
 static void
-intel_dp_compute_m_n(int bytes_per_pixel,
+intel_dp_compute_m_n(int bpp,
                     int nlanes,
                     int pixel_clock,
                     int link_clock,
                     struct intel_dp_m_n *m_n)
 {
        m_n->tu = 64;
-       m_n->gmch_m = pixel_clock * bytes_per_pixel;
+       m_n->gmch_m = (pixel_clock * bpp) >> 3;
        m_n->gmch_n = link_clock * nlanes;
        intel_reduce_ratio(&m_n->gmch_m, &m_n->gmch_n);
        m_n->link_m = pixel_clock;
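
With bpp now expressed in bits, the ">> 3" above converts the per-clock pixel
payload back to bytes before the ratio is reduced.  A small worked sketch of
the same arithmetic; the names are made up, and the reduce step here is a
plain GCD, unlike the driver's intel_reduce_ratio() which only shifts the
values down until they fit the register width:

#include <stdint.h>

static uint32_t gcd32(uint32_t a, uint32_t b)
{
        while (b) {
                uint32_t t = a % b;
                a = b;
                b = t;
        }
        return a;
}

/*
 * Data M/N is pixel bytes per clock over link capacity per clock.  For
 * example, 1920x1080@60 (148500 kHz) at 24 bpp over 4 lanes, with the
 * driver's 270000 value for the 2.7 Gbps rate, gives
 * m = 148500 * 24 / 8 = 445500 and n = 270000 * 4 = 1080000,
 * which a full GCD reduction would bring down to 33/80.
 */
static void compute_data_mn(int bpp, int nlanes, int pixel_clock,
                            int link_clock, uint32_t *m, uint32_t *n)
{
        uint32_t g;

        *m = ((uint32_t)pixel_clock * bpp) >> 3;
        *n = (uint32_t)link_clock * nlanes;

        g = gcd32(*m, *n);
        *m /= g;
        *n /= g;
}
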
@@ -578,6 +619,28 @@ intel_dp_compute_m_n(int bytes_per_pixel,
        intel_reduce_ratio(&m_n->link_m, &m_n->link_n);
 }
 
+bool intel_pch_has_edp(struct drm_crtc *crtc)
+{
+       struct drm_device *dev = crtc->dev;
+       struct drm_mode_config *mode_config = &dev->mode_config;
+       struct drm_encoder *encoder;
+
+       list_for_each_entry(encoder, &mode_config->encoder_list, head) {
+               struct intel_encoder *intel_encoder;
+               struct intel_dp_priv *dp_priv;
+
+               if (!encoder || encoder->crtc != crtc)
+                       continue;
+
+               intel_encoder = enc_to_intel_encoder(encoder);
+               dp_priv = intel_encoder->dev_priv;
+
+               if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT)
+                       return dp_priv->is_pch_edp;
+       }
+       return false;
+}
+
 void
 intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
                 struct drm_display_mode *adjusted_mode)
@@ -587,7 +650,7 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
        struct drm_encoder *encoder;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-       int lane_count = 4;
+       int lane_count = 4, bpp = 24;
        struct intel_dp_m_n m_n;
 
        /*
@@ -605,6 +668,8 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
 
                if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT) {
                        lane_count = dp_priv->lane_count;
+                       if (IS_PCH_eDP(dp_priv))
+                               bpp = dev_priv->edp_bpp;
                        break;
                }
        }
@@ -614,7 +679,7 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
         * the number of bytes_per_pixel post-LUT, which we always
         * set up for 8-bits of R/G/B, or 3 bytes total.
         */
-       intel_dp_compute_m_n(3, lane_count,
+       intel_dp_compute_m_n(bpp, lane_count,
                             mode->clock, adjusted_mode->clock, &m_n);
 
        if (HAS_PCH_SPLIT(dev)) {
@@ -717,6 +782,51 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
        }
 }
 
+static void ironlake_edp_panel_on(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       unsigned long timeout = jiffies + msecs_to_jiffies(5000);
+       u32 pp, pp_status;
+
+       pp_status = I915_READ(PCH_PP_STATUS);
+       if (pp_status & PP_ON)
+               return;
+
+       pp = I915_READ(PCH_PP_CONTROL);
+       pp |= PANEL_UNLOCK_REGS | POWER_TARGET_ON;
+       I915_WRITE(PCH_PP_CONTROL, pp);
+       do {
+               pp_status = I915_READ(PCH_PP_STATUS);
+       } while (((pp_status & PP_ON) == 0) && !time_after(jiffies, timeout));
+
+       if (time_after(jiffies, timeout))
+               DRM_DEBUG_KMS("panel on wait timed out: 0x%08x\n", pp_status);
+
+       pp &= ~(PANEL_UNLOCK_REGS | EDP_FORCE_VDD);
+       I915_WRITE(PCH_PP_CONTROL, pp);
+}
+
+static void ironlake_edp_panel_off(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       unsigned long timeout = jiffies + msecs_to_jiffies(5000);
+       u32 pp, pp_status;
+
+       pp = I915_READ(PCH_PP_CONTROL);
+       pp &= ~POWER_TARGET_ON;
+       I915_WRITE(PCH_PP_CONTROL, pp);
+       do {
+               pp_status = I915_READ(PCH_PP_STATUS);
+       } while ((pp_status & PP_ON) && !time_after(jiffies, timeout));
+
+       if (time_after(jiffies, timeout))
+               DRM_DEBUG_KMS("panel off wait timed out\n");
+
+       /* Make sure VDD is enabled so DP AUX will work */
+       pp |= EDP_FORCE_VDD;
+       I915_WRITE(PCH_PP_CONTROL, pp);
+}
+
 static void ironlake_edp_backlight_on (struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
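
Both eDP panel helpers added above use the same wait shape: write the control
bit, then poll the status register until PP_ON reaches the requested state or
a 5 second deadline passes, logging a debug message on timeout.  A
self-contained userspace sketch of that poll-with-deadline loop against a
fake status word (none of these names are the driver's):

#include <stdbool.h>
#include <stdint.h>
#include <time.h>

static volatile uint32_t fake_status;           /* stand-in for PCH_PP_STATUS */
#define FAKE_PP_ON (1u << 31)

static uint64_t now_ms(void)
{
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        return (uint64_t)ts.tv_sec * 1000 + ts.tv_nsec / 1000000;
}

/* Poll until the PP_ON-style bit matches 'on', or give up after timeout_ms. */
static bool wait_for_panel_state(bool on, unsigned int timeout_ms)
{
        uint64_t deadline = now_ms() + timeout_ms;

        do {
                bool is_on = (fake_status & FAKE_PP_ON) != 0;

                if (is_on == on)
                        return true;
        } while (now_ms() < deadline);

        return false;   /* caller would log "wait timed out", as the driver does */
}
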
@@ -751,14 +861,18 @@ intel_dp_dpms(struct drm_encoder *encoder, int mode)
        if (mode != DRM_MODE_DPMS_ON) {
                if (dp_reg & DP_PORT_EN) {
                        intel_dp_link_down(intel_encoder, dp_priv->DP);
-                       if (IS_eDP(intel_encoder))
+                       if (IS_eDP(intel_encoder) || IS_PCH_eDP(dp_priv)) {
                                ironlake_edp_backlight_off(dev);
+                               ironlake_edp_panel_off(dev);
+                       }
                }
        } else {
                if (!(dp_reg & DP_PORT_EN)) {
                        intel_dp_link_train(intel_encoder, dp_priv->DP, dp_priv->link_configuration);
-                       if (IS_eDP(intel_encoder))
+                       if (IS_eDP(intel_encoder) || IS_PCH_eDP(dp_priv)) {
+                               ironlake_edp_panel_on(dev);
                                ironlake_edp_backlight_on(dev);
+                       }
                }
        }
        dp_priv->dpms_mode = mode;
@@ -1291,17 +1405,32 @@ static int intel_dp_get_modes(struct drm_connector *connector)
        struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
        struct drm_device *dev = intel_encoder->enc.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
        int ret;
 
        /* We should parse the EDID data and find out if it has an audio sink
         */
 
        ret = intel_ddc_get_modes(connector, intel_encoder->ddc_bus);
-       if (ret)
+       if (ret) {
+               if ((IS_eDP(intel_encoder) || IS_PCH_eDP(dp_priv)) &&
+                   !dev_priv->panel_fixed_mode) {
+                       struct drm_display_mode *newmode;
+                       list_for_each_entry(newmode, &connector->probed_modes,
+                                           head) {
+                               if (newmode->type & DRM_MODE_TYPE_PREFERRED) {
+                                       dev_priv->panel_fixed_mode =
+                                               drm_mode_duplicate(dev, newmode);
+                                       break;
+                               }
+                       }
+               }
+
                return ret;
+       }
 
        /* if eDP has no EDID, try to use fixed panel mode from VBT */
-       if (IS_eDP(intel_encoder)) {
+       if (IS_eDP(intel_encoder) || IS_PCH_eDP(dp_priv)) {
                if (dev_priv->panel_fixed_mode != NULL) {
                        struct drm_display_mode *mode;
                        mode = drm_mode_duplicate(dev, dev_priv->panel_fixed_mode);
@@ -1386,6 +1515,26 @@ intel_trans_dp_port_sel (struct drm_crtc *crtc)
        return -1;
 }
 
+/* check the VBT to see whether eDP is on the DP-D port */
+bool intel_dpd_is_edp(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct child_device_config *p_child;
+       int i;
+
+       if (!dev_priv->child_dev_num)
+               return false;
+
+       for (i = 0; i < dev_priv->child_dev_num; i++) {
+               p_child = dev_priv->child_dev + i;
+
+               if (p_child->dvo_port == PORT_IDPD &&
+                   p_child->device_type == DEVICE_TYPE_eDP)
+                       return true;
+       }
+       return false;
+}
+
 void
 intel_dp_init(struct drm_device *dev, int output_reg)
 {
@@ -1395,6 +1544,7 @@ intel_dp_init(struct drm_device *dev, int output_reg)
        struct intel_connector *intel_connector;
        struct intel_dp_priv *dp_priv;
        const char *name = NULL;
+       int type;
 
        intel_encoder = kcalloc(sizeof(struct intel_encoder) +
                               sizeof(struct intel_dp_priv), 1, GFP_KERNEL);
@@ -1409,18 +1559,24 @@ intel_dp_init(struct drm_device *dev, int output_reg)
 
        dp_priv = (struct intel_dp_priv *)(intel_encoder + 1);
 
+       if (HAS_PCH_SPLIT(dev) && (output_reg == PCH_DP_D))
+               if (intel_dpd_is_edp(dev))
+                       dp_priv->is_pch_edp = true;
+
+       if (output_reg == DP_A || IS_PCH_eDP(dp_priv)) {
+               type = DRM_MODE_CONNECTOR_eDP;
+               intel_encoder->type = INTEL_OUTPUT_EDP;
+       } else {
+               type = DRM_MODE_CONNECTOR_DisplayPort;
+               intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
+       }
+
        connector = &intel_connector->base;
-       drm_connector_init(dev, connector, &intel_dp_connector_funcs,
-                          DRM_MODE_CONNECTOR_DisplayPort);
+       drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
        drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);
 
        connector->polled = DRM_CONNECTOR_POLL_HPD;
 
-       if (output_reg == DP_A)
-               intel_encoder->type = INTEL_OUTPUT_EDP;
-       else
-               intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
-
        if (output_reg == DP_B || output_reg == PCH_DP_B)
                intel_encoder->clone_mask = (1 << INTEL_DP_B_CLONE_BIT);
        else if (output_reg == DP_C || output_reg == PCH_DP_C)
@@ -1479,7 +1635,7 @@ intel_dp_init(struct drm_device *dev, int output_reg)
        intel_encoder->ddc_bus = &dp_priv->adapter;
        intel_encoder->hot_plug = intel_dp_hot_plug;
 
-       if (output_reg == DP_A) {
+       if (output_reg == DP_A || IS_PCH_eDP(dp_priv)) {
                /* initialize panel mode from VBT if available for eDP */
                if (dev_priv->lfp_lvds_vbt_mode) {
                        dev_priv->panel_fixed_mode =
index 72206f37c4fb847c8d32d80c5c893fd3a68a5faa..b2190148703a0c7c2b9a507783a056ac8a3d7814 100644 (file)
@@ -143,8 +143,6 @@ struct intel_crtc {
        struct drm_crtc base;
        enum pipe pipe;
        enum plane plane;
-       struct drm_gem_object *cursor_bo;
-       uint32_t cursor_addr;
        u8 lut_r[256], lut_g[256], lut_b[256];
        int dpms_mode;
        bool busy; /* is scanout buffer being updated frequently? */
@@ -153,6 +151,12 @@ struct intel_crtc {
        struct intel_overlay *overlay;
        struct intel_unpin_work *unpin_work;
        int fdi_lanes;
+
+       struct drm_gem_object *cursor_bo;
+       uint32_t cursor_addr;
+       int16_t cursor_x, cursor_y;
+       int16_t cursor_width, cursor_height;
+       bool cursor_visible;
 };
 
 #define to_intel_crtc(x) container_of(x, struct intel_crtc, base)
@@ -179,6 +183,8 @@ extern void intel_dp_init(struct drm_device *dev, int dp_reg);
 void
 intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
                 struct drm_display_mode *adjusted_mode);
+extern bool intel_pch_has_edp(struct drm_crtc *crtc);
+extern bool intel_dpd_is_edp(struct drm_device *dev);
 extern void intel_edp_link_config (struct intel_encoder *, int *, int *);
 
 
@@ -215,6 +221,9 @@ extern void intel_init_clock_gating(struct drm_device *dev);
 extern void ironlake_enable_drps(struct drm_device *dev);
 extern void ironlake_disable_drps(struct drm_device *dev);
 
+extern int intel_pin_and_fence_fb_obj(struct drm_device *dev,
+                                     struct drm_gem_object *obj);
+
 extern int intel_framebuffer_init(struct drm_device *dev,
                                  struct intel_framebuffer *ifb,
                                  struct drm_mode_fb_cmd *mode_cmd,
index c3c505244e07367c3a8a3cb17698d60ef5ce5407..3e18c9e7729b6a475ce270d3329acf3d54288f1c 100644 (file)
@@ -98,7 +98,7 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
 
        mutex_lock(&dev->struct_mutex);
 
-       ret = i915_gem_object_pin(fbo, 64*1024);
+       ret = intel_pin_and_fence_fb_obj(dev, fbo);
        if (ret) {
                DRM_ERROR("failed to pin fb: %d\n", ret);
                goto out_unref;
@@ -236,7 +236,7 @@ int intel_fbdev_destroy(struct drm_device *dev,
 
        drm_framebuffer_cleanup(&ifb->base);
        if (ifb->obj)
-               drm_gem_object_unreference_unlocked(ifb->obj);
+               drm_gem_object_unreference(ifb->obj);
 
        return 0;
 }
index 83bd764b000e51cff80ac0a79a14b8cc5f05fcb4..197887ed1823fcc4e1023d3fbcc2900488b2796c 100644 (file)
@@ -54,10 +54,11 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder,
        struct intel_hdmi_priv *hdmi_priv = intel_encoder->dev_priv;
        u32 sdvox;
 
-       sdvox = SDVO_ENCODING_HDMI |
-               SDVO_BORDER_ENABLE |
-               SDVO_VSYNC_ACTIVE_HIGH |
-               SDVO_HSYNC_ACTIVE_HIGH;
+       sdvox = SDVO_ENCODING_HDMI | SDVO_BORDER_ENABLE;
+       if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
+               sdvox |= SDVO_VSYNC_ACTIVE_HIGH;
+       if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
+               sdvox |= SDVO_HSYNC_ACTIVE_HIGH;
 
        if (hdmi_priv->has_hdmi_sink) {
                sdvox |= SDVO_AUDIO_ENABLE;
diff --git