Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg...
author Linus Torvalds <torvalds@linux-foundation.org>
Wed, 31 Dec 2008 01:28:09 +0000 (17:28 -0800)
committer Linus Torvalds <torvalds@linux-foundation.org>
Wed, 31 Dec 2008 01:28:09 +0000 (17:28 -0800)
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/slab-2.6:
  slub: avoid leaking caches or refcounts on sysfs error
  slab: Fix comment on #endif
  slab: remove GFP_THISNODE clearing from alloc_slabmgmt()
  slub: Add might_sleep_if() to slab_alloc()
  SLUB: failslab support
  slub: Fix incorrect use of loose
  slab: Update the kmem_cache_create documentation regarding the name parameter
  slub: make early_kmem_cache_node_alloc void
  slab: unsigned slabp->inuse cannot be less than 0
  slub - fix get_object_page comment
  SLUB: Replace __builtin_return_address(0) with _RET_IP_.
  SLUB: cleanup - define macros instead of hardcoded numbers

507 files changed:
Documentation/RCU/00-INDEX
Documentation/RCU/trace.txt [new file with mode: 0644]
Documentation/block/biodoc.txt
Documentation/lockstat.txt
Makefile
arch/alpha/include/asm/io.h
arch/powerpc/platforms/pseries/rtasd.c
arch/s390/mm/pgtable.c
arch/sparc/Kconfig
arch/sparc/Kconfig.debug
arch/sparc/Makefile
arch/sparc/boot/.gitignore [moved from arch/sparc64/boot/.gitignore with 60% similarity]
arch/sparc/boot/Makefile
arch/sparc/boot/piggyback_32.c [moved from arch/sparc/boot/piggyback.c with 100% similarity]
arch/sparc/boot/piggyback_64.c [moved from arch/sparc64/boot/piggyback.c with 100% similarity]
arch/sparc/configs/sparc32_defconfig [moved from arch/sparc/defconfig with 100% similarity]
arch/sparc/configs/sparc64_defconfig [moved from arch/sparc64/defconfig with 100% similarity]
arch/sparc/include/asm/Kbuild
arch/sparc/include/asm/asm.h [new file with mode: 0644]
arch/sparc/include/asm/atomic_64.h
arch/sparc/include/asm/bitops_64.h
arch/sparc/include/asm/hypervisor.h
arch/sparc/include/asm/irq_32.h
arch/sparc/include/asm/irq_64.h
arch/sparc/include/asm/irqflags_64.h
arch/sparc/include/asm/module.h
arch/sparc/include/asm/module_32.h [deleted file]
arch/sparc/include/asm/module_64.h [deleted file]
arch/sparc/include/asm/openprom_32.h
arch/sparc/include/asm/oplib_32.h
arch/sparc/include/asm/pil.h
arch/sparc/include/asm/scatterlist.h
arch/sparc/include/asm/scatterlist_32.h [deleted file]
arch/sparc/include/asm/scatterlist_64.h [deleted file]
arch/sparc/include/asm/sections.h
arch/sparc/include/asm/sections_32.h [deleted file]
arch/sparc/include/asm/sections_64.h [deleted file]
arch/sparc/include/asm/spinlock_64.h
arch/sparc/include/asm/spitfire.h
arch/sparc/include/asm/system_32.h
arch/sparc/include/asm/system_64.h
arch/sparc/include/asm/tsb.h
arch/sparc/include/asm/ttable.h
arch/sparc/include/asm/unistd.h
arch/sparc/include/asm/unistd_32.h [deleted file]
arch/sparc/include/asm/unistd_64.h [deleted file]
arch/sparc/kernel/.gitignore [new file with mode: 0644]
arch/sparc/kernel/Makefile
arch/sparc/kernel/asm-offsets.c
arch/sparc/kernel/audit.c [moved from arch/sparc64/kernel/audit.c with 100% similarity]
arch/sparc/kernel/auxio_32.c [moved from arch/sparc/kernel/auxio.c with 100% similarity]
arch/sparc/kernel/auxio_64.c [moved from arch/sparc64/kernel/auxio.c with 67% similarity]
arch/sparc/kernel/central.c [moved from arch/sparc64/kernel/central.c with 100% similarity]
arch/sparc/kernel/cherrs.S [moved from arch/sparc64/kernel/cherrs.S with 98% similarity]
arch/sparc/kernel/chmc.c [moved from arch/sparc64/kernel/chmc.c with 100% similarity]
arch/sparc/kernel/compat_audit.c [moved from arch/sparc64/kernel/compat_audit.c with 91% similarity]
arch/sparc/kernel/cpu.c
arch/sparc/kernel/devices.c
arch/sparc/kernel/ds.c [moved from arch/sparc64/kernel/ds.c with 100% similarity]
arch/sparc/kernel/dtlb_miss.S [moved from arch/sparc64/kernel/dtlb_miss.S with 100% similarity]
arch/sparc/kernel/dtlb_prot.S [moved from arch/sparc64/kernel/dtlb_prot.S with 100% similarity]
arch/sparc/kernel/ebus.c [moved from arch/sparc64/kernel/ebus.c with 100% similarity]
arch/sparc/kernel/entry.h [moved from arch/sparc64/kernel/entry.h with 79% similarity]
arch/sparc/kernel/etrap_32.S [moved from arch/sparc/kernel/etrap.S with 100% similarity]
arch/sparc/kernel/etrap_64.S [moved from arch/sparc64/kernel/etrap.S with 97% similarity]
arch/sparc/kernel/fpu_traps.S [moved from arch/sparc64/kernel/fpu_traps.S with 100% similarity]
arch/sparc/kernel/ftrace.c [moved from arch/sparc64/kernel/ftrace.c with 100% similarity]
arch/sparc/kernel/getsetcc.S [moved from arch/sparc64/kernel/getsetcc.S with 100% similarity]
arch/sparc/kernel/head_32.S [moved from arch/sparc/kernel/head.S with 99% similarity]
arch/sparc/kernel/head_64.S [moved from arch/sparc64/kernel/head.S with 99% similarity]
arch/sparc/kernel/helpers.S [moved from arch/sparc64/kernel/helpers.S with 100% similarity]
arch/sparc/kernel/hvapi.c [moved from arch/sparc64/kernel/hvapi.c with 100% similarity]
arch/sparc/kernel/hvcalls.S [moved from arch/sparc64/kernel/hvcalls.S with 96% similarity]
arch/sparc/kernel/hvtramp.S [moved from arch/sparc64/kernel/hvtramp.S with 95% similarity]
arch/sparc/kernel/idprom.c
arch/sparc/kernel/init_task.c
arch/sparc/kernel/iommu.c [moved from arch/sparc64/kernel/iommu.c with 100% similarity]
arch/sparc/kernel/iommu_common.h [moved from arch/sparc64/kernel/iommu_common.h with 100% similarity]
arch/sparc/kernel/ioport.c
arch/sparc/kernel/irq_32.c [moved from arch/sparc/kernel/irq.c with 99% similarity]
arch/sparc/kernel/irq_64.c [moved from arch/sparc64/kernel/irq.c with 94% similarity]
arch/sparc/kernel/itlb_miss.S [moved from arch/sparc64/kernel/itlb_miss.S with 100% similarity]
arch/sparc/kernel/ivec.S [moved from arch/sparc64/kernel/ivec.S with 100% similarity]
arch/sparc/kernel/kernel.h [new file with mode: 0644]
arch/sparc/kernel/kgdb_32.c [moved from arch/sparc/kernel/kgdb.c with 100% similarity]
arch/sparc/kernel/kgdb_64.c [moved from arch/sparc64/kernel/kgdb.c with 100% similarity]
arch/sparc/kernel/kprobes.c [moved from arch/sparc64/kernel/kprobes.c with 100% similarity]
arch/sparc/kernel/kstack.h [moved from arch/sparc64/kernel/kstack.h with 100% similarity]
arch/sparc/kernel/ktlb.S [moved from arch/sparc64/kernel/ktlb.S with 100% similarity]
arch/sparc/kernel/ldc.c [moved from arch/sparc64/kernel/ldc.c with 100% similarity]
arch/sparc/kernel/mdesc.c [moved from arch/sparc64/kernel/mdesc.c with 99% similarity]
arch/sparc/kernel/misctrap.S [moved from arch/sparc64/kernel/misctrap.S with 100% similarity]
arch/sparc/kernel/module.c
arch/sparc/kernel/muldiv.c
arch/sparc/kernel/of_device_32.c [moved from arch/sparc/kernel/of_device.c with 100% similarity]
arch/sparc/kernel/of_device_64.c [moved from arch/sparc64/kernel/of_device.c with 98% similarity]
arch/sparc/kernel/pci.c [moved from arch/sparc64/kernel/pci.c with 100% similarity]
arch/sparc/kernel/pci_common.c [moved from arch/sparc64/kernel/pci_common.c with 100% similarity]
arch/sparc/kernel/pci_fire.c [moved from arch/sparc64/kernel/pci_fire.c with 100% similarity]
arch/sparc/kernel/pci_impl.h [moved from arch/sparc64/kernel/pci_impl.h with 100% similarity]
arch/sparc/kernel/pci_msi.c [moved from arch/sparc64/kernel/pci_msi.c with 100% similarity]
arch/sparc/kernel/pci_psycho.c [moved from arch/sparc64/kernel/pci_psycho.c with 100% similarity]
arch/sparc/kernel/pci_sabre.c [moved from arch/sparc64/kernel/pci_sabre.c with 100% similarity]
arch/sparc/kernel/pci_schizo.c [moved from arch/sparc64/kernel/pci_schizo.c with 100% similarity]
arch/sparc/kernel/pci_sun4v.c [moved from arch/sparc64/kernel/pci_sun4v.c with 100% similarity]
arch/sparc/kernel/pci_sun4v.h [moved from arch/sparc64/kernel/pci_sun4v.h with 100% similarity]
arch/sparc/kernel/pci_sun4v_asm.S [moved from arch/sparc64/kernel/pci_sun4v_asm.S with 100% similarity]
arch/sparc/kernel/pcic.c
arch/sparc/kernel/pmc.c
arch/sparc/kernel/power.c [moved from arch/sparc64/kernel/power.c with 100% similarity]
arch/sparc/kernel/process_32.c [moved from arch/sparc/kernel/process.c with 99% similarity]
arch/sparc/kernel/process_64.c [moved from arch/sparc64/kernel/process.c with 100% similarity]
arch/sparc/kernel/prom.h [new file with mode: 0644]
arch/sparc/kernel/prom_32.c [moved from arch/sparc/kernel/prom.c with 51% similarity]
arch/sparc/kernel/prom_64.c [new file with mode: 0644]
arch/sparc/kernel/prom_common.c [new file with mode: 0644]
arch/sparc/kernel/prom_irqtrans.c [moved from arch/sparc64/kernel/prom.c with 53% similarity]
arch/sparc/kernel/psycho_common.c [moved from arch/sparc64/kernel/psycho_common.c with 100% similarity]
arch/sparc/kernel/psycho_common.h [moved from arch/sparc64/kernel/psycho_common.h with 100% similarity]
arch/sparc/kernel/ptrace_32.c [moved from arch/sparc/kernel/ptrace.c with 100% similarity]
arch/sparc/kernel/ptrace_64.c [moved from arch/sparc64/kernel/ptrace.c with 100% similarity]
arch/sparc/kernel/reboot.c [moved from arch/sparc64/kernel/reboot.c with 100% similarity]
arch/sparc/kernel/rtrap_32.S [moved from arch/sparc/kernel/rtrap.S with 100% similarity]
arch/sparc/kernel/rtrap_64.S [moved from arch/sparc64/kernel/rtrap.S with 95% similarity]
arch/sparc/kernel/sbus.c [moved from arch/sparc64/kernel/sbus.c with 100% similarity]
arch/sparc/kernel/setup_32.c [moved from arch/sparc/kernel/setup.c with 98% similarity]
arch/sparc/kernel/setup_64.c [moved from arch/sparc64/kernel/setup.c with 99% similarity]
arch/sparc/kernel/signal32.c [moved from arch/sparc64/kernel/signal32.c with 100% similarity]
arch/sparc/kernel/signal_32.c [moved from arch/sparc/kernel/signal.c with 100% similarity]
arch/sparc/kernel/signal_64.c [moved from arch/sparc64/kernel/signal.c with 100% similarity]
arch/sparc/kernel/smp_32.c [moved from arch/sparc/kernel/smp.c with 100% similarity]
arch/sparc/kernel/smp_64.c [moved from arch/sparc64/kernel/smp.c with 99% similarity]
arch/sparc/kernel/sparc_ksyms_32.c [moved from arch/sparc/kernel/sparc_ksyms.c with 98% similarity]
arch/sparc/kernel/sparc_ksyms_64.c [moved from arch/sparc64/kernel/sparc64_ksyms.c with 97% similarity]
arch/sparc/kernel/spiterrs.S [moved from arch/sparc64/kernel/spiterrs.S with 99% similarity]
arch/sparc/kernel/sstate.c [moved from arch/sparc64/kernel/sstate.c with 100% similarity]
arch/sparc/kernel/stacktrace.c [moved from arch/sparc64/kernel/stacktrace.c with 60% similarity]
arch/sparc/kernel/starfire.c [moved from arch/sparc64/kernel/starfire.c with 100% similarity]
arch/sparc/kernel/sun4c_irq.c
arch/sparc/kernel/sun4d_irq.c
arch/sparc/kernel/sun4m_irq.c
arch/sparc/kernel/sun4v_ivec.S [moved from arch/sparc64/kernel/sun4v_ivec.S with 98% similarity]
arch/sparc/kernel/sun4v_tlb_miss.S [moved from arch/sparc64/kernel/sun4v_tlb_miss.S with 100% similarity]
arch/sparc/kernel/sys32.S [moved from arch/sparc64/kernel/sys32.S with 100% similarity]
arch/sparc/kernel/sys_sparc32.c [moved from arch/sparc64/kernel/sys_sparc32.c with 100% similarity]
arch/sparc/kernel/sys_sparc_32.c [moved from arch/sparc/kernel/sys_sparc.c with 100% similarity]
arch/sparc/kernel/sys_sparc_64.c [moved from arch/sparc64/kernel/sys_sparc.c with 100% similarity]
arch/sparc/kernel/syscalls.S [moved from arch/sparc64/kernel/syscalls.S with 100% similarity]
arch/sparc/kernel/sysfs.c [moved from arch/sparc64/kernel/sysfs.c with 99% similarity]
arch/sparc/kernel/systbls.h [moved from arch/sparc64/kernel/systbls.h with 100% similarity]
arch/sparc/kernel/systbls_32.S [moved from arch/sparc/kernel/systbls.S with 100% similarity]
arch/sparc/kernel/systbls_64.S [moved from arch/sparc64/kernel/systbls.S with 100% similarity]
arch/sparc/kernel/time_32.c [moved from arch/sparc/kernel/time.c with 100% similarity]
arch/sparc/kernel/time_64.c [moved from arch/sparc64/kernel/time.c with 100% similarity]
arch/sparc/kernel/trampoline_32.S [moved from arch/sparc/kernel/trampoline.S with 100% similarity]
arch/sparc/kernel/trampoline_64.S [moved from arch/sparc64/kernel/trampoline.S with 98% similarity]
arch/sparc/kernel/traps_32.c [moved from arch/sparc/kernel/traps.c with 94% similarity]
arch/sparc/kernel/traps_64.c [moved from arch/sparc64/kernel/traps.c with 99% similarity]
arch/sparc/kernel/tsb.S [moved from arch/sparc64/kernel/tsb.S with 99% similarity]
arch/sparc/kernel/ttable.S [moved from arch/sparc64/kernel/ttable.S with 99% similarity]
arch/sparc/kernel/una_asm_32.S [moved from arch/sparc/kernel/una_asm.S with 100% similarity]
arch/sparc/kernel/una_asm_64.S [moved from arch/sparc64/kernel/una_asm.S with 100% similarity]
arch/sparc/kernel/unaligned_32.c [moved from arch/sparc/kernel/unaligned.c with 100% similarity]
arch/sparc/kernel/unaligned_64.c [moved from arch/sparc64/kernel/unaligned.c with 100% similarity]
arch/sparc/kernel/us2e_cpufreq.c [moved from arch/sparc64/kernel/us2e_cpufreq.c with 100% similarity]
arch/sparc/kernel/us3_cpufreq.c [moved from arch/sparc64/kernel/us3_cpufreq.c with 100% similarity]
arch/sparc/kernel/utrap.S [moved from arch/sparc64/kernel/utrap.S with 100% similarity]
arch/sparc/kernel/vio.c [moved from arch/sparc64/kernel/vio.c with 100% similarity]
arch/sparc/kernel/viohs.c [moved from arch/sparc64/kernel/viohs.c with 100% similarity]
arch/sparc/kernel/visemul.c [moved from arch/sparc64/kernel/visemul.c with 100% similarity]
arch/sparc/kernel/vmlinux.lds.S
arch/sparc/kernel/winfixup.S [moved from arch/sparc64/kernel/winfixup.S with 100% similarity]
arch/sparc/lib/GENbzero.S [moved from arch/sparc64/lib/GENbzero.S with 100% similarity]
arch/sparc/lib/GENcopy_from_user.S [moved from arch/sparc64/lib/GENcopy_from_user.S with 100% similarity]
arch/sparc/lib/GENcopy_to_user.S [moved from arch/sparc64/lib/GENcopy_to_user.S with 100% similarity]
arch/sparc/lib/GENmemcpy.S [moved from arch/sparc64/lib/GENmemcpy.S with 100% similarity]
arch/sparc/lib/GENpage.S [moved from arch/sparc64/lib/GENpage.S with 100% similarity]
arch/sparc/lib/GENpatch.S [moved from arch/sparc64/lib/GENpatch.S with 100% similarity]
arch/sparc/lib/Makefile
arch/sparc/lib/NG2copy_from_user.S [moved from arch/sparc64/lib/NG2copy_from_user.S with 100% similarity]
arch/sparc/lib/NG2copy_to_user.S [moved from arch/sparc64/lib/NG2copy_to_user.S with 100% similarity]
arch/sparc/lib/NG2memcpy.S [moved from arch/sparc64/lib/NG2memcpy.S with 100% similarity]
arch/sparc/lib/NG2page.S [moved from arch/sparc64/lib/NG2page.S with 100% similarity]
arch/sparc/lib/NG2patch.S [moved from arch/sparc64/lib/NG2patch.S with 100% similarity]
arch/sparc/lib/NGbzero.S [moved from arch/sparc64/lib/NGbzero.S with 100% similarity]
arch/sparc/lib/NGcopy_from_user.S [moved from arch/sparc64/lib/NGcopy_from_user.S with 100% similarity]
arch/sparc/lib/NGcopy_to_user.S [moved from arch/sparc64/lib/NGcopy_to_user.S with 100% similarity]
arch/sparc/lib/NGmemcpy.S [moved from arch/sparc64/lib/NGmemcpy.S with 100% similarity]
arch/sparc/lib/NGpage.S [moved from arch/sparc64/lib/NGpage.S with 100% similarity]
arch/sparc/lib/NGpatch.S [moved from arch/sparc64/lib/NGpatch.S with 100% similarity]
arch/sparc/lib/PeeCeeI.c [moved from arch/sparc64/lib/PeeCeeI.c with 100% similarity]
arch/sparc/lib/U1copy_from_user.S [moved from arch/sparc64/lib/U1copy_from_user.S with 100% similarity]
arch/sparc/lib/U1copy_to_user.S [moved from arch/sparc64/lib/U1copy_to_user.S with 100% similarity]
arch/sparc/lib/U1memcpy.S [moved from arch/sparc64/lib/U1memcpy.S with 100% similarity]
arch/sparc/lib/U3copy_from_user.S [moved from arch/sparc64/lib/U3copy_from_user.S with 100% similarity]
arch/sparc/lib/U3copy_to_user.S [moved from arch/sparc64/lib/U3copy_to_user.S with 100% similarity]
arch/sparc/lib/U3memcpy.S [moved from arch/sparc64/lib/U3memcpy.S with 100% similarity]
arch/sparc/lib/U3patch.S [moved from arch/sparc64/lib/U3patch.S with 100% similarity]
arch/sparc/lib/VISsave.S [moved from arch/sparc64/lib/VISsave.S with 100% similarity]
arch/sparc/lib/atomic_32.S [moved from arch/sparc/lib/atomic.S with 100% similarity]
arch/sparc/lib/atomic_64.S [moved from arch/sparc64/lib/atomic.S with 84% similarity]
arch/sparc/lib/bitops.S [moved from arch/sparc64/lib/bitops.S with 84% similarity]
arch/sparc/lib/bzero.S [moved from arch/sparc64/lib/bzero.S with 100% similarity]
arch/sparc/lib/checksum_32.S [moved from arch/sparc/lib/checksum.S with 100% similarity]
arch/sparc/lib/checksum_64.S [moved from arch/sparc64/lib/checksum.S with 100% similarity]
arch/sparc/lib/clear_page.S [moved from arch/sparc64/lib/clear_page.S with 100% similarity]
arch/sparc/lib/copy_in_user.S [moved from arch/sparc64/lib/copy_in_user.S with 100% similarity]
arch/sparc/lib/copy_page.S [moved from arch/sparc64/lib/copy_page.S with 100% similarity]
arch/sparc/lib/csum_copy.S [moved from arch/sparc64/lib/csum_copy.S with 100% similarity]
arch/sparc/lib/csum_copy_from_user.S [moved from arch/sparc64/lib/csum_copy_from_user.S with 100% similarity]
arch/sparc/lib/csum_copy_to_user.S [moved from arch/sparc64/lib/csum_copy_to_user.S with 100% similarity]
arch/sparc/lib/ipcsum.S [moved from arch/sparc64/lib/ipcsum.S with 100% similarity]
arch/sparc/lib/mcount.S [moved from arch/sparc64/lib/mcount.S with 100% similarity]
arch/sparc/lib/memcmp.S
arch/sparc/lib/memmove.S [moved from arch/sparc64/lib/memmove.S with 100% similarity]
arch/sparc/lib/memscan_32.S [moved from arch/sparc/lib/memscan.S with 100% similarity]
arch/sparc/lib/memscan_64.S [moved from arch/sparc64/lib/memscan.S with 100% similarity]
arch/sparc/lib/rwsem_32.S [moved from arch/sparc/lib/rwsem.S with 100% similarity]
arch/sparc/lib/rwsem_64.S [moved from arch/sparc64/lib/rwsem.S with 92% similarity]
arch/sparc/lib/strlen.S
arch/sparc/lib/strlen_user_32.S [moved from arch/sparc/lib/strlen_user.S with 100% similarity]
arch/sparc/lib/strlen_user_64.S [moved from arch/sparc64/lib/strlen_user.S with 100% similarity]
arch/sparc/lib/strncmp_32.S [moved from arch/sparc/lib/strncmp.S with 100% similarity]
arch/sparc/lib/strncmp_64.S [moved from arch/sparc64/lib/strncmp.S with 100% similarity]
arch/sparc/lib/strncpy_from_user_32.S [moved from arch/sparc/lib/strncpy_from_user.S with 100% similarity]
arch/sparc/lib/strncpy_from_user_64.S [moved from arch/sparc64/lib/strncpy_from_user.S with 100% similarity]
arch/sparc/lib/user_fixup.c [moved from arch/sparc64/lib/user_fixup.c with 100% similarity]
arch/sparc/lib/xor.S [moved from arch/sparc64/lib/xor.S with 100% similarity]
arch/sparc/math-emu/Makefile
arch/sparc/math-emu/ashldi3.S [deleted file]
arch/sparc/math-emu/math_32.c [moved from arch/sparc/math-emu/math.c with 99% similarity]
arch/sparc/math-emu/math_64.c [moved from arch/sparc64/math-emu/math.c with 99% similarity]
arch/sparc/math-emu/sfp-util_32.h [moved from arch/sparc/math-emu/sfp-util.h with 100% similarity]
arch/sparc/math-emu/sfp-util_64.h [moved from arch/sparc64/math-emu/sfp-util.h with 100% similarity]
arch/sparc/mm/Makefile
arch/sparc/mm/fault_32.c [moved from arch/sparc/mm/fault.c with 100% similarity]
arch/sparc/mm/fault_64.c [moved from arch/sparc64/mm/fault.c with 100% similarity]
arch/sparc/mm/generic_32.c [moved from arch/sparc/mm/generic.c with 100% similarity]
arch/sparc/mm/generic_64.c [moved from arch/sparc64/mm/generic.c with 100% similarity]
arch/sparc/mm/hugetlbpage.c [moved from arch/sparc64/mm/hugetlbpage.c with 100% similarity]
arch/sparc/mm/init_32.c [moved from arch/sparc/mm/init.c with 97% similarity]
arch/sparc/mm/init_64.c [moved from arch/sparc64/mm/init.c with 99% similarity]
arch/sparc/mm/init_64.h [moved from arch/sparc64/mm/init.h with 100% similarity]
arch/sparc/mm/io-unit.c
arch/sparc/mm/iommu.c
arch/sparc/mm/srmmu.c
arch/sparc/mm/sun4c.c
arch/sparc/mm/tlb.c [moved from arch/sparc64/mm/tlb.c with 100% similarity]
arch/sparc/mm/tsb.c [moved from arch/sparc64/mm/tsb.c with 97% similarity]
arch/sparc/mm/ultra.S [moved from arch/sparc64/mm/ultra.S with 99% similarity]
arch/sparc/oprofile/init.c
arch/sparc/prom/Makefile
arch/sparc/prom/bootstr_32.c [moved from arch/sparc/prom/bootstr.c with 100% similarity]
arch/sparc/prom/bootstr_64.c [moved from arch/sparc64/prom/bootstr.c with 100% similarity]
arch/sparc/prom/cif.S [moved from arch/sparc64/prom/cif.S with 100% similarity]
arch/sparc/prom/console_32.c [moved from arch/sparc/prom/console.c with 100% similarity]
arch/sparc/prom/console_64.c [moved from arch/sparc64/prom/console.c with 100% similarity]
arch/sparc/prom/devops_32.c [moved from arch/sparc/prom/devops.c with 100% similarity]
arch/sparc/prom/devops_64.c [moved from arch/sparc64/prom/devops.c with 100% similarity]
arch/sparc/prom/init_32.c [moved from arch/sparc/prom/init.c with 100% similarity]
arch/sparc/prom/init_64.c [moved from arch/sparc64/prom/init.c with 100% similarity]
arch/sparc/prom/misc_32.c [moved from arch/sparc/prom/misc.c with 98% similarity]
arch/sparc/prom/misc_64.c [moved from arch/sparc64/prom/misc.c with 100% similarity]
arch/sparc/prom/p1275.c [moved from arch/sparc64/prom/p1275.c with 100% similarity]
arch/sparc/prom/printf.c
arch/sparc/prom/tree_32.c [moved from arch/sparc/prom/tree.c with 97% similarity]
arch/sparc/prom/tree_64.c [moved from arch/sparc64/prom/tree.c with 100% similarity]
arch/sparc64/Kconfig [deleted file]
arch/sparc64/Kconfig.debug [deleted file]
arch/sparc64/Makefile [deleted file]
arch/sparc64/boot/Makefile [deleted file]
arch/sparc64/kernel/Makefile [deleted file]
arch/sparc64/kernel/asm-offsets.c [deleted file]
arch/sparc64/kernel/cpu.c [deleted file]
arch/sparc64/kernel/idprom.c [deleted file]
arch/sparc64/kernel/init_task.c [deleted file]
arch/sparc64/kernel/module.c [deleted file]
arch/sparc64/kernel/vmlinux.lds.S [deleted file]
arch/sparc64/lib/Makefile [deleted file]
arch/sparc64/lib/iomap.c [deleted file]
arch/sparc64/lib/memcmp.S [deleted file]
arch/sparc64/lib/strlen.S [deleted file]
arch/sparc64/math-emu/Makefile [deleted file]
arch/sparc64/mm/Makefile [deleted file]
arch/sparc64/oprofile/Makefile [deleted file]
arch/sparc64/oprofile/init.c [deleted file]
arch/sparc64/prom/Makefile [deleted file]
arch/sparc64/prom/printf.c [deleted file]
arch/um/include/asm/system.h
arch/x86/Kconfig
arch/x86/include/asm/dma-mapping.h
arch/x86/include/asm/io_apic.h
arch/x86/include/asm/iommu.h
arch/x86/include/asm/irq_vectors.h
arch/x86/include/asm/pci.h
arch/x86/include/asm/pci_64.h
arch/x86/include/asm/uaccess.h
arch/x86/include/asm/uaccess_32.h
arch/x86/include/asm/uaccess_64.h
arch/x86/kernel/Makefile
arch/x86/kernel/hpet.c
arch/x86/kernel/io_apic.c
arch/x86/kernel/irq.c
arch/x86/kernel/irq_32.c
arch/x86/kernel/irq_64.c
arch/x86/kernel/irqinit_32.c
arch/x86/kernel/irqinit_64.c
arch/x86/kernel/pci-dma.c
arch/x86/kernel/pci-swiotlb_64.c
arch/x86/kernel/quirks.c
arch/x86/kernel/setup.c
arch/x86/lib/usercopy_32.c
arch/x86/lib/usercopy_64.c
arch/x86/mm/init_32.c
block/Kconfig
block/as-iosched.c
block/blk-barrier.c
block/blk-core.c
block/blk-settings.c
block/blk-softirq.c
block/blk-sysfs.c
block/blk-tag.c
block/blk-timeout.c
block/cfq-iosched.c
block/compat_ioctl.c
block/deadline-iosched.c
block/elevator.c
block/genhd.c
block/ioctl.c
block/noop-iosched.c
block/scsi_ioctl.c
drivers/block/cciss.c
drivers/block/cciss.h
drivers/block/cciss_cmd.h
drivers/block/loop.c
drivers/block/nbd.c
drivers/block/virtio_blk.c
drivers/block/xen-blkfront.c
drivers/cdrom/cdrom.c
drivers/char/agp/intel-agp.c
drivers/char/hpet.c
drivers/char/random.c
drivers/clocksource/acpi_pm.c
drivers/gpu/drm/Kconfig
drivers/gpu/drm/Makefile
drivers/gpu/drm/drm_auth.c
drivers/gpu/drm/drm_bufs.c
drivers/gpu/drm/drm_context.c
drivers/gpu/drm/drm_crtc.c [new file with mode: 0644]
drivers/gpu/drm/drm_crtc_helper.c [new file with mode: 0644]
drivers/gpu/drm/drm_drv.c
drivers/gpu/drm/drm_edid.c [new file with mode: 0644]
drivers/gpu/drm/drm_fops.c
drivers/gpu/drm/drm_gem.c
drivers/gpu/drm/drm_hashtab.c
drivers/gpu/drm/drm_ioctl.c
drivers/gpu/drm/drm_irq.c
drivers/gpu/drm/drm_lock.c
drivers/gpu/drm/drm_mm.c
drivers/gpu/drm/drm_modes.c [new file with mode: 0644]
drivers/gpu/drm/drm_proc.c
drivers/gpu/drm/drm_stub.c
drivers/gpu/drm/drm_sysfs.c
drivers/gpu/drm/drm_vm.c
drivers/gpu/drm/i915/Makefile
drivers/gpu/drm/i915/dvo.h [new file with mode: 0644]
drivers/gpu/drm/i915/dvo_ch7017.c [new file with mode: 0644]
drivers/gpu/drm/i915/dvo_ch7xxx.c [new file with mode: 0644]
drivers/gpu/drm/i915/dvo_ivch.c [new file with mode: 0644]
drivers/gpu/drm/i915/dvo_sil164.c [new file with mode: 0644]
drivers/gpu/drm/i915/dvo_tfp410.c [new file with mode: 0644]
drivers/gpu/drm/i915/i915_dma.c
drivers/gpu/drm/i915/i915_drv.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_gem_proc.c
drivers/gpu/drm/i915/i915_gem_tiling.c
drivers/gpu/drm/i915/i915_irq.c
drivers/gpu/drm/i915/i915_mem.c
drivers/gpu/drm/i915/i915_opregion.c
drivers/gpu/drm/i915/i915_reg.h
drivers/gpu/drm/i915/intel_bios.c [new file with mode: 0644]
drivers/gpu/drm/i915/intel_bios.h [new file with mode: 0644]
drivers/gpu/drm/i915/intel_crt.c [new file with mode: 0644]
drivers/gpu/drm/i915/intel_display.c [new file with mode: 0644]
drivers/gpu/drm/i915/intel_drv.h [new file with mode: 0644]
drivers/gpu/drm/i915/intel_dvo.c [new file with mode: 0644]
drivers/gpu/drm/i915/intel_fb.c [new file with mode: 0644]
drivers/gpu/drm/i915/intel_i2c.c [new file with mode: 0644]
drivers/gpu/drm/i915/intel_lvds.c [new file with mode: 0644]
drivers/gpu/drm/i915/intel_modes.c [new file with mode: 0644]
drivers/gpu/drm/i915/intel_sdvo.c [new file with mode: 0644]
drivers/gpu/drm/i915/intel_sdvo_regs.h [new file with mode: 0644]
drivers/gpu/drm/i915/intel_tv.c [new file with mode: 0644]
drivers/gpu/drm/radeon/r300_cmdbuf.c
drivers/gpu/drm/radeon/radeon_cp.c
drivers/gpu/drm/radeon/radeon_drv.c
drivers/gpu/drm/radeon/radeon_drv.h
drivers/gpu/drm/radeon/radeon_state.c
drivers/input/touchscreen/ads7846.c
drivers/md/dm-crypt.c
drivers/md/dm-io.c
drivers/md/dm.c
drivers/pci/intr_remapping.c
drivers/pci/msi.c
drivers/video/console/vgacon.c
drivers/video/cyber2000fb.c
drivers/xen/events.c
fs/aio.c
fs/bio-integrity.c
fs/bio.c
fs/buffer.c
fs/exec.c
fs/ext4/super.c
fs/proc/stat.c
include/asm-generic/bug.h
include/drm/Kbuild
include/drm/drm.h
include/drm/drmP.h
include/drm/drm_crtc.h [new file with mode: 0644]
include/drm/drm_crtc_helper.h [new file with mode: 0644]
include/drm/drm_edid.h [new file with mode: 0644]
include/drm/drm_mode.h [new file with mode: 0644]
include/drm/drm_sarea.h
include/drm/i915_drm.h
include/linux/Kbuild
include/linux/aio.h
include/linux/bio.h
include/linux/blkdev.h
include/linux/bottom_half.h
include/linux/buffer_head.h
include/linux/console.h
include/linux/debug_locks.h
include/linux/elevator.h
include/linux/futex.h
include/linux/genhd.h
include/linux/hardirq.h
include/linux/hrtimer.h
include/linux/interrupt.h
include/linux/irq.h
include/linux/irqnr.h
include/linux/kernel.h
include/linux/kernel_stat.h
include/linux/lockdep.h
include/linux/mm_types.h
include/linux/msi.h
include/linux/mutex.h
include/linux/of_platform.h
include/linux/posix-timers.h
include/linux/random.h
include/linux/rcuclassic.h
include/linux/rcupdate.h
include/linux/rcutree.h [new file with mode: 0644]
include/linux/swiotlb.h
include/linux/timex.h
include/linux/types.h
include/linux/uaccess.h
init/Kconfig
init/main.c
kernel/Kconfig.preempt
kernel/Makefile
kernel/exit.c
kernel/extable.c
kernel/fork.c
kernel/futex.c
kernel/hrtimer.c
kernel/irq/Makefile
kernel/irq/autoprobe.c
kernel/irq/chip.c
kernel/irq/handle.c
kernel/irq/internals.h
kernel/irq/manage.c
kernel/irq/numa_migrate.c [new file with mode: 0644]
kernel/irq/proc.c
kernel/irq/spurious.c
kernel/lockdep.c
kernel/lockdep_proc.c
kernel/mutex.c
kernel/notifier.c
kernel/panic.c
kernel/posix-cpu-timers.c
kernel/posix-timers.c
kernel/printk.c
kernel/rcuclassic.c
kernel/rcupreempt.c
kernel/rcupreempt_trace.c
kernel/rcutorture.c
kernel/rcutree.c [new file with mode: 0644]
kernel/rcutree_trace.c [new file with mode: 0644]
kernel/resource.c
kernel/sched.c
kernel/softirq.c
kernel/softlockup.c
kernel/stacktrace.c
kernel/sys.c
kernel/sysctl.c
kernel/time/ntp.c
kernel/time/tick-sched.c
kernel/trace/trace_sysprof.c
lib/Kconfig.debug
lib/debugobjects.c
lib/swiotlb.c
mm/bounce.c
mm/memory.c
security/keys/keyctl.c
sound/core/hrtimer.c
sound/drivers/pcsp/pcsp.c

index 461481dfb7c31b4bfbb0509993ec0c2351cc97c2..7dc0695a8f902994647300b87a00cb9241d152fa 100644 (file)
@@ -16,6 +16,8 @@ RTFP.txt
        - List of RCU papers (bibliography) going back to 1980.
 torture.txt
        - RCU Torture Test Operation (CONFIG_RCU_TORTURE_TEST)
+trace.txt
+       - CONFIG_RCU_TRACE debugfs files and formats
 UP.txt
        - RCU on Uniprocessor Systems
 whatisRCU.txt
diff --git a/Documentation/RCU/trace.txt b/Documentation/RCU/trace.txt
new file mode 100644 (file)
index 0000000..0688482
--- /dev/null
@@ -0,0 +1,413 @@
+CONFIG_RCU_TRACE debugfs Files and Formats
+
+
+The rcupreempt and rcutree implementations of RCU provide debugfs trace
+output that summarizes counters and state.  This information is useful for
+debugging RCU itself, and can sometimes also help to debug abuses of RCU.
+Note that the rcuclassic implementation of RCU does not provide debugfs
+trace output.
+
+The following sections describe the debugfs files and formats for
+preemptable RCU (rcupreempt) and hierarchical RCU (rcutree).
+
+
+Preemptable RCU debugfs Files and Formats
+
+This implementation of RCU provides three debugfs files under the
+top-level directory RCU: rcu/rcuctrs (which displays the per-CPU
+counters used by preemptable RCU), rcu/rcugp (which displays grace-period
+counters), and rcu/rcustats (which displays internal counters for debugging RCU).
+
+The output of "cat rcu/rcuctrs" looks as follows:
+
+CPU last cur F M
+  0    5  -5 0 0
+  1   -1   0 0 0
+  2    0   1 0 0
+  3    0   1 0 0
+  4    0   1 0 0
+  5    0   1 0 0
+  6    0   2 0 0
+  7    0  -1 0 0
+  8    0   1 0 0
+ggp = 26226, state = waitzero
+
+The per-CPU fields are as follows:
+
+o      "CPU" gives the CPU number.  Offline CPUs are not displayed.
+
+o      "last" gives the value of the counter that is being decremented
+       for the current grace period phase.  In the example above,
+       the counters sum to 4, indicating that there are still four
+       RCU read-side critical sections running that started
+       before the last counter flip.
+
+o      "cur" gives the value of the counter that is currently being
+       both incremented (by rcu_read_lock()) and decremented (by
+       rcu_read_unlock()).  In the example above, the counters sum to
+       1, indicating that there is only one RCU read-side critical section
+       still running that started after the last counter flip.
+
+o      "F" indicates whether RCU is waiting for this CPU to acknowledge
+       a counter flip.  In the above example, RCU is not waiting on any CPU,
+       which is consistent with the state being "waitzero" rather than
+       "waitack".
+
+o      "M" indicates whether RCU is waiting for this CPU to execute a
+       memory barrier.  In the above example, RCU is not waiting on any CPU,
+       which is consistent with the state being "waitzero" rather than
+       "waitmb".
+
+o      "ggp" is the global grace-period counter.
+
+o      "state" is the RCU state, which can be one of the following:
+
+       o       "idle": there is no grace period in progress.
+
+       o       "waitack": RCU just incremented the global grace-period
+               counter, which has the effect of reversing the roles of
+               the "last" and "cur" counters above, and is waiting for
+               all the CPUs to acknowledge the flip.  Once the flip has
+               been acknowledged, CPUs will no longer be incrementing
+               what are now the "last" counters, so that their sum will
+               decrease monotonically down to zero.
+
+       o       "waitzero": RCU is waiting for the sum of the "last" counters
+               to decrease to zero.
+
+       o       "waitmb": RCU is waiting for each CPU to execute a memory
+               barrier, which ensures that instructions from a given CPU's
+               last RCU read-side critical section cannot be reordered
+               with instructions following the memory-barrier instruction.
+
+The output of "cat rcu/rcugp" looks as follows:
+
+oldggp=48870  newggp=48873
+
+Note that reading from this file provokes a synchronize_rcu().  The
+"oldggp" value is that of "ggp" from rcu/rcuctrs above, taken before
+executing the synchronize_rcu(), and the "newggp" value is also the
+"ggp" value, but taken after the synchronize_rcu() command returns.
+
+
+The output of "cat rcu/rcugp" looks as follows:
+
+na=1337955 nl=40 wa=1337915 wl=44 da=1337871 dl=0 dr=1337871 di=1337871
+1=50989 e1=6138 i1=49722 ie1=82 g1=49640 a1=315203 ae1=265563 a2=49640
+z1=1401244 ze1=1351605 z2=49639 m1=5661253 me1=5611614 m2=49639
+
+These are counters tracking internal preemptable-RCU events; however,
+some of them may be useful for debugging algorithms using RCU.  In
+particular, the "nl", "wl", and "dl" values track the number of RCU
+callbacks in various states.  The fields are as follows:
+
+o      "na" is the total number of RCU callbacks that have been enqueued
+       since boot.
+
+o      "nl" is the number of RCU callbacks waiting for the previous
+       grace period to end so that they can start waiting on the next
+       grace period.
+
+o      "wa" is the total number of RCU callbacks that have started waiting
+       for a grace period since boot.  "na" should be roughly equal to
+       "nl" plus "wa".
+
+o      "wl" is the number of RCU callbacks currently waiting for their
+       grace period to end.
+
+o      "da" is the total number of RCU callbacks whose grace periods
+       have completed since boot.  "wa" should be roughly equal to
+       "wl" plus "da".
+
+o      "dr" is the total number of RCU callbacks that have been removed
+       from the list of callbacks ready to invoke.  "dr" should be roughly
+       equal to "da".
+
+o      "di" is the total number of RCU callbacks that have been invoked
+       since boot.  "di" should be roughly equal to "da", though some
+       early versions of preemptable RCU had a bug so that only the
+       last CPU's count of invocations was displayed, rather than the
+       sum of all CPUs' counts.
+
+o      "1" is the number of calls to rcu_try_flip().  This should be
+       roughly equal to the sum of "e1", "i1", "a1", "z1", and "m1"
+       described below.  In other words, the number of times that
+       the state machine is visited should be equal to the sum of the
+       number of times that each state is visited plus the number of
+       times that the state-machine lock acquisition failed.
+
+o      "e1" is the number of times that rcu_try_flip() was unable to
+       acquire the fliplock.
+
+o      "i1" is the number of calls to rcu_try_flip_idle().
+
+o      "ie1" is the number of times rcu_try_flip_idle() exited early
+       due to the calling CPU having no work for RCU.
+
+o      "g1" is the number of times that rcu_try_flip_idle() decided
+       to start a new grace period.  "i1" should be roughly equal to
+       "ie1" plus "g1".
+
+o      "a1" is the number of calls to rcu_try_flip_waitack().
+
+o      "ae1" is the number of times that rcu_try_flip_waitack() found
+       that at least one CPU had not yet acknowledged the new grace period
+       (AKA "counter flip").
+
+o      "a2" is the number of time rcu_try_flip_waitack() found that
+       all CPUs had acknowledged.  "a1" should be roughly equal to
+       "ae1" plus "a2".  (This particular output was collected on
+       a 128-CPU machine, hence the smaller-than-usual fraction of
+       calls to rcu_try_flip_waitack() finding all CPUs having already
+       acknowledged.)
+
+o      "z1" is the number of calls to rcu_try_flip_waitzero().
+
+o      "ze1" is the number of times that rcu_try_flip_waitzero() found
+       that not all of the old RCU read-side critical sections had
+       completed.
+
+o      "z2" is the number of times that rcu_try_flip_waitzero() finds
+       the sum of the counters equal to zero, in other words, that
+       all of the old RCU read-side critical sections had completed.
+       The value of "z1" should be roughly equal to "ze1" plus
+       "z2".
+
+o      "m1" is the number of calls to rcu_try_flip_waitmb().
+
+o      "me1" is the number of times that rcu_try_flip_waitmb() finds
+       that at least one CPU has not yet executed a memory barrier.
+
+o      "m2" is the number of times that rcu_try_flip_waitmb() finds that
+       all CPUs have executed a memory barrier.
+
+
+Hierarchical RCU debugfs Files and Formats
+
+This implementation of RCU provides three debugfs files under the
+top-level directory RCU: rcu/rcudata (which displays fields in struct
+rcu_data), rcu/rcugp (which displays grace-period counters), and
+rcu/rcuhier (which displays the struct rcu_node hierarchy).
+
+The output of "cat rcu/rcudata" looks as follows:
+
+rcu:
+  0 c=4011 g=4012 pq=1 pqc=4011 qp=0 rpfq=1 rp=3c2a dt=23301/73 dn=2 df=1882 of=0 ri=2126 ql=2 b=10
+  1 c=4011 g=4012 pq=1 pqc=4011 qp=0 rpfq=3 rp=39a6 dt=78073/1 dn=2 df=1402 of=0 ri=1875 ql=46 b=10
+  2 c=4010 g=4010 pq=1 pqc=4010 qp=0 rpfq=-5 rp=1d12 dt=16646/0 dn=2 df=3140 of=0 ri=2080 ql=0 b=10
+  3 c=4012 g=4013 pq=1 pqc=4012 qp=1 rpfq=3 rp=2b50 dt=21159/1 dn=2 df=2230 of=0 ri=1923 ql=72 b=10
+  4 c=4012 g=4013 pq=1 pqc=4012 qp=1 rpfq=3 rp=1644 dt=5783/1 dn=2 df=3348 of=0 ri=2805 ql=7 b=10
+  5 c=4012 g=4013 pq=0 pqc=4011 qp=1 rpfq=3 rp=1aac dt=5879/1 dn=2 df=3140 of=0 ri=2066 ql=10 b=10
+  6 c=4012 g=4013 pq=1 pqc=4012 qp=1 rpfq=3 rp=ed8 dt=5847/1 dn=2 df=3797 of=0 ri=1266 ql=10 b=10
+  7 c=4012 g=4013 pq=1 pqc=4012 qp=1 rpfq=3 rp=1fa2 dt=6199/1 dn=2 df=2795 of=0 ri=2162 ql=28 b=10
+rcu_bh:
+  0 c=-268 g=-268 pq=1 pqc=-268 qp=0 rpfq=-145 rp=21d6 dt=23301/73 dn=2 df=0 of=0 ri=0 ql=0 b=10
+  1 c=-268 g=-268 pq=1 pqc=-268 qp=1 rpfq=-170 rp=20ce dt=78073/1 dn=2 df=26 of=0 ri=5 ql=0 b=10
+  2 c=-268 g=-268 pq=1 pqc=-268 qp=1 rpfq=-83 rp=fbd dt=16646/0 dn=2 df=28 of=0 ri=4 ql=0 b=10
+  3 c=-268 g=-268 pq=1 pqc=-268 qp=0 rpfq=-105 rp=178c dt=21159/1 dn=2 df=28 of=0 ri=2 ql=0 b=10
+  4 c=-268 g=-268 pq=1 pqc=-268 qp=1 rpfq=-30 rp=b54 dt=5783/1 dn=2 df=32 of=0 ri=0 ql=0 b=10
+  5 c=-268 g=-268 pq=1 pqc=-268 qp=1 rpfq=-29 rp=df5 dt=5879/1 dn=2 df=30 of=0 ri=3 ql=0 b=10
+  6 c=-268 g=-268 pq=1 pqc=-268 qp=1 rpfq=-28 rp=788 dt=5847/1 dn=2 df=32 of=0 ri=0 ql=0 b=10
+  7 c=-268 g=-268 pq=1 pqc=-268 qp=1 rpfq=-53 rp=1098 dt=6199/1 dn=2 df=30 of=0 ri=3 ql=0 b=10
+
+The first section lists the rcu_data structures for rcu, the second for
+rcu_bh.  Each section has one line per CPU, or eight for this 8-CPU system.
+The fields are as follows:
+
+o      The number at the beginning of each line is the CPU number.
+       CPU numbers followed by an exclamation mark are offline,
+       but have been online at least once since boot.  There will be
+       no output for CPUs that have never been online, which can be
+       a good thing in the surprisingly common case where NR_CPUS is
+       substantially larger than the number of actual CPUs.
+
+o      "c" is the count of grace periods that this CPU believes have
+       completed.  CPUs in dynticks idle mode may lag quite a ways
+       behind, for example, CPU 4 under "rcu" above, which has slept
+       through the past 25 RCU grace periods.  It is not unusual to
+       see CPUs lagging by thousands of grace periods.
+
+o      "g" is the count of grace periods that this CPU believes have
+       started.  Again, CPUs in dynticks idle mode may lag behind.
+       If the "c" and "g" values are equal, this CPU has already
+       reported a quiescent state for the last RCU grace period that
+       it is aware of, otherwise, the CPU believes that it owes RCU a
+       quiescent state.
+
+o      "pq" indicates that this CPU has passed through a quiescent state
+       for the current grace period.  It is possible for "pq" to be
+       "1" and "c" different than "g", which indicates that although
+       the CPU has passed through a quiescent state, either (1) this
+       CPU has not yet reported that fact, (2) some other CPU has not
+       yet reported for this grace period, or (3) both.
+
+o      "pqc" indicates which grace period the last-observed quiescent
+       state for this CPU corresponds to.  This is important for handling
+       the race between CPU 0 reporting an extended dynticks-idle
+       quiescent state for CPU 1 and CPU 1 suddenly waking up and
+       reporting its own quiescent state.  If CPU 1 was the last CPU
+       for the current grace period, then the CPU that loses this race
+       will attempt to incorrectly mark CPU 1 as having checked in for
+       the next grace period!
+
+o      "qp" indicates that RCU still expects a quiescent state from
+       this CPU.
+
+o      "rpfq" is the number of rcu_pending() calls on this CPU required
+       to induce this CPU to invoke force_quiescent_state().
+
+o      "rp" is low-order four hex digits of the count of how many times
+       rcu_pending() has been invoked on this CPU.
+
+o      "dt" is the current value of the dyntick counter that is incremented
+       when entering or leaving dynticks idle state, either by the
+       scheduler or by irq.  The number after the "/" is the interrupt
+       nesting depth when in dyntick-idle state, or one greater than
+       the interrupt-nesting depth otherwise.
+
+       This field is displayed only for CONFIG_NO_HZ kernels.
+
+o      "dn" is the current value of the dyntick counter that is incremented
+       when entering or leaving dynticks idle state via NMI.  If both
+       the "dt" and "dn" values are even, then this CPU is in dynticks
+       idle mode and may be ignored by RCU.  If either of these two
+       counters is odd, then RCU must be alert to the possibility of
+       an RCU read-side critical section running on this CPU.
+
+       This field is displayed only for CONFIG_NO_HZ kernels.
+
+o      "df" is the number of times that some other CPU has forced a
+       quiescent state on behalf of this CPU due to this CPU being in
+       dynticks-idle state.
+
+       This field is displayed only for CONFIG_NO_HZ kernels.
+
+o      "of" is the number of times that some other CPU has forced a
+       quiescent state on behalf of this CPU due to this CPU being
+       offline.  In a perfect world, this might never happen, but it
+       turns out that offlining and onlining a CPU can take several grace
+       periods, and so there is likely to be an extended period of time
+       when RCU believes that the CPU is online when it really is not.
+       Please note that erring in the other direction (RCU believing a
+       CPU is offline when it is really alive and kicking) is a fatal
+       error, so it makes sense to err conservatively.
+
+o      "ri" is the number of times that RCU has seen fit to send a
+       reschedule IPI to this CPU in order to get it to report a
+       quiescent state.
+
+o      "ql" is the number of RCU callbacks currently residing on
+       this CPU.  This is the total number of callbacks, regardless
+       of what state they are in (new, waiting for grace period to
+       start, waiting for grace period to end, ready to invoke).
+
+o      "b" is the batch limit for this CPU.  If more than this number
+       of RCU callbacks is ready to invoke, then the remainder will
+       be deferred.
+
+
+The output of "cat rcu/rcugp" looks as follows:
+
+rcu: completed=33062  gpnum=33063
+rcu_bh: completed=464  gpnum=464
+
+Again, this output is for both "rcu" and "rcu_bh".  The fields are
+taken from the rcu_state structure, and are as follows:
+
+o      "completed" is the number of grace periods that have completed.
+       It is comparable to the "c" field from rcu/rcudata in that a
+       CPU whose "c" field matches the value of "completed" is aware
+       that the corresponding RCU grace period has completed.
+
+o      "gpnum" is the number of grace periods that have started.  It is
+       comparable to the "g" field from rcu/rcudata in that a CPU
+       whose "g" field matches the value of "gpnum" is aware that the
+       corresponding RCU grace period has started.
+
+       If these two fields are equal (as they are for "rcu_bh" above),
+       then there is no grace period in progress, in other words, RCU
+       is idle.  On the other hand, if the two fields differ (as they
+       do for "rcu" above), then an RCU grace period is in progress.
+
+
+The output of "cat rcu/rcuhier" looks as follows, with very long lines:
+
+c=6902 g=6903 s=2 jfq=3 j=72c7 nfqs=13142/nfqsng=0(13142) fqlh=6
+1/1 0:127 ^0    
+3/3 0:35 ^0    0/0 36:71 ^1    0/0 72:107 ^2    0/0 108:127 ^3    
+3/3f 0:5 ^0    2/3 6:11 ^1    0/0 12:17 ^2    0/0 18:23 ^3    0/0 24:29 ^4    0/0 30:35 ^5    0/0 36:41 ^0    0/0 42:47 ^1    0/0 48:53 ^2    0/0 54:59 ^3    0/0 60:65 ^4    0/0 66:71 ^5    0/0 72:77 ^0    0/0 78:83 ^1    0/0 84:89 ^2    0/0 90:95 ^3    0/0 96:101 ^4    0/0 102:107 ^5    0/0 108:113 ^0    0/0 114:119 ^1    0/0 120:125 ^2    0/0 126:127 ^3    
+rcu_bh:
+c=-226 g=-226 s=1 jfq=-5701 j=72c7 nfqs=88/nfqsng=0(88) fqlh=0
+0/1 0:127 ^0    
+0/3 0:35 ^0    0/0 36:71 ^1    0/0 72:107 ^2    0/0 108:127 ^3    
+0/3f 0:5 ^0    0/3 6:11 ^1    0/0 12:17 ^2    0/0 18:23 ^3    0/0 24:29 ^4    0/0 30:35 ^5    0/0 36:41 ^0    0/0 42:47 ^1    0/0 48:53 ^2    0/0 54:59 ^3    0/0 60:65 ^4    0/0 66:71 ^5    0/0 72:77 ^0    0/0 78:83 ^1    0/0 84:89 ^2    0/0 90:95 ^3    0/0 96:101 ^4    0/0 102:107 ^5    0/0 108:113 ^0    0/0 114:119 ^1    0/0 120:125 ^2    0/0 126:127 ^3
+
+This is once again split into "rcu" and "rcu_bh" portions.  The fields are
+as follows:
+
+o      "c" is exactly the same as "completed" under rcu/rcugp.
+
+o      "g" is exactly the same as "gpnum" under rcu/rcugp.
+
+o      "s" is the "signaled" state that drives force_quiescent_state()'s
+       state machine.
+
+o      "jfq" is the number of jiffies remaining for this grace period
+       before force_quiescent_state() is invoked to help push things
+       along.  Note that CPUs in dyntick-idle mode throughout the grace
+       period will not report on their own, but rather must be checked by
+       some other CPU via force_quiescent_state().
+
+o      "j" is the low-order four hex digits of the jiffies counter.
+       Yes, Paul did run into a number of problems that turned out to
+       be due to the jiffies counter no longer counting.  Why do you ask?
+
+o      "nfqs" is the number of calls to force_quiescent_state() since
+       boot.
+
+o      "nfqsng" is the number of useless calls to force_quiescent_state(),
+       where there wasn't actually a grace period active.  This can
+       happen due to races.  The number in parentheses is the difference
+       between "nfqs" and "nfqsng", or the number of times that
+       force_quiescent_state() actually did some real work.
+
+o      "fqlh" is the number of calls to force_quiescent_state() that
+       exited immediately (without even being counted in nfqs above)
+       due to contention on ->fqslock.
+
+o      Each element of the form "1/1 0:127 ^0" represents one struct
+       rcu_node.  Each line represents one level of the hierarchy, from
+       root to leaves.  It is best to think of the rcu_data structures
+       as forming yet another level after the leaves.  Note that there
+       might be either one, two, or three levels of rcu_node structures,
+       depending on the relationship between CONFIG_RCU_FANOUT and
+       CONFIG_NR_CPUS.
+       
+       o       The numbers separated by the "/" are the qsmask followed
+               by the qsmaskinit.  The qsmask will have one bit
+               set for each entity in the next lower level that
+               has not yet checked in for the current grace period.
+               The qsmaskinit will have one bit for each entity that is
+               currently expected to check in during each grace period.
+               The value of qsmaskinit is assigned to that of qsmask
+               at the beginning of each grace period.
+
+               For example, for "rcu", the qsmask of the first entry
+               of the lowest level is 0x14, meaning that we are still
+               waiting for CPUs 2 and 4 to check in for the current
+               grace period.
+
+       o       The numbers separated by the ":" are the range of CPUs
+               served by this struct rcu_node.  This can be helpful
+               in working out how the hierarchy is wired together.
+
+               For example, the first entry at the lowest level shows
+               "0:5", indicating that it covers CPUs 0 through 5.
+
+       o       The number after the "^" indicates the bit in the
+               next higher level rcu_node structure that this
+               rcu_node structure corresponds to.
+
+               For example, the first entry at the lowest level shows
+               "^0", indicating that it corresponds to bit zero in
+               the first entry at the middle level.
index 4dbb8be1c991c3b4049d69ff68c99c12e5f34ce3..3c5434c83daf360d9bf36cb7f8d09727af7a5966 100644 (file)
@@ -914,7 +914,7 @@ I/O scheduler, a.k.a. elevator, is implemented in two layers.  Generic dispatch
 queue and specific I/O schedulers.  Unless stated otherwise, elevator is used
 to refer to both parts and I/O scheduler to specific I/O schedulers.
 
-Block layer implements generic dispatch queue in ll_rw_blk.c and elevator.c.
+Block layer implements generic dispatch queue in block/*.c.
 The generic dispatch queue is responsible for properly ordering barrier
 requests, requeueing, handling non-fs requests and all other subtleties.
 
@@ -926,8 +926,8 @@ be built inside the kernel.  Each queue can choose different one and can also
 change to another one dynamically.
 
 A block layer call to the i/o scheduler follows the convention elv_xxx(). This
-calls elevator_xxx_fn in the elevator switch (drivers/block/elevator.c). Oh,
-xxx and xxx might not match exactly, but use your imagination. If an elevator
+calls elevator_xxx_fn in the elevator switch (block/elevator.c). Oh, xxx
+and xxx might not match exactly, but use your imagination. If an elevator
 doesn't implement a function, the switch does nothing or some minimal house
 keeping work.
 
index 4ba4664ce5c315d024c3bc7fd515e44ac92d4653..9cb9138f7a79bcd67bf4ac2f69741e812b5a14c2 100644 (file)
@@ -71,35 +71,50 @@ Look at the current lock statistics:
 
 # less /proc/lock_stat
 
-01 lock_stat version 0.2
+01 lock_stat version 0.3
 02 -----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
 03                               class name    con-bounces    contentions   waittime-min   waittime-max waittime-total    acq-bounces   acquisitions   holdtime-min   holdtime-max holdtime-total
 04 -----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
 05
-06               &inode->i_data.tree_lock-W:            15          21657           0.18     1093295.30 11547131054.85             58          10415           0.16          87.51        6387.60
-07               &inode->i_data.tree_lock-R:             0              0           0.00           0.00           0.00          23302         231198           0.25           8.45       98023.38
-08               --------------------------
-09                 &inode->i_data.tree_lock              0          [<ffffffff8027c08f>] add_to_page_cache+0x5f/0x190
-10
-11 ...............................................................................................................................................................................................
-12
-13                              dcache_lock:          1037           1161           0.38          45.32         774.51           6611         243371           0.15         306.48       77387.24
-14                              -----------
-15                              dcache_lock            180          [<ffffffff802c0d7e>] sys_getcwd+0x11e/0x230
-16                              dcache_lock            165          [<ffffffff802c002a>] d_alloc+0x15a/0x210
-17                              dcache_lock             33          [<ffffffff8035818d>] _atomic_dec_and_lock+0x4d/0x70
-18                              dcache_lock              1          [<ffffffff802beef8>] shrink_dcache_parent+0x18/0x130
+06                          &mm->mmap_sem-W:           233            538 18446744073708       22924.27      607243.51           1342          45806           1.71        8595.89     1180582.34
+07                          &mm->mmap_sem-R:           205            587 18446744073708       28403.36      731975.00           1940         412426           0.58      187825.45     6307502.88
+08                          ---------------
+09                            &mm->mmap_sem            487          [<ffffffff8053491f>] do_page_fault+0x466/0x928
+10                            &mm->mmap_sem            179          [<ffffffff802a6200>] sys_mprotect+0xcd/0x21d
+11                            &mm->mmap_sem            279          [<ffffffff80210a57>] sys_mmap+0x75/0xce
+12                            &mm->mmap_sem             76          [<ffffffff802a490b>] sys_munmap+0x32/0x59
+13                          ---------------
+14                            &mm->mmap_sem            270          [<ffffffff80210a57>] sys_mmap+0x75/0xce
+15                            &mm->mmap_sem            431          [<ffffffff8053491f>] do_page_fault+0x466/0x928
+16                            &mm->mmap_sem            138          [<ffffffff802a490b>] sys_munmap+0x32/0x59
+17                            &mm->mmap_sem            145          [<ffffffff802a6200>] sys_mprotect+0xcd/0x21d
+18
+19 ...............................................................................................................................................................................................
+20
+21                              dcache_lock:           621            623           0.52         118.26        1053.02           6745          91930           0.29         316.29      118423.41
+22                              -----------
+23                              dcache_lock            179          [<ffffffff80378274>] _atomic_dec_and_lock+0x34/0x54
+24                              dcache_lock            113          [<ffffffff802cc17b>] d_alloc+0x19a/0x1eb
+25                              dcache_lock             99          [<ffffffff802ca0dc>] d_rehash+0x1b/0x44
+26                              dcache_lock            104          [<ffffffff802cbca0>] d_instantiate+0x36/0x8a
+27                              -----------
+28                              dcache_lock            192          [<ffffffff80378274>] _atomic_dec_and_lock+0x34/0x54
+29                              dcache_lock             98          [<ffffffff802ca0dc>] d_rehash+0x1b/0x44
+30                              dcache_lock             72          [<ffffffff802cc17b>] d_alloc+0x19a/0x1eb
+31                              dcache_lock            112          [<ffffffff802cbca0>] d_instantiate+0x36/0x8a
 
 This excerpt shows the first two lock class statistics. Line 01 shows the
 output version - each time the format changes this will be updated. Line 02-04
-show the header with column descriptions. Lines 05-10 and 13-18 show the actual
+show the header with column descriptions. Lines 05-18 and 20-31 show the actual
 statistics. These statistics come in two parts; the actual stats separated by a
-short separator (line 08, 14) from the contention points.
+short separator (line 08, 13) from the contention points.
 
-The first lock (05-10) is a read/write lock, and shows two lines above the
+The first lock (05-18) is a read/write lock, and shows two lines above the
 short separator. The contention points don't match the column descriptors,
-they have two: contentions and [<IP>] symbol.
+they have two: contentions and [<IP>] symbol. The second set of contention
+points are the points we're contending with.
 
+The integer part of the time values is in us.
 
 View the top contending locks:
 
index 09ff7d81809a79fe7ff04a4b29a66eb9cfe10648..d13a9694e159ca092dd6a2dc044729bd1ddc7760 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -205,13 +205,14 @@ ifeq ($(ARCH),x86_64)
         SRCARCH := x86
 endif
 
-# Where to locate arch specific headers
+# Additional ARCH settings for sparc
 ifeq ($(ARCH),sparc64)
-       hdr-arch  := sparc
-else
-       hdr-arch  := $(SRCARCH)
+       SRCARCH := sparc
 endif
 
+# Where to locate arch specific headers
+hdr-arch  := $(SRCARCH)
+
 KCONFIG_CONFIG ?= .config
 
 # SHELL used by kbuild
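As a hedged illustration of the effect of this hunk (the commands are not part of the
patch): a 64-bit build is still requested with ARCH=sparc64, but SRCARCH - and with it
hdr-arch - now resolves to "sparc", so both the kernel sources and the exported headers
come from arch/sparc:

	$ make ARCH=sparc64 sparc64_defconfig    # uses arch/sparc/configs/sparc64_defconfig
	$ make ARCH=sparc64 headers_install      # headers taken from arch/sparc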
index e971ab000f9519c18cda4df33f26182d377aafa2..eda9b909aa05e84680d7eb0fa322c5fc4177dd1f 100644 (file)
@@ -96,9 +96,6 @@ static inline dma_addr_t __deprecated isa_page_to_bus(struct page *page)
        return page_to_phys(page);
 }
 
-/* This depends on working iommu.  */
-#define BIO_VMERGE_BOUNDARY    (alpha_mv.mv_pci_tbi ? PAGE_SIZE : 0)
-
 /* Maximum PIO space address supported?  */
 #define IO_SPACE_LIMIT 0xffff
 
index f4e55be2eea948548837177f432e893175960391..afad9f5ac0ac2190d1cb4e6685ca479b42b542a0 100644 (file)
@@ -208,6 +208,7 @@ void pSeries_log_error(char *buf, unsigned int err_type, int fatal)
                break;
        case ERR_TYPE_KERNEL_PANIC:
        default:
+               WARN_ON_ONCE(!irqs_disabled()); /* @@@ DEBUG @@@ */
                spin_unlock_irqrestore(&rtasd_log_lock, s);
                return;
        }
@@ -227,6 +228,7 @@ void pSeries_log_error(char *buf, unsigned int err_type, int fatal)
        /* Check to see if we need to or have stopped logging */
        if (fatal || !logging_enabled) {
                logging_enabled = 0;
+               WARN_ON_ONCE(!irqs_disabled()); /* @@@ DEBUG @@@ */
                spin_unlock_irqrestore(&rtasd_log_lock, s);
                return;
        }
@@ -249,11 +251,13 @@ void pSeries_log_error(char *buf, unsigned int err_type, int fatal)
                else
                        rtas_log_start += 1;
 
+               WARN_ON_ONCE(!irqs_disabled()); /* @@@ DEBUG @@@ */
                spin_unlock_irqrestore(&rtasd_log_lock, s);
                wake_up_interruptible(&rtas_log_wait);
                break;
        case ERR_TYPE_KERNEL_PANIC:
        default:
+               WARN_ON_ONCE(!irqs_disabled()); /* @@@ DEBUG @@@ */
                spin_unlock_irqrestore(&rtasd_log_lock, s);
                return;
        }
index ef3635b52fc0ce1420945fdbd42cf32845ecf671..0767827540b1778b0d88fa555d957ca457dc1d41 100644 (file)
@@ -263,7 +263,7 @@ int s390_enable_sie(void)
        /* lets check if we are allowed to replace the mm */
        task_lock(tsk);
        if (!tsk->mm || atomic_read(&tsk->mm->mm_users) > 1 ||
-           tsk->mm != tsk->active_mm || tsk->mm->ioctx_list) {
+           tsk->mm != tsk->active_mm || !hlist_empty(&tsk->mm->ioctx_list)) {
                task_unlock(tsk);
                return -EINVAL;
        }
@@ -279,7 +279,7 @@ int s390_enable_sie(void)
        /* Now lets check again if something happened */
        task_lock(tsk);
        if (!tsk->mm || atomic_read(&tsk->mm->mm_users) > 1 ||
-           tsk->mm != tsk->active_mm || tsk->mm->ioctx_list) {
+           tsk->mm != tsk->active_mm || !hlist_empty(&tsk->mm->ioctx_list)) {
                mmput(mm);
                task_unlock(tsk);
                return -EINVAL;
index e594559c8dbaa7658f56f079354e8ed25468fdcf..0a94d9c9cde1bf5d8c17a39880d5392954ae8afa 100644 (file)
 
 mainmenu "Linux/SPARC Kernel Configuration"
 
+config SPARC
+       bool
+       default y
+       select HAVE_IDE
+       select HAVE_OPROFILE
+       select HAVE_ARCH_KGDB if !SMP || SPARC64
+       select HAVE_ARCH_TRACEHOOK
+       select ARCH_WANT_OPTIONAL_GPIOLIB
+       select RTC_CLASS
+       select RTC_DRV_M48T59
+
+# Identify this as a Sparc32 build
+config SPARC32
+       bool
+       default y if ARCH = "sparc"
+       help
+         SPARC is a family of RISC microprocessors designed and marketed by
+         Sun Microsystems, incorporated.  They are very widely found in Sun
+         workstations and clones. This port covers the original 32-bit SPARC;
+         it is old and stable and usually considered one of the "big three"
+         along with the Intel and Alpha ports.  The UltraLinux project
+         maintains both the SPARC32 and SPARC64 ports; its web page is
+         available at <http://www.ultralinux.org/>.
+
+config SPARC64
+       bool
+       default y if ARCH = "sparc64"
+       select ARCH_SUPPORTS_MSI
+       select HAVE_FUNCTION_TRACER
+       select HAVE_KRETPROBES
+       select HAVE_KPROBES
+       select HAVE_LMB
+       select USE_GENERIC_SMP_HELPERS if SMP
+       select RTC_DRV_CMOS
+       select RTC_DRV_BQ4802
+       select RTC_DRV_SUN4V
+       select RTC_DRV_STARFIRE
+
+config ARCH_DEFCONFIG
+       string
+       default "arch/sparc/configs/sparc32_defconfig" if SPARC32
+       default "arch/sparc/configs/sparc64_defconfig" if SPARC64
+
+# CONFIG_BITS can be used at source level to get 32/64 bits
+config BITS
+       int
+       default 32 if SPARC32
+       default 64 if SPARC64
+
+config 64BIT
+       def_bool y if SPARC64
+
+config GENERIC_TIME
+       bool
+       default y if SPARC64
+
+config GENERIC_CMOS_UPDATE
+       bool
+       default y if SPARC64
+
+config GENERIC_CLOCKEVENTS
+       bool
+       default y if SPARC64
+
+config IOMMU_HELPER
+       bool
+       default y if SPARC64
+
+config QUICKLIST
+       bool
+       default y if SPARC64
+
+config STACKTRACE_SUPPORT
+       bool
+       default y if SPARC64
+
+config LOCKDEP_SUPPORT
+       bool
+       default y if SPARC64
+
+config HAVE_LATENCYTOP_SUPPORT
+       bool
+       default y if SPARC64
+
+config AUDIT_ARCH
+       bool
+       default y
+
+config HAVE_SETUP_PER_CPU_AREA
+       def_bool y if SPARC64
+
+config GENERIC_HARDIRQS_NO__DO_IRQ
+       bool
+       def_bool y if SPARC64
+
 config MMU
        bool
        default y
 
 config HIGHMEM
        bool
-       default y
+       default y if SPARC32
 
 config ZONE_DMA
        bool
-       default y
+       default y if SPARC32
 
 config GENERIC_ISA_DMA
        bool
-       default y
+       default y if SPARC32
 
 config GENERIC_GPIO
        bool
@@ -31,15 +126,11 @@ config ARCH_NO_VIRT_TO_BUS
 config OF
        def_bool y
 
-config HZ
-       int
-       default 100
-
 source "init/Kconfig"
 
 source "kernel/Kconfig.freezer"
 
-menu "General machine setup"
+menu "Processor type and features"
 
 config SMP
        bool "Symmetric multi-processing support (does not work on sun4/sun4c)"
@@ -64,82 +155,269 @@ config SMP
          If you don't know what to do here, say N.
 
 config NR_CPUS
-       int "Maximum number of CPUs (2-32)"
-       range 2 32
+       int "Maximum number of CPUs"
        depends on SMP
-       default "32"
+       range 2 32 if SPARC32
+       range 2 1024 if SPARC64
+       default 32 if SPARC32
+       default 64 if SPARC64
 
-config SPARC
+source kernel/Kconfig.hz
+
+config RWSEM_GENERIC_SPINLOCK
+       bool
+       default y if SPARC32
+
+config RWSEM_XCHGADD_ALGORITHM
+       bool
+       default y if SPARC64
+
+config GENERIC_FIND_NEXT_BIT
        bool
        default y
-       select HAVE_IDE
-       select HAVE_OPROFILE
-       select HAVE_ARCH_KGDB if !SMP
-       select HAVE_ARCH_TRACEHOOK
-       select ARCH_WANT_OPTIONAL_GPIOLIB
-       select RTC_CLASS
-       select RTC_DRV_M48T59
 
-# Identify this as a Sparc32 build
-config SPARC32
+config GENERIC_HWEIGHT
+       bool
+       default y if !ULTRA_HAS_POPULATION_COUNT
+
+config GENERIC_CALIBRATE_DELAY
        bool
        default y
-       help
-         SPARC is a family of RISC microprocessors designed and marketed by
-         Sun Microsystems, incorporated.  They are very widely found in Sun
-         workstations and clones. This port covers the original 32-bit SPARC;
-         it is old and stable and usually considered one of the "big three"
-         along with the Intel and Alpha ports.  The UltraLinux project
-         maintains both the SPARC32 and SPARC64 ports; its web page is
-         available at <http://www.ultralinux.org/>.
 
-# Global things across all Sun machines.
-config ISA
+config ARCH_MAY_HAVE_PC_FDC
        bool
-       help
-         ISA is found on Espresso only and is not supported currently.
-         Say N
+       default y
 
-config EISA
+config ARCH_HAS_ILOG2_U32
        bool
+       default n
+
+config ARCH_HAS_ILOG2_U64
+       bool
+       default n
+
+config EMULATED_CMPXCHG
+       bool
+       default y if SPARC32
        help
-         EISA is not supported.
-         Say N
+         Sparc32 does not have a CAS instruction like sparc64. cmpxchg()
+         is emulated, and therefore it is not completely atomic.
 
-config MCA
+# Makefile helpers
+config SPARC32_SMP
+       bool
+       default y
+       depends on SPARC32 && SMP
+
+config SPARC64_SMP
        bool
+       default y
+       depends on SPARC64 && SMP
+
+choice
+       prompt "Kernel page size" if SPARC64
+       default SPARC64_PAGE_SIZE_8KB
+
+config SPARC64_PAGE_SIZE_8KB
+       bool "8KB"
        help
-         MCA is not supported.
-         Say N
+         This lets you select the page size of the kernel.
 
-config PCMCIA
-       tristate
-       ---help---
-         Say Y here if you want to attach PCMCIA- or PC-cards to your Linux
-         computer.  These are credit-card size devices such as network cards,
-         modems or hard drives often used with laptops computers.  There are
-         actually two varieties of these cards: the older 16 bit PCMCIA cards
-         and the newer 32 bit CardBus cards.  If you want to use CardBus
-         cards, you need to say Y here and also to "CardBus support" below.
+         8KB and 64KB work quite well, since SPARC ELF sections
+         provide for up to 64KB alignment.
 
-         To use your PC-cards, you will need supporting software from David
-         Hinds' pcmcia-cs package (see the file <file:Documentation/Changes>
-         for location).  Please also read the PCMCIA-HOWTO, available from
-         <http://www.tldp.org/docs.html#howto>.
+         If you don't know what to do, choose 8KB.
 
-         To compile this driver as modules, choose M here: the
-         modules will be called pcmcia_core and ds.
+config SPARC64_PAGE_SIZE_64KB
+       bool "64KB"
 
-config SBUS
+endchoice
+
+config SECCOMP
+       bool "Enable seccomp to safely compute untrusted bytecode"
+       depends on SPARC64 && PROC_FS
+       default y
+       help
+         This kernel feature is useful for number crunching applications
+         that may need to compute untrusted bytecode during their
+         execution. By using pipes or other transports made available to
+         the process as file descriptors supporting the read/write
+         syscalls, it's possible to isolate those applications in
+         their own address space using seccomp. Once seccomp is
+         enabled via /proc/<pid>/seccomp, it cannot be disabled
+         and the task is only allowed to execute a few safe syscalls
+         defined by each seccomp mode.
+
+         If unsure, say Y. Only embedded should say N here.
+
+config HOTPLUG_CPU
+       bool "Support for hot-pluggable CPUs"
+       depends on SPARC64 && SMP
+       select HOTPLUG
+       help
+         Say Y here to experiment with turning CPUs off and on.  CPUs
+         can be controlled through /sys/devices/system/cpu/cpu#.
+         Say N if you want to disable CPU hotplug.
+
+config GENERIC_HARDIRQS
        bool
+       default y if SPARC64
+
+source "kernel/time/Kconfig"
+
+if SPARC64
+source "drivers/cpufreq/Kconfig"
+
+config US3_FREQ
+       tristate "UltraSPARC-III CPU Frequency driver"
+       depends on CPU_FREQ
+       select CPU_FREQ_TABLE
+       help
+         This adds the CPUFreq driver for UltraSPARC-III processors.
+
+         For details, take a look at <file:Documentation/cpu-freq>.
+
+         If in doubt, say N.
+
+config US2E_FREQ
+       tristate "UltraSPARC-IIe CPU Frequency driver"
+       depends on CPU_FREQ
+       select CPU_FREQ_TABLE
+       help
+         This adds the CPUFreq driver for UltraSPARC-IIe processors.
+
+         For details, take a look at <file:Documentation/cpu-freq>.
+
+         If in doubt, say N.
+
+endif
+
+config US3_MC
+       tristate "UltraSPARC-III Memory Controller driver"
+       depends on SPARC64
        default y
+       help
+         This adds a driver for the UltraSPARC-III memory controller.
+         Loading this driver allows exact mnemonic strings to be
+         printed in the event of a memory error, so that the faulty DIMM
+         on the motherboard can be matched to the error.
 
-config SBUSCHAR
+         If in doubt, say Y, as this information can be very useful.
+
+# Global things across all Sun machines.
+config GENERIC_LOCKBREAK
        bool
        default y
+       depends on SPARC64 && SMP && PREEMPT
+
+choice
+       prompt "SPARC64 Huge TLB Page Size"
+       depends on SPARC64 && HUGETLB_PAGE
+       default HUGETLB_PAGE_SIZE_4MB
+
+config HUGETLB_PAGE_SIZE_4MB
+       bool "4MB"
+
+config HUGETLB_PAGE_SIZE_512K
+       bool "512K"
+
+config HUGETLB_PAGE_SIZE_64K
+       depends on !SPARC64_PAGE_SIZE_64KB
+       bool "64K"
+
+endchoice
+
+config NUMA
+       bool "NUMA support"
+       depends on SPARC64 && SMP
+
+config NODES_SHIFT
+       int
+       default "4"
+       depends on NEED_MULTIPLE_NODES
+
+# Some NUMA nodes have memory ranges that span
+# other nodes.  Even though a pfn is valid and
+# between a node's start and end pfns, it may not
+# reside on that node.  See memmap_init_zone()
+# for details.
+config NODES_SPAN_OTHER_NODES
+       def_bool y
+       depends on NEED_MULTIPLE_NODES
+
+config ARCH_POPULATES_NODE_MAP
+       def_bool y if SPARC64
+
+config ARCH_SELECT_MEMORY_MODEL
+       def_bool y if SPARC64
+
+config ARCH_SPARSEMEM_ENABLE
+       def_bool y if SPARC64
+       select SPARSEMEM_VMEMMAP_ENABLE
+
+config ARCH_SPARSEMEM_DEFAULT
+       def_bool y if SPARC64
+
+source "mm/Kconfig"
+
+config SCHED_SMT
+       bool "SMT (Hyperthreading) scheduler support"
+       depends on SPARC64 && SMP
+       default y
+       help
+         SMT scheduler support improves the CPU scheduler's decision making
+         when dealing with SPARC cpus at a cost of slightly increased overhead
+         in some places. If unsure say N here.
+
+config SCHED_MC
+       bool "Multi-core scheduler support"
+       depends on SPARC64 && SMP
+       default y
+       help
+         Multi-core scheduler support improves the CPU scheduler's decision
+         making when dealing with multi-core CPU chips at a cost of slightly
+         increased overhead in some places. If unsure say N here.
+
+if SPARC64
+source "kernel/Kconfig.preempt"
+endif
+
+config CMDLINE_BOOL
+       bool "Default bootloader kernel arguments"
+       depends on SPARC64
+
+config CMDLINE
+       string "Initial kernel command string"
+       depends on CMDLINE_BOOL
+       default "console=ttyS0,9600 root=/dev/sda1"
+       help
+         Say Y here if you want to be able to pass default arguments to
+         the kernel. This will be overridden by the bootloader, if you
+         use one (such as SILO). This is most useful if you want to boot
+         a kernel from TFTP, and want default options to be available
+         without having them passed on the command line.
+
+         NOTE: This option WILL override the PROM bootargs setting!
+
+config SUN_PM
+       bool
+       default y if SPARC32
+       help
+         Enable power management and CPU standby features on supported
+         SPARC platforms.
+
+config SPARC_LED
+       tristate "Sun4m LED driver"
+       depends on SPARC32
+       help
+         This driver toggles the front-panel LED on sun4m systems
+         in a user-specifiable manner.  Its state can be probed
+         by reading /proc/led and its blinking mode can be changed
+         via writes to /proc/led
 
 config SERIAL_CONSOLE
        bool
+       depends on SPARC32
        default y
        ---help---
          If you say Y here, it will be possible to use a serial port as the
@@ -161,71 +439,66 @@ config SERIAL_CONSOLE
 
          If unsure, say N.
 
-config SUN_AUXIO
-       bool
-       default y
-
-config SUN_IO
-       bool
-       default y
-
-config RWSEM_GENERIC_SPINLOCK
-       bool
-       default y
+endmenu
 
-config RWSEM_XCHGADD_ALGORITHM
+menu "Bus options (PCI etc.)"
+config ISA
        bool
+       help
+         ISA is found on Espresso only and is not supported currently.
 
-config GENERIC_FIND_NEXT_BIT
+config ISAPNP
        bool
-       default y
+       help
+         ISAPNP is not supported
 
-config GENERIC_HWEIGHT
+config EISA
        bool
-       default y
+       help
+         EISA is not supported.
 
-config GENERIC_CALIBRATE_DELAY
+config MCA
        bool
-       default y
+       help
+         MCA is not supported.
 
-config ARCH_MAY_HAVE_PC_FDC
+config SBUS
        bool
        default y
 
-config ARCH_HAS_ILOG2_U32
-       bool
-       default n
-
-config ARCH_HAS_ILOG2_U64
-       bool
-       default n
-
-config EMULATED_CMPXCHG
+config SBUSCHAR
        bool
        default y
-       help
-         Sparc32 does not have a CAS instruction like sparc64. cmpxchg()
-         is emulated, and therefore it is not completely atomic.
 
-config SUN_PM
-       bool
-       default y
+config SUN_LDOMS
+       bool "Sun Logical Domains support"
+       depends on SPARC64
        help
-         Enable power management and CPU standby features on supported
-         SPARC platforms.
+         Say Y here if you want to support virtual devices via
+         Logical Domains.
 
 config PCI
        bool "Support for PCI and PS/2 keyboard/mouse"
        help
+         Find out whether your system includes a PCI bus. PCI is the name of
+         a bus system, i.e. the way the CPU talks to the other stuff inside
+         your box.  If you say Y here, the kernel will include drivers and
+         infrastructure code to support PCI bus devices.
+
          CONFIG_PCI is needed for all JavaStation's (including MrCoffee),
          CP-1200, JavaEngine-1, Corona, Red October, and Serengeti SGSC.
          All of these platforms are extremely obscure, so say N if unsure.
 
+config PCI_DOMAINS
+       def_bool PCI if SPARC64
+
 config PCI_SYSCALL
        def_bool PCI
 
 source "drivers/pci/Kconfig"
 
+source "drivers/pcmcia/Kconfig"
+
 config SUN_OPENPROMFS
        tristate "Openprom tree appears in /proc/openprom"
        help
@@ -239,17 +512,33 @@ config SUN_OPENPROMFS
          Only choose N if you know in advance that you will not need to modify
          OpenPROM settings on the running system.
 
-config SPARC_LED
-       tristate "Sun4m LED driver"
-       help
-         This driver toggles the front-panel LED on sun4m systems
-         in a user-specifiable manner.  Its state can be probed
-         by reading /proc/led and its blinking mode can be changed
-         via writes to /proc/led
+# Makefile helpers
+config SPARC32_PCI
+       bool
+       default y
+       depends on SPARC32 && PCI
+
+config SPARC64_PCI
+       bool
+       default y
+       depends on SPARC64 && PCI
+
+endmenu
+
+menu "Executable file formats"
 
 source "fs/Kconfig.binfmt"
 
-source "mm/Kconfig"
+config COMPAT
+       bool
+       depends on SPARC64
+       default y
+       select COMPAT_BINFMT_ELF
+
+config SYSVIPC_COMPAT
+       bool
+       depends on COMPAT && SYSVIPC
+       default y
 
 endmenu
 
@@ -259,40 +548,6 @@ source "drivers/Kconfig"
 
 source "drivers/sbus/char/Kconfig"
 
-# This one must be before the filesystem configs. -DaveM
-
-menu "Unix98 PTY support"
-
-config UNIX98_PTYS
-       bool "Unix98 PTY support"
-       ---help---
-         A pseudo terminal (PTY) is a software device consisting of two
-         halves: a master and a slave. The slave device behaves identical to
-         a physical terminal; the master device is used by a process to
-         read data from and write data to the slave, thereby emulating a
-         terminal. Typical programs for the master side are telnet servers
-         and xterms.
-
-         Linux has traditionally used the BSD-like names /dev/ptyxx for
-         masters and /dev/ttyxx for slaves of pseudo terminals. This scheme
-         has a number of problems. The GNU C library glibc 2.1 and later,
-         however, supports the Unix98 naming standard: in order to acquire a
-         pseudo terminal, a process opens /dev/ptmx; the number of the pseudo
-         terminal is then made available to the process and the pseudo
-         terminal slave can be accessed as /dev/pts/<number>. What was
-         traditionally /dev/ttyp2 will then be /dev/pts/2, for example.
-
-         The entries in /dev/pts/ are created on the fly by a virtual
-         file system; therefore, if you say Y here you should say Y to
-         "/dev/pts file system for Unix98 PTYs" as well.
-
-         If you want to say Y here, you need to have the C library glibc 2.1
-         or later (equal to libc-6.1, check with "ls -l /lib/libc.so.*").
-         Read the instructions in <file:Documentation/Changes> pertaining to
-         pseudo terminals. It's safe to say N.
-
-endmenu
-
 source "fs/Kconfig"
 
 source "arch/sparc/Kconfig.debug"
index 87dd496f15eb1bc2eb60318b99e0c75903261dd6..b8a15e271bfaacc2e6ce0e789fcb501b792f27b9 100644 (file)
@@ -15,4 +15,30 @@ config DEBUG_STACK_USAGE
 
          This option will slow down process creation somewhat.
 
+config DEBUG_DCFLUSH
+       bool "D-cache flush debugging"
+       depends on SPARC64 && DEBUG_KERNEL
+
+config STACK_DEBUG
+       bool "Stack Overflow Detection Support"
+
+config DEBUG_PAGEALLOC
+       bool "Debug page memory allocations"
+       depends on SPARC64 && DEBUG_KERNEL && !HIBERNATION
+       help
+         Unmap pages from the kernel linear mapping after free_pages().
+         This results in a large slowdown, but helps to find certain types
+         of memory corruptions.
+
+config MCOUNT
+       bool
+       depends on SPARC64
+       depends on STACK_DEBUG || FUNCTION_TRACER
+       default y
+
+config FRAME_POINTER
+       bool
+       depends on MCOUNT
+       default y
+
 endmenu
index 9592889a6fd0e11ef797e138b4be58f94b27bd30..2003ded054c25c779e2f19422acf130b61b7213c 100644 (file)
@@ -2,18 +2,31 @@
 # sparc/Makefile
 #
 # Makefile for the architecture dependent flags and dependencies on the
-# Sparc.
+# Sparc and sparc64.
 #
-# Copyright (C) 1994 David S. Miller (davem@caip.rutgers.edu)
+# Copyright (C) 1994,1996,1998 David S. Miller (davem@caip.rutgers.edu)
+# Copyright (C) 1998 Jakub Jelinek (jj@ultra.linux.cz)
+
+# We are not yet configured - so test on arch
+ifeq ($(ARCH),sparc)
+        KBUILD_DEFCONFIG := sparc32_defconfig
+else
+        KBUILD_DEFCONFIG := sparc64_defconfig
+endif
+
+ifeq ($(CONFIG_SPARC32),y)
+#####
+# sparc32
 #
 
 #
 # Uncomment the first KBUILD_CFLAGS if you are doing kgdb source level
 # debugging of the kernel to get the proper debugging information.
 
-AS              := $(AS) -32
-LDFLAGS                := -m elf32_sparc
-CHECKFLAGS     += -D__sparc__
+AS             := $(AS) -32
+LDFLAGS        := -m elf32_sparc
+CHECKFLAGS     += -D__sparc__
+export BITS    := 32
 
 #KBUILD_CFLAGS += -g -pipe -fcall-used-g5 -fcall-used-g7
 KBUILD_CFLAGS += -m32 -pipe -mno-fpu -fcall-used-g5 -fcall-used-g7
@@ -25,38 +38,60 @@ CPPFLAGS_vmlinux.lds += -m32
 #  Actual linking is done with "make image".
 LDFLAGS_vmlinux = -r
 
-head-y := arch/sparc/kernel/head.o arch/sparc/kernel/init_task.o
-HEAD_Y := $(head-y)
+# Default target
+all: zImage
+
+
+else
+#####
+# sparc64
+#
 
-core-y += arch/sparc/kernel/ arch/sparc/mm/ arch/sparc/math-emu/
-libs-y += arch/sparc/prom/ arch/sparc/lib/
+CHECKFLAGS      += -D__sparc__ -D__sparc_v9__ -D__arch64__ -m64
+
+# Undefine sparc when processing vmlinux.lds - it is used there,
+# and teach CPP that we are doing a 64 bit build (for this case)
+CPPFLAGS_vmlinux.lds += -m64 -Usparc
+LDFLAGS              := -m elf64_sparc
+export BITS          := 64
+
+KBUILD_CFLAGS += -m64 -pipe -mno-fpu -mcpu=ultrasparc -mcmodel=medlow   \
+                 -ffixed-g4 -ffixed-g5 -fcall-used-g7 -Wno-sign-compare \
+                 -Wa,--undeclared-regs
+KBUILD_CFLAGS += $(call cc-option,-mtune=ultrasparc3)
+KBUILD_AFLAGS += -m64 -mcpu=ultrasparc -Wa,--undeclared-regs
+
+ifeq ($(CONFIG_MCOUNT),y)
+  KBUILD_CFLAGS += -pg
+endif
+
+endif
+
+head-y                 := arch/sparc/kernel/head_$(BITS).o
+head-y                 += arch/sparc/kernel/init_task.o
+
+core-y                 += arch/sparc/kernel/
+core-y                 += arch/sparc/mm/ arch/sparc/math-emu/
+
+libs-y                 += arch/sparc/prom/
+libs-y                 += arch/sparc/lib/
 
 drivers-$(CONFIG_OPROFILE)     += arch/sparc/oprofile/
 
 # Export what is needed by arch/sparc/boot/Makefile
-# Renaming is done to avoid confusing pattern matching rules in 2.5.45 (multy-)
-INIT_Y         := $(patsubst %/, %/built-in.o, $(init-y))
-CORE_Y         := $(core-y)
-CORE_Y         += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
-CORE_Y         := $(patsubst %/, %/built-in.o, $(CORE_Y))
-DRIVERS_Y      := $(patsubst %/, %/built-in.o, $(drivers-y))
-NET_Y          := $(patsubst %/, %/built-in.o, $(net-y))
-LIBS_Y1                := $(patsubst %/, %/lib.a, $(libs-y))
-LIBS_Y2                := $(patsubst %/, %/built-in.o, $(libs-y))
-LIBS_Y         := $(LIBS_Y1) $(LIBS_Y2)
+export VMLINUX_INIT VMLINUX_MAIN
+VMLINUX_INIT := $(head-y) $(init-y)
+VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/
+VMLINUX_MAIN += $(patsubst %/, %/lib.a, $(libs-y)) $(libs-y)
+VMLINUX_MAIN += $(drivers-y) $(net-y)
 
 ifdef CONFIG_KALLSYMS
-kallsyms.o := .tmp_kallsyms2.o
+export kallsyms.o := .tmp_kallsyms2.o
 endif
 
-export INIT_Y CORE_Y DRIVERS_Y NET_Y LIBS_Y HEAD_Y kallsyms.o
-
-# Default target
-all: zImage
-
 boot := arch/sparc/boot
 
-image zImage tftpboot.img: vmlinux
+image zImage tftpboot.img vmlinux.aout: vmlinux
        $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
 
 archclean:
@@ -65,11 +100,17 @@ archclean:
 # This is the image used for packaging
 KBUILD_IMAGE := $(boot)/zImage
 
-CLEAN_FILES += arch/$(ARCH)/boot/System.map
-
 # Don't use tabs in echo arguments.
+ifeq ($(ARCH),sparc)
 define archhelp
   echo  '* image        - kernel image ($(boot)/image)'
   echo  '* zImage       - stripped kernel image ($(boot)/zImage)'
   echo  '  tftpboot.img - image prepared for tftp'
 endef
+else
+define archhelp
+  echo  '* vmlinux       - Standard sparc64 kernel'
+  echo  '  vmlinux.aout  - a.out kernel for sparc64'
+  echo  '  tftpboot.img - image prepared for tftp'
+endef
+endif
similarity index 60%
rename from arch/sparc64/boot/.gitignore
rename to arch/sparc/boot/.gitignore
index 36356f9d498e5299320875ed1561a70cc62a0241..fc6f3986c76c43358ad66737e61d7169a7c5c65c 100644 (file)
@@ -1,4 +1,8 @@
+btfix.S
+btfixupprep
 image
+zImage
 tftpboot.img
 vmlinux.aout
 piggyback
+
index 3e77a9f522489c6a7c1a4d85c17dc060da20a771..96041a8d39e8cf2e01695c0536502440e43e0f0d 100644 (file)
@@ -6,13 +6,16 @@
 ROOT_IMG       := /usr/src/root.img
 ELFTOAOUT      := elftoaout
 
-hostprogs-y    := piggyback btfixupprep
-targets                := tftpboot.img btfix.o btfix.S image
+hostprogs-y    := piggyback_32 piggyback_64 btfixupprep
+targets                := tftpboot.img btfix.o btfix.S image zImage vmlinux.aout
+clean-files    := System.map
 
 quiet_cmd_elftoaout    = ELFTOAOUT $@
       cmd_elftoaout    = $(ELFTOAOUT) $(obj)/image -o $@
+
+ifeq ($(CONFIG_SPARC32),y)
 quiet_cmd_piggy                = PIGGY   $@
-      cmd_piggy                = $(obj)/piggyback $@ $(obj)/System.map $(ROOT_IMG)
+      cmd_piggy                = $(obj)/piggyback_32 $@ $(obj)/System.map $(ROOT_IMG)
 quiet_cmd_btfix                = BTFIX   $@
       cmd_btfix                = $(OBJDUMP) -x vmlinux | $(obj)/btfixupprep > $@
 quiet_cmd_sysmap        = SYSMAP  $(obj)/System.map
@@ -37,8 +40,8 @@ define rule_image
        echo 'cmd_$@ := $(cmd_image)' > $(@D)/.$(@F).cmd
 endef
 
-BTOBJS := $(HEAD_Y) $(INIT_Y)
-BTLIBS := $(CORE_Y) $(LIBS_Y) $(DRIVERS_Y) $(NET_Y)
+BTOBJS := $(patsubst %/, %/built-in.o, $(VMLINUX_INIT))
+BTLIBS := $(patsubst %/, %/built-in.o, $(VMLINUX_MAIN))
 LDFLAGS_image := -T arch/sparc/kernel/vmlinux.lds $(BTOBJS) \
                   --start-group $(BTLIBS) --end-group \
                   $(kallsyms.o) $(obj)/btfix.o
@@ -61,3 +64,28 @@ $(obj)/tftpboot.img: $(obj)/piggyback $(obj)/System.map $(obj)/image FORCE
 
 $(obj)/btfix.S: $(obj)/btfixupprep vmlinux FORCE
        $(call if_changed,btfix)
+
+endif
+
+ifeq ($(CONFIG_SPARC64),y)
+quiet_cmd_piggy     = PIGGY   $@
+      cmd_piggy     = $(obj)/piggyback_64 $@ System.map $(ROOT_IMG)
+quiet_cmd_strip     = STRIP   $@
+      cmd_strip     = $(STRIP) -R .comment -R .note -K sun4u_init -K _end -K _start vmlinux -o $@
+
+
+# Actual linking
+$(obj)/image: vmlinux FORCE
+       $(call if_changed,strip)
+       @echo '  kernel: $@ is ready'
+
+$(obj)/tftpboot.img: vmlinux $(obj)/piggyback_64 System.map $(ROOT_IMG) FORCE
+       $(call if_changed,elftoaout)
+       $(call if_changed,piggy)
+       @echo '  kernel: $@ is ready'
+
+$(obj)/vmlinux.aout: vmlinux FORCE
+       $(call if_changed,elftoaout)
+       @echo '  kernel: $@ is ready'
+endif
+
index 2d2769d766ec7cacc827737bdbea9d73e139cfce..89c260aab45c83fd503119114095f93f2589ddc0 100644 (file)
@@ -15,8 +15,6 @@ header-y += signal_32.h
 header-y += signal_64.h
 header-y += stat_32.h
 header-y += stat_64.h
-header-y += unistd_32.h
-header-y += unistd_64.h
 
 header-y += apc.h
 header-y += asi.h
diff --git a/arch/sparc/include/asm/asm.h b/arch/sparc/include/asm/asm.h
new file mode 100644 (file)
index 0000000..e8e1d94
--- /dev/null
@@ -0,0 +1,40 @@
+#ifndef _SPARC_ASM_H
+#define _SPARC_ASM_H
+
+/* Macros to assist the sharing of assembler code between 32-bit and
+ * 64-bit sparc.
+ */
+
+#ifdef CONFIG_SPARC64
+#define BRANCH32(TYPE, PREDICT, DEST) \
+       TYPE,PREDICT    %icc, DEST
+#define BRANCH32_ANNUL(TYPE, PREDICT, DEST) \
+       TYPE,a,PREDICT  %icc, DEST
+#define BRANCH_REG_ZERO(PREDICT, REG, DEST) \
+       brz,PREDICT     REG, DEST
+#define BRANCH_REG_ZERO_ANNUL(PREDICT, REG, DEST) \
+       brz,a,PREDICT   REG, DEST
+#define BRANCH_REG_NOT_ZERO(PREDICT, REG, DEST) \
+       brnz,PREDICT    REG, DEST
+#define BRANCH_REG_NOT_ZERO_ANNUL(PREDICT, REG, DEST) \
+       brnz,a,PREDICT  REG, DEST
+#else
+#define BRANCH32(TYPE, PREDICT, DEST) \
+       TYPE            DEST
+#define BRANCH32_ANNUL(TYPE, PREDICT, DEST) \
+       TYPE,a          DEST
+#define BRANCH_REG_ZERO(PREDICT, REG, DEST) \
+       cmp             REG, 0; \
+       be              DEST
+#define BRANCH_REG_ZERO_ANNUL(PREDICT, REG, DEST) \
+       cmp             REG, 0; \
+       be,a            DEST
+#define BRANCH_REG_NOT_ZERO(PREDICT, REG, DEST) \
+       cmp             REG, 0; \
+       bne             DEST
+#define BRANCH_REG_NOT_ZERO_ANNUL(PREDICT, REG, DEST) \
+       cmp             REG, 0; \
+       bne,a           DEST
+#endif
+
+#endif /* _SPARC_ASM_H */
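As an illustrative expansion (added here for clarity, not part of the header): shared
assembler written as BRANCH_REG_ZERO(pt, %g1, 1f) assembles to a register branch on
sparc64 and to a compare-plus-branch pair on sparc32, where the prediction hint is
simply ignored:

	! sparc64
	brz,pt	%g1, 1f
	! sparc32 (PREDICT dropped)
	cmp	%g1, 0
	be	1f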
index 2c71ec4a3b180930f53bcbec979527d3f28d3f97..5982c5ae7f0799e9159cd89096e80e99d600c8fe 100644 (file)
@@ -112,17 +112,10 @@ static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
 
 /* Atomic operations are already serializing */
-#ifdef CONFIG_SMP
-#define smp_mb__before_atomic_dec()    membar_storeload_loadload();
-#define smp_mb__after_atomic_dec()     membar_storeload_storestore();
-#define smp_mb__before_atomic_inc()    membar_storeload_loadload();
-#define smp_mb__after_atomic_inc()     membar_storeload_storestore();
-#else
 #define smp_mb__before_atomic_dec()    barrier()
 #define smp_mb__after_atomic_dec()     barrier()
 #define smp_mb__before_atomic_inc()    barrier()
 #define smp_mb__after_atomic_inc()     barrier()
-#endif
 
 #include <asm-generic/atomic.h>
 #endif /* !(__ARCH_SPARC64_ATOMIC__) */
index bb87b80802200090d8f1e7476bd735d6a23d1b3f..e72ac9cdfb982eb307367a7cd1d8f8f9914efaa4 100644 (file)
@@ -23,13 +23,8 @@ extern void change_bit(unsigned long nr, volatile unsigned long *addr);
 
 #include <asm-generic/bitops/non-atomic.h>
 
-#ifdef CONFIG_SMP
-#define smp_mb__before_clear_bit()     membar_storeload_loadload()
-#define smp_mb__after_clear_bit()      membar_storeload_storestore()
-#else
 #define smp_mb__before_clear_bit()     barrier()
 #define smp_mb__after_clear_bit()      barrier()
-#endif
 
 #include <asm-generic/bitops/ffz.h>
 #include <asm-generic/bitops/__ffs.h>
index 109ae24ba242c3f24a7861d15580c0c62045709b..bafe5a631b6d1a4e46653c7f6fa01f4b80603a1f 100644 (file)
@@ -2713,6 +2713,30 @@ extern unsigned long sun4v_ldc_revoke(unsigned long channel,
  */
 #define HV_FAST_SET_PERFREG            0x101
 
+#define HV_N2_PERF_SPARC_CTL           0x0
+#define HV_N2_PERF_DRAM_CTL0           0x1
+#define HV_N2_PERF_DRAM_CNT0           0x2
+#define HV_N2_PERF_DRAM_CTL1           0x3
+#define HV_N2_PERF_DRAM_CNT1           0x4
+#define HV_N2_PERF_DRAM_CTL2           0x5
+#define HV_N2_PERF_DRAM_CNT2           0x6
+#define HV_N2_PERF_DRAM_CTL3           0x7
+#define HV_N2_PERF_DRAM_CNT3           0x8
+
+#define HV_FAST_N2_GET_PERFREG         0x104
+#define HV_FAST_N2_SET_PERFREG         0x105
+
+#ifndef __ASSEMBLY__
+extern unsigned long sun4v_niagara_getperf(unsigned long reg,
+                                          unsigned long *val);
+extern unsigned long sun4v_niagara_setperf(unsigned long reg,
+                                          unsigned long val);
+extern unsigned long sun4v_niagara2_getperf(unsigned long reg,
+                                           unsigned long *val);
+extern unsigned long sun4v_niagara2_setperf(unsigned long reg,
+                                           unsigned long val);
+#endif
+
 /* MMU statistics services.
  *
  * The hypervisor maintains MMU statistics and privileged code provides
index fe205cc444b889619e82612ac3ba7ed00302035a..ea43057d47633164b0100367a01f4c9f3ff67e12 100644 (file)
@@ -12,4 +12,5 @@
 
 #define irq_canonicalize(irq)  (irq)
 
+extern void __init init_IRQ(void);
 #endif
index 71673eca3660be0dcce7359c88aa9edcae0e7ee1..d47d4a1955a974169b8a9bf432915c5d4263807c 100644 (file)
@@ -66,6 +66,9 @@ extern void virt_irq_free(unsigned int virt_irq);
 extern void __init init_IRQ(void);
 extern void fixup_irqs(void);
 
+extern int register_perfctr_intr(void (*handler)(struct pt_regs *));
+extern void release_perfctr_intr(void (*handler)(struct pt_regs *));
+
 static inline void set_softint(unsigned long bits)
 {
        __asm__ __volatile__("wr        %0, 0x0, %%set_softint"
index bb42e59162aab85829566a369255a55ce67180aa..8b49bf920df3b11a1d7d9b661891c5d0918a11de 100644 (file)
@@ -10,6 +10,8 @@
 #ifndef _ASM_IRQFLAGS_H
 #define _ASM_IRQFLAGS_H
 
+#include <asm/pil.h>
+
 #ifndef __ASSEMBLY__
 
 static inline unsigned long __raw_local_save_flags(void)
@@ -40,9 +42,9 @@ static inline void raw_local_irq_restore(unsigned long flags)
 static inline void raw_local_irq_disable(void)
 {
        __asm__ __volatile__(
-               "wrpr   15, %%pil"
+               "wrpr   %0, %%pil"
                : /* no outputs */
-               : /* no inputs */
+               : "i" (PIL_NORMAL_MAX)
                : "memory"
        );
 }
index e82cf9a3e60ee53e22ba05abdb0ef2a024e8b487..ff8e02d803344f4548b05854df2f3c6fd3a086cf 100644 (file)
@@ -1,8 +1,24 @@
-#ifndef ___ASM_SPARC_MODULE_H
-#define ___ASM_SPARC_MODULE_H
-#if defined(__sparc__) && defined(__arch64__)
-#include <asm/module_64.h>
-#else
-#include <asm/module_32.h>
-#endif
-#endif
+#ifndef __SPARC_MODULE_H
+#define __SPARC_MODULE_H
+struct mod_arch_specific { };
+
+/*
+ * Use some preprocessor magic to define the correct symbol
+ * for sparc32 and sparc64.
+ * Elf_Addr becomes Elf32_Addr for sparc32 and Elf64_Addr for sparc64
+ */
+#define ___ELF(a, b, c) a##b##c
+#define __ELF(a, b, c)  ___ELF(a, b, c)
+#define  _Elf(t)        __ELF(Elf, CONFIG_BITS, t)
+#define  _ELF(t)        __ELF(ELF, CONFIG_BITS, t)
+
+#define Elf_Shdr     _Elf(_Shdr)
+#define Elf_Sym      _Elf(_Sym)
+#define Elf_Ehdr     _Elf(_Ehdr)
+#define Elf_Rela     _Elf(_Rela)
+#define Elf_Addr     _Elf(_Addr)
+
+#define ELF_R_SYM    _ELF(_R_SYM)
+#define ELF_R_TYPE   _ELF(_R_TYPE)
+
+#endif /* __SPARC_MODULE_H */
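To make the token pasting concrete (an added illustration, not part of the header):
with CONFIG_BITS expanding to 64 on a 64-bit configuration, the generic names resolve
in two steps, for example:

	Elf_Shdr  -> _Elf(_Shdr)  -> __ELF(Elf, 64, _Shdr)  -> Elf64_Shdr
	ELF_R_SYM -> _ELF(_R_SYM) -> __ELF(ELF, 64, _R_SYM) -> ELF64_R_SYM

The __ELF()/___ELF() indirection exists so that CONFIG_BITS is expanded to 32 or 64
before the ## paste is performed.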
diff --git a/arch/sparc/include/asm/module_32.h b/arch/sparc/include/asm/module_32.h
deleted file mode 100644 (file)
index cbd9e67..0000000
+++ /dev/null
@@ -1,7 +0,0 @@
-#ifndef _ASM_SPARC_MODULE_H
-#define _ASM_SPARC_MODULE_H
-struct mod_arch_specific { };
-#define Elf_Shdr Elf32_Shdr
-#define Elf_Sym Elf32_Sym
-#define Elf_Ehdr Elf32_Ehdr
-#endif /* _ASM_SPARC_MODULE_H */
diff --git a/arch/sparc/include/asm/module_64.h b/arch/sparc/include/asm/module_64.h
deleted file mode 100644 (file)
index 3d77ba4..0000000
+++ /dev/null
@@ -1,7 +0,0 @@
-#ifndef _ASM_SPARC64_MODULE_H
-#define _ASM_SPARC64_MODULE_H
-struct mod_arch_specific { };
-#define Elf_Shdr Elf64_Shdr
-#define Elf_Sym Elf64_Sym
-#define Elf_Ehdr Elf64_Ehdr
-#endif /* _ASM_SPARC64_MODULE_H */
index 8b1649f29ed954439d996a8bbd337a9d2aa1c482..875da3552d80e7d415182957f95ef8b77c111c28 100644 (file)
@@ -170,9 +170,9 @@ struct linux_romvec {
 struct linux_nodeops {
        int (*no_nextnode)(int node);
        int (*no_child)(int node);
-       int (*no_proplen)(int node, char *name);
-       int (*no_getprop)(int node, char *name, char *val);
-       int (*no_setprop)(int node, char *name, char *val, int len);
+       int (*no_proplen)(int node, const char *name);
+       int (*no_getprop)(int node, const char *name, char *val);
+       int (*no_setprop)(int node, const char *name, char *val, int len);
        char * (*no_nextprop)(int node, char *name);
 };
 
index 699da05235c83f5f8c6afa4415775f17df7340e0..73d45521db04f8fb60584ef2a35c0a351ac1023a 100644 (file)
@@ -136,7 +136,7 @@ extern char prom_getchar(void);
 extern void prom_putchar(char character);
 
 /* Prom's internal routines, don't use in kernel/boot code. */
-extern void prom_printf(char *fmt, ...);
+extern void prom_printf(const char *fmt, ...);
 extern void prom_write(const char *buf, unsigned int len);
 
 /* Multiprocessor operations... */
@@ -199,12 +199,12 @@ extern int prom_getsibling(int node);
 /* Get the length, at the passed node, of the given property type.
  * Returns -1 on error (ie. no such property at this node).
  */
-extern int prom_getproplen(int thisnode, char *property);
+extern int prom_getproplen(int thisnode, const char *property);
 
 /* Fetch the requested property using the given buffer.  Returns
  * the number of bytes the prom put into your buffer or -1 on error.
  */
-extern int __must_check prom_getproperty(int thisnode, char *property,
+extern int __must_check prom_getproperty(int thisnode, const char *property,
                                         char *prop_buffer, int propbuf_size);
 
 /* Acquire an integer property. */
@@ -246,7 +246,7 @@ extern int prom_node_has_property(int node, char *property);
 /* Set the indicated property at the given node with the passed value.
  * Returns the number of bytes of your value that the prom took.
  */
-extern int prom_setprop(int node, char *prop_name, char *prop_value,
+extern int prom_setprop(int node, const char *prop_name, char *prop_value,
                        int value_size);
 
 extern int prom_pathtoinode(char *path);
index 71819bb943fc7e863f0a82cd8e4ff11f2b324c74..d573820c0ff4fc9bc766687174acdde78d664700 100644 (file)
  *
  * In fact any XCALL which has to etrap/rtrap has a problem because
  * it is difficult to prevent rtrap from running BH's, and that would
- * need to be done if the XCALL arrived while %pil==15.
+ * need to be done if the XCALL arrived while %pil==PIL_NORMAL_MAX.
+ *
+ * Finally, in order to handle profiling events even when a
+ * local_irq_disable() is in progress, we only disable up to level 14
+ * interrupts.  Profile counter overflow interrupts arrive at level
+ * 15.
  */
 #define PIL_SMP_CALL_FUNC      1
 #define PIL_SMP_RECEIVE_SIGNAL 2
@@ -18,5 +23,7 @@
 #define PIL_SMP_CTX_NEW_VERSION        4
 #define PIL_DEVICE_IRQ         5
 #define PIL_SMP_CALL_FUNC_SNGL 6
+#define PIL_NORMAL_MAX         14
+#define PIL_NMI                        15
 
 #endif /* !(_SPARC64_PIL_H) */
index ec21a4517641b16b8414baab15f18a55a4363e27..e580f5581c8863ce60ad9d6638f15c039f31ce30 100644 (file)
@@ -1,8 +1,27 @@
-#ifndef ___ASM_SPARC_SCATTERLIST_H
-#define ___ASM_SPARC_SCATTERLIST_H
-#if defined(__sparc__) && defined(__arch64__)
-#include <asm/scatterlist_64.h>
-#else
-#include <asm/scatterlist_32.h>
-#endif
+#ifndef _SPARC_SCATTERLIST_H
+#define _SPARC_SCATTERLIST_H
+
+#include <asm/page.h>
+#include <asm/types.h>
+
+struct scatterlist {
+#ifdef CONFIG_DEBUG_SG
+       unsigned long   sg_magic;
 #endif
+       unsigned long   page_link;
+       unsigned int    offset;
+
+       unsigned int    length;
+
+       dma_addr_t      dma_address;
+       __u32           dma_length;
+};
+
+#define sg_dma_address(sg)     ((sg)->dma_address)
+#define sg_dma_len(sg)         ((sg)->dma_length)
+
+#define ISA_DMA_THRESHOLD      (~0UL)
+
+#define ARCH_HAS_SG_CHAIN
+
+#endif /* !(_SPARC_SCATTERLIST_H) */
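For context, a generic usage sketch (not taken from this patch; dev, sgl and orig_nents
are placeholder names): after mapping a scatterlist for DMA, a driver walks the entries
and feeds these two accessors to its DMA engine:

	struct scatterlist *sg;
	int i, nents;

	nents = dma_map_sg(dev, sgl, orig_nents, DMA_TO_DEVICE);
	for_each_sg(sgl, sg, nents, i) {
		dma_addr_t addr  = sg_dma_address(sg);	/* bus address for this segment */
		unsigned int len = sg_dma_len(sg);	/* segment length */
		/* program addr/len into the device */
	}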
diff --git a/arch/sparc/include/asm/scatterlist_32.h b/arch/sparc/include/asm/scatterlist_32.h
deleted file mode 100644 (file)
index c82609c..0000000
+++ /dev/null
@@ -1,26 +0,0 @@
-#ifndef _SPARC_SCATTERLIST_H
-#define _SPARC_SCATTERLIST_H
-
-#include <linux/types.h>
-
-struct scatterlist {
-#ifdef CONFIG_DEBUG_SG
-       unsigned long sg_magic;
-#endif
-       unsigned long page_link;
-       unsigned int offset;
-
-       unsigned int length;
-
-       __u32 dvma_address; /* A place to hang host-specific addresses at. */
-       __u32 dvma_length;
-};
-
-#define sg_dma_address(sg) ((sg)->dvma_address)
-#define sg_dma_len(sg)     ((sg)->dvma_length)
-
-#define ISA_DMA_THRESHOLD (~0UL)
-
-#define ARCH_HAS_SG_CHAIN
-
-#endif /* !(_SPARC_SCATTERLIST_H) */
diff --git a/arch/sparc/include/asm/scatterlist_64.h b/arch/sparc/include/asm/scatterlist_64.h
deleted file mode 100644 (file)
index 81bd058..0000000
+++ /dev/null
@@ -1,27 +0,0 @@
-#ifndef _SPARC64_SCATTERLIST_H
-#define _SPARC64_SCATTERLIST_H
-
-#include <asm/page.h>
-#include <asm/types.h>
-
-struct scatterlist {
-#ifdef CONFIG_DEBUG_SG
-       unsigned long   sg_magic;
-#endif
-       unsigned long   page_link;
-       unsigned int    offset;
-
-       unsigned int    length;
-
-       dma_addr_t      dma_address;
-       __u32           dma_length;
-};
-
-#define sg_dma_address(sg)     ((sg)->dma_address)
-#define sg_dma_len(sg)         ((sg)->dma_length)
-
-#define ISA_DMA_THRESHOLD      (~0UL)
-
-#define ARCH_HAS_SG_CHAIN
-
-#endif /* !(_SPARC64_SCATTERLIST_H) */
index c7c69b00967ffe4fa826dcdbece00d901c1fe270..0b0553bbd8a0dd26feb97be864a29997b754643e 100644 (file)
@@ -1,8 +1,10 @@
-#ifndef ___ASM_SPARC_SECTIONS_H
-#define ___ASM_SPARC_SECTIONS_H
-#if defined(__sparc__) && defined(__arch64__)
-#include <asm/sections_64.h>
-#else
-#include <asm/sections_32.h>
-#endif
+#ifndef __SPARC_SECTIONS_H
+#define __SPARC_SECTIONS_H
+
+/* nothing to see, move along */
+#include <asm-generic/sections.h>
+
+/* sparc entry point */
+extern char _start[];
+
 #endif
diff --git a/arch/sparc/include/asm/sections_32.h b/arch/sparc/include/asm/sections_32.h
deleted file mode 100644 (file)
index 6832841..0000000
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef _SPARC_SECTIONS_H
-#define _SPARC_SECTIONS_H
-
-#include <asm-generic/sections.h>
-
-#endif
diff --git a/arch/sparc/include/asm/sections_64.h b/arch/sparc/include/asm/sections_64.h
deleted file mode 100644 (file)
index 3f4b9fd..0000000
+++ /dev/null
@@ -1,9 +0,0 @@
-#ifndef _SPARC64_SECTIONS_H
-#define _SPARC64_SECTIONS_H
-
-/* nothing to see, move along */
-#include <asm-generic/sections.h>
-
-extern char _start[];
-
-#endif
index 120cfe4577c752bc44a417cdf2e4c1cef986f1c9..c4d274d330e98b848489c3aeb64cf8516f6f7b4f 100644 (file)
  * and rebuild your kernel.
  */
 
-/* All of these locking primitives are expected to work properly
- * even in an RMO memory model, which currently is what the kernel
- * runs in.
- *
- * There is another issue.  Because we play games to save cycles
- * in the non-contention case, we need to be extra careful about
- * branch targets into the "spinning" code.  They live in their
- * own section, but the newer V9 branches have a shorter range
- * than the traditional 32-bit sparc branch variants.  The rule
- * is that the branches that go into and out of the spinner sections
- * must be pre-V9 branches.
+/* Because we play games to save cycles in the non-contention case, we
+ * need to be extra careful about branch targets into the "spinning"
+ * code.  They live in their own section, but the newer V9 branches
+ * have a shorter range than the traditional 32-bit sparc branch
+ * variants.  The rule is that the branches that go into and out of
+ * the spinner sections must be pre-V9 branches.
  */
 
 #define __raw_spin_is_locked(lp)       ((lp)->lock != 0)
@@ -38,12 +33,10 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
 
        __asm__ __volatile__(
 "1:    ldstub          [%1], %0\n"
-"      membar          #StoreLoad | #StoreStore\n"
 "      brnz,pn         %0, 2f\n"
 "       nop\n"
 "      .subsection     2\n"
 "2:    ldub            [%1], %0\n"
-"      membar          #LoadLoad\n"
 "      brnz,pt         %0, 2b\n"
 "       nop\n"
 "      ba,a,pt         %%xcc, 1b\n"
@@ -59,7 +52,6 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock)
 
        __asm__ __volatile__(
 "      ldstub          [%1], %0\n"
-"      membar          #StoreLoad | #StoreStore"
        : "=r" (result)
        : "r" (lock)
        : "memory");
@@ -70,7 +62,6 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock)
 static inline void __raw_spin_unlock(raw_spinlock_t *lock)
 {
        __asm__ __volatile__(
-"      membar          #StoreStore | #LoadStore\n"
 "      stb             %%g0, [%0]"
        : /* No outputs */
        : "r" (lock)
@@ -83,14 +74,12 @@ static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long fla
 
        __asm__ __volatile__(
 "1:    ldstub          [%2], %0\n"
-"      membar          #StoreLoad | #StoreStore\n"
 "      brnz,pn         %0, 2f\n"
 "       nop\n"
 "      .subsection     2\n"
 "2:    rdpr            %%pil, %1\n"
 "      wrpr            %3, %%pil\n"
 "3:    ldub            [%2], %0\n"
-"      membar          #LoadLoad\n"
 "      brnz,pt         %0, 3b\n"
 "       nop\n"
 "      ba,pt           %%xcc, 1b\n"
@@ -113,12 +102,10 @@ static void inline __read_lock(raw_rwlock_t *lock)
 "4:     add            %0, 1, %1\n"
 "      cas             [%2], %0, %1\n"
 "      cmp             %0, %1\n"
-"      membar          #StoreLoad | #StoreStore\n"
 "      bne,pn          %%icc, 1b\n"
 "       nop\n"
 "      .subsection     2\n"
 "2:    ldsw            [%2], %0\n"
-"      membar          #LoadLoad\n"
 "      brlz,pt         %0, 2b\n"
 "       nop\n"
 "      ba,a,pt         %%xcc, 4b\n"
@@ -139,7 +126,6 @@ static int inline __read_trylock(raw_rwlock_t *lock)
 "      add             %0, 1, %1\n"
 "      cas             [%2], %0, %1\n"
 "      cmp             %0, %1\n"
-"      membar          #StoreLoad | #StoreStore\n"
 "      bne,pn          %%icc, 1b\n"
 "       mov            1, %0\n"
 "2:"
@@ -155,7 +141,6 @@ static void inline __read_unlock(raw_rwlock_t *lock)
        unsigned long tmp1, tmp2;
 
        __asm__ __volatile__(
-"      membar  #StoreLoad | #LoadLoad\n"
 "1:    lduw    [%2], %0\n"
 "      sub     %0, 1, %1\n"
 "      cas     [%2], %0, %1\n"
@@ -179,12 +164,10 @@ static void inline __write_lock(raw_rwlock_t *lock)
 "4:     or             %0, %3, %1\n"
 "      cas             [%2], %0, %1\n"
 "      cmp             %0, %1\n"
-"      membar          #StoreLoad | #StoreStore\n"
 "      bne,pn          %%icc, 1b\n"
 "       nop\n"
 "      .subsection     2\n"
 "2:    lduw            [%2], %0\n"
-"      membar          #LoadLoad\n"
 "      brnz,pt         %0, 2b\n"
 "       nop\n"
 "      ba,a,pt         %%xcc, 4b\n"
@@ -197,7 +180,6 @@ static void inline __write_lock(raw_rwlock_t *lock)
 static void inline __write_unlock(raw_rwlock_t *lock)
 {
        __asm__ __volatile__(
-"      membar          #LoadStore | #StoreStore\n"
 "      stw             %%g0, [%0]"
        : /* no outputs */
        : "r" (lock)
@@ -217,7 +199,6 @@ static int inline __write_trylock(raw_rwlock_t *lock)
 "       or             %0, %4, %1\n"
 "      cas             [%3], %0, %1\n"
 "      cmp             %0, %1\n"
-"      membar          #StoreLoad | #StoreStore\n"
 "      bne,pn          %%icc, 1b\n"
 "       nop\n"
 "      mov             1, %2\n"
index 985ea7e319927d2e7906c13956cd95dc5354289e..f0d0c40c44da9c98bd83f573d6d8374d2c44172b 100644 (file)
@@ -6,6 +6,8 @@
 #ifndef _SPARC64_SPITFIRE_H
 #define _SPARC64_SPITFIRE_H
 
+#ifdef CONFIG_SPARC64
+
 #include <asm/asi.h>
 
 /* The following register addresses are accessible via ASI_DMMU
@@ -338,5 +340,5 @@ static inline void cheetah_put_itlb_data(int entry, unsigned long data)
 }
 
 #endif /* !(__ASSEMBLY__) */
-
+#endif /* CONFIG_SPARC64 */
 #endif /* !(_SPARC64_SPITFIRE_H) */
index 8623fc48fe247bbe4005bb3400d34802cff70fa1..79c1ae2b42a346ec11aa284a9719a926212741f3 100644 (file)
 
 #include <linux/irqflags.h>
 
+static inline unsigned int probe_irq_mask(unsigned long val)
+{
+       return 0;
+}
+
 /*
  * Sparc (general) CPU types
  */
index 8759f2a1b837c3f4c655cf48d10b91475f54da12..6c077816ab28c923f75c47494fa8b37efb55ab0a 100644 (file)
@@ -59,20 +59,9 @@ do { __asm__ __volatile__("ba,pt     %%xcc, 1f\n\t" \
                             : : : "memory"); \
 } while (0)
 
-#define mb()   \
-       membar_safe("#LoadLoad | #LoadStore | #StoreStore | #StoreLoad")
-#define rmb()  \
-       membar_safe("#LoadLoad")
-#define wmb()  \
-       membar_safe("#StoreStore")
-#define membar_storeload() \
-       membar_safe("#StoreLoad")
-#define membar_storeload_storestore() \
-       membar_safe("#StoreLoad | #StoreStore")
-#define membar_storeload_loadload() \
-       membar_safe("#StoreLoad | #LoadLoad")
-#define membar_storestore_loadstore() \
-       membar_safe("#StoreStore | #LoadStore")
+#define mb()   membar_safe("#StoreLoad")
+#define rmb()  __asm__ __volatile__("":::"memory")
+#define wmb()  __asm__ __volatile__("":::"memory")
 
 #endif
 
@@ -80,20 +69,20 @@ do {        __asm__ __volatile__("ba,pt     %%xcc, 1f\n\t" \
 
 #define read_barrier_depends()         do { } while(0)
 #define set_mb(__var, __value) \
-       do { __var = __value; membar_storeload_storestore(); } while(0)
+       do { __var = __value; membar_safe("#StoreLoad"); } while(0)
 
 #ifdef CONFIG_SMP
 #define smp_mb()       mb()
 #define smp_rmb()      rmb()
 #define smp_wmb()      wmb()
-#define smp_read_barrier_depends()     read_barrier_depends()
 #else
 #define smp_mb()       __asm__ __volatile__("":::"memory")
 #define smp_rmb()      __asm__ __volatile__("":::"memory")
 #define smp_wmb()      __asm__ __volatile__("":::"memory")
-#define smp_read_barrier_depends()     do { } while(0)
 #endif
 
+#define smp_read_barrier_depends()     do { } while(0)
+
 #define flushi(addr)   __asm__ __volatile__ ("flush %0" : : "r" (addr) : "memory")
 
 #define flushw_all()   __asm__ __volatile__("flushw")
@@ -107,11 +96,12 @@ do {       __asm__ __volatile__("ba,pt     %%xcc, 1f\n\t" \
  * arch/sparc64/kernel/smp.c:smp_percpu_timer_interrupt()
  * for more information.
  */
-#define reset_pic()                                                    \
-       __asm__ __volatile__("ba,pt     %xcc, 99f\n\t"          \
+#define write_pic(__p)                                         \
+       __asm__ __volatile__("ba,pt     %%xcc, 99f\n\t"         \
                             ".align    64\n"                   \
-                         "99:wr        %g0, 0x0, %pic\n\t"     \
-                            "rd        %pic, %g0")
+                         "99:wr        %0, 0x0, %%pic\n\t"     \
+                            "rd        %%pic, %%g0" : : "r" (__p))
+#define reset_pic()    write_pic(0)
 
 #ifndef __ASSEMBLY__
 
@@ -170,6 +160,7 @@ do {        if (test_thread_flag(TIF_PERFCTR)) {                            \
        "stb    %%o5, [%%g6 + %5]\n\t"                                  \
        "rdpr   %%cwp, %%o5\n\t"                                        \
        "stb    %%o5, [%%g6 + %8]\n\t"                                  \
+       "wrpr   %%g0, 15, %%pil\n\t"                                    \
        "mov    %4, %%g6\n\t"                                           \
        "ldub   [%4 + %8], %%g1\n\t"                                    \
        "wrpr   %%g1, %%cwp\n\t"                                        \
@@ -180,6 +171,7 @@ do {        if (test_thread_flag(TIF_PERFCTR)) {                            \
        "ldx    [%%sp + 2047 + 0x70], %%i6\n\t"                         \
        "ldx    [%%sp + 2047 + 0x78], %%i7\n\t"                         \
        "ldx    [%%g6 + %9], %%g4\n\t"                                  \
+       "wrpr   %%g0, 14, %%pil\n\t"                                    \
        "brz,pt %%o7, switch_to_pc\n\t"                                 \
        " mov   %%g7, %0\n\t"                                           \
        "sethi  %%hi(ret_from_syscall), %%g1\n\t"                       \
@@ -209,14 +201,12 @@ static inline unsigned long xchg32(__volatile__ unsigned int *m, unsigned int va
        unsigned long tmp1, tmp2;
 
        __asm__ __volatile__(
-"      membar          #StoreLoad | #LoadLoad\n"
 "      mov             %0, %1\n"
 "1:    lduw            [%4], %2\n"
 "      cas             [%4], %2, %0\n"
 "      cmp             %2, %0\n"
 "      bne,a,pn        %%icc, 1b\n"
 "       mov            %1, %0\n"
-"      membar          #StoreLoad | #StoreStore\n"
        : "=&r" (val), "=&r" (tmp1), "=&r" (tmp2)
        : "0" (val), "r" (m)
        : "cc", "memory");
@@ -228,14 +218,12 @@ static inline unsigned long xchg64(__volatile__ unsigned long *m, unsigned long
        unsigned long tmp1, tmp2;
 
        __asm__ __volatile__(
-"      membar          #StoreLoad | #LoadLoad\n"
 "      mov             %0, %1\n"
 "1:    ldx             [%4], %2\n"
 "      casx            [%4], %2, %0\n"
 "      cmp             %2, %0\n"
 "      bne,a,pn        %%xcc, 1b\n"
 "       mov            %1, %0\n"
-"      membar          #StoreLoad | #StoreStore\n"
        : "=&r" (val), "=&r" (tmp1), "=&r" (tmp2)
        : "0" (val), "r" (m)
        : "cc", "memory");
@@ -272,9 +260,7 @@ extern void die_if_kernel(char *str, struct pt_regs *regs) __attribute__ ((noret
 static inline unsigned long
 __cmpxchg_u32(volatile int *m, int old, int new)
 {
-       __asm__ __volatile__("membar #StoreLoad | #LoadLoad\n"
-                            "cas [%2], %3, %0\n\t"
-                            "membar #StoreLoad | #StoreStore"
+       __asm__ __volatile__("cas [%2], %3, %0"
                             : "=&r" (new)
                             : "0" (new), "r" (m), "r" (old)
                             : "memory");
@@ -285,9 +271,7 @@ __cmpxchg_u32(volatile int *m, int old, int new)
 static inline unsigned long
 __cmpxchg_u64(volatile long *m, unsigned long old, unsigned long new)
 {
-       __asm__ __volatile__("membar #StoreLoad | #LoadLoad\n"
-                            "casx [%2], %3, %0\n\t"
-                            "membar #StoreLoad | #StoreStore"
+       __asm__ __volatile__("casx [%2], %3, %0"
                             : "=&r" (new)
                             : "0" (new), "r" (m), "r" (old)
                             : "memory");
index 76e4299dd9bc957667668bd39b38713d8cb9440e..83c571d8c8a7ebd2fb243c9b057105dd43c827e6 100644 (file)
@@ -50,8 +50,6 @@
 #define TSB_TAG_INVALID_BIT    46
 #define TSB_TAG_INVALID_HIGH   (1 << (TSB_TAG_INVALID_BIT - 32))
 
-#define TSB_MEMBAR     membar  #StoreStore
-
 /* Some cpus support physical address quad loads.  We want to use
  * those if possible so we don't need to hard-lock the TSB mapping
  * into the TLB.  We encode some instruction patching in order to
@@ -128,13 +126,11 @@ extern struct tsb_phys_patch_entry __tsb_phys_patch, __tsb_phys_patch_end;
        cmp     REG1, REG2;             \
        bne,pn  %icc, 99b;              \
         nop;                           \
-       TSB_MEMBAR
 
 #define TSB_WRITE(TSB, TTE, TAG) \
        add     TSB, 0x8, TSB;   \
        TSB_STORE(TSB, TTE);     \
        sub     TSB, 0x8, TSB;   \
-       TSB_MEMBAR;              \
        TSB_STORE(TSB, TAG);
 
 #define KTSB_LOAD_QUAD(TSB, REG) \
@@ -153,13 +149,11 @@ extern struct tsb_phys_patch_entry __tsb_phys_patch, __tsb_phys_patch_end;
        cmp     REG1, REG2;             \
        bne,pn  %icc, 99b;              \
         nop;                           \
-       TSB_MEMBAR
 
 #define KTSB_WRITE(TSB, TTE, TAG) \
        add     TSB, 0x8, TSB;   \
        stxa    TTE, [TSB] ASI_N;     \
        sub     TSB, 0x8, TSB;   \
-       TSB_MEMBAR;              \
        stxa    TAG, [TSB] ASI_N;
 
        /* Do a kernel page table walk.  Leaves physical PTE pointer in
index 5708ba2719fb07e666cef4a0cc1e1cccf597d14d..48f2807d326563da2bc95be4f870517685331aaf 100644 (file)
@@ -2,6 +2,7 @@
 #define _SPARC64_TTABLE_H
 
 #include <asm/utrap.h>
+#include <asm/pil.h>
 
 #ifdef __ASSEMBLY__
 #include <asm/thread_info.h>
 
 #define TRAP_IRQ(routine, level)                       \
        rdpr    %pil, %g2;                              \
-       wrpr    %g0, 15, %pil;                          \
+       wrpr    %g0, PIL_NORMAL_MAX, %pil;              \
        sethi   %hi(1f-4), %g7;                         \
        ba,pt   %xcc, etrap_irq;                        \
         or     %g7, %lo(1f-4), %g7;                    \
 
 #define TRAP_IRQ(routine, level)                       \
        rdpr    %pil, %g2;                              \
-       wrpr    %g0, 15, %pil;                          \
+       wrpr    %g0, PIL_NORMAL_MAX, %pil;              \
        ba,pt   %xcc, etrap_irq;                        \
         rd     %pc, %g7;                               \
        mov     level, %o0;                             \
 
 #endif
 
+#define TRAP_NMI_IRQ(routine, level)                   \
+       rdpr    %pil, %g2;                              \
+       wrpr    %g0, PIL_NMI, %pil;                     \
+       ba,pt   %xcc, etrap_irq;                        \
+        rd     %pc, %g7;                               \
+       mov     level, %o0;                             \
+       call    routine;                                \
+        add    %sp, PTREGS_OFF, %o1;                   \
+       ba,a,pt %xcc, rtrap_nmi;
+
 #define TRAP_IVEC TRAP_NOSAVE(do_ivec)
 
 #define BTRAP(lvl) TRAP_ARG(bad_trap, lvl)
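TRAP_IRQ() now raises %pil only to PIL_NORMAL_MAX instead of the hard-coded
15, while the new TRAP_NMI_IRQ() raises it to PIL_NMI, presumably so an
NMI-level interrupt can still be delivered while ordinary IRQs are masked.
The real constants come from the newly included <asm/pil.h>; the values
below are assumptions used purely for illustration:

    /* Assumed values, for illustration only -- the authoritative definitions
     * live in <asm/pil.h>.  Normal trap entry masks interrupts up to
     * PIL_NORMAL_MAX; NMI entry masks everything below the maximum level. */
    #define PIL_NORMAL_MAX  14
    #define PIL_NMI         15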
diff --git a/arch/sparc/include/asm/unistd.h b/arch/sparc/include/asm/unistd.h
index 4207fb362da03e300ce2cff530ca68401560c7f7..031f038b19f719c6a34fdd346efd4c3ebacf7283 100644 (file)
@@ -1,8 +1,444 @@
-#ifndef ___ASM_SPARC_UNISTD_H
-#define ___ASM_SPARC_UNISTD_H
-#if defined(__sparc__) && defined(__arch64__)
-#include <asm/unistd_64.h>
+#ifndef _SPARC_UNISTD_H
+#define _SPARC_UNISTD_H
+
+/*
+ * System calls under the Sparc.
+ *
+ * Don't be scared by the ugly clobbers, it is the only way I can
+ * think of right now to force the arguments into fixed registers
+ * before the trap into the system call with gcc 'asm' statements.
+ *
+ * Copyright (C) 1995, 2007 David S. Miller (davem@davemloft.net)
+ *
+ * SunOS compatibility based upon preliminary work which is:
+ *
+ * Copyright (C) 1995 Adrian M. Rodriguez (adrian@remus.rutgers.edu)
+ */
+#ifndef __32bit_syscall_numbers__
+#ifndef __arch64__
+#define __32bit_syscall_numbers__
+#endif
+#endif
+
+#define __NR_restart_syscall      0 /* Linux Specific                             */
+#define __NR_exit                 1 /* Common                                      */
+#define __NR_fork                 2 /* Common                                      */
+#define __NR_read                 3 /* Common                                      */
+#define __NR_write                4 /* Common                                      */
+#define __NR_open                 5 /* Common                                      */
+#define __NR_close                6 /* Common                                      */
+#define __NR_wait4                7 /* Common                                      */
+#define __NR_creat                8 /* Common                                      */
+#define __NR_link                 9 /* Common                                      */
+#define __NR_unlink              10 /* Common                                      */
+#define __NR_execv               11 /* SunOS Specific                              */
+#define __NR_chdir               12 /* Common                                      */
+#define __NR_chown              13 /* Common                                      */
+#define __NR_mknod               14 /* Common                                      */
+#define __NR_chmod               15 /* Common                                      */
+#define __NR_lchown              16 /* Common                                      */
+#define __NR_brk                 17 /* Common                                      */
+#define __NR_perfctr             18 /* Performance counter operations              */
+#define __NR_lseek               19 /* Common                                      */
+#define __NR_getpid              20 /* Common                                      */
+#define __NR_capget             21 /* Linux Specific                              */
+#define __NR_capset             22 /* Linux Specific                              */
+#define __NR_setuid              23 /* Implemented via setreuid in SunOS           */
+#define __NR_getuid              24 /* Common                                      */
+#define __NR_vmsplice           25 /* ENOSYS under SunOS                          */
+#define __NR_ptrace              26 /* Common                                      */
+#define __NR_alarm               27 /* Implemented via setitimer in SunOS          */
+#define __NR_sigaltstack        28 /* Common                                      */
+#define __NR_pause               29 /* Is sigblock(0)->sigpause() in SunOS         */
+#define __NR_utime               30 /* Implemented via utimes() under SunOS        */
+#ifdef __32bit_syscall_numbers__
+#define __NR_lchown32            31 /* Linux sparc32 specific                      */
+#define __NR_fchown32            32 /* Linux sparc32 specific                      */
+#endif
+#define __NR_access              33 /* Common                                      */
+#define __NR_nice                34 /* Implemented via get/setpriority() in SunOS  */
+#ifdef __32bit_syscall_numbers__
+#define __NR_chown32             35 /* Linux sparc32 specific                      */
+#endif
+#define __NR_sync                36 /* Common                                      */
+#define __NR_kill                37 /* Common                                      */
+#define __NR_stat                38 /* Common                                      */
+#define __NR_sendfile           39 /* Linux Specific                              */
+#define __NR_lstat               40 /* Common                                      */
+#define __NR_dup                 41 /* Common                                      */
+#define __NR_pipe                42 /* Common                                      */
+#define __NR_times               43 /* Implemented via getrusage() in SunOS        */
+#ifdef __32bit_syscall_numbers__
+#define __NR_getuid32            44 /* Linux sparc32 specific                      */
+#endif
+#define __NR_umount2             45 /* Linux Specific                              */
+#define __NR_setgid              46 /* Implemented via setregid() in SunOS         */
+#define __NR_getgid              47 /* Common                                      */
+#define __NR_signal              48 /* Implemented via sigvec() in SunOS           */
+#define __NR_geteuid             49 /* SunOS calls getuid()                        */
+#define __NR_getegid             50 /* SunOS calls getgid()                        */
+#define __NR_acct                51 /* Common                                      */
+#ifdef __32bit_syscall_numbers__
+#define __NR_getgid32            53 /* Linux sparc32 specific                      */
+#else
+#define __NR_memory_ordering    52 /* Linux Specific                              */
+#endif
+#define __NR_ioctl               54 /* Common                                      */
+#define __NR_reboot              55 /* Common                                      */
+#ifdef __32bit_syscall_numbers__
+#define __NR_mmap2              56 /* Linux sparc32 Specific                      */
+#endif
+#define __NR_symlink             57 /* Common                                      */
+#define __NR_readlink            58 /* Common                                      */
+#define __NR_execve              59 /* Common                                      */
+#define __NR_umask               60 /* Common                                      */
+#define __NR_chroot              61 /* Common                                      */
+#define __NR_fstat               62 /* Common                                      */
+#define __NR_fstat64            63 /* Linux Specific                              */
+#define __NR_getpagesize         64 /* Common                                      */
+#define __NR_msync               65 /* Common in newer 1.3.x revs...               */
+#define __NR_vfork               66 /* Common                                      */
+#define __NR_pread64             67 /* Linux Specific                              */
+#define __NR_pwrite64            68 /* Linux Specific                              */
+#ifdef __32bit_syscall_numbers__
+#define __NR_geteuid32           69 /* Linux sparc32, sbrk under SunOS             */
+#define __NR_getegid32           70 /* Linux sparc32, sstk under SunOS             */
+#endif
+#define __NR_mmap                71 /* Common                                      */
+#ifdef __32bit_syscall_numbers__
+#define __NR_setreuid32          72 /* Linux sparc32, vadvise under SunOS          */
+#endif
+#define __NR_munmap              73 /* Common                                      */
+#define __NR_mprotect            74 /* Common                                      */
+#define __NR_madvise             75 /* Common                                      */
+#define __NR_vhangup             76 /* Common                                      */
+#ifdef __32bit_syscall_numbers__
+#define __NR_truncate64                 77 /* Linux sparc32 Specific                      */
+#endif
+#define __NR_mincore             78 /* Common                                      */
+#define __NR_getgroups           79 /* Common                                      */
+#define __NR_setgroups           80 /* Common                                      */
+#define __NR_getpgrp             81 /* Common                                      */
+#ifdef __32bit_syscall_numbers__
+#define __NR_setgroups32         82 /* Linux sparc32, setpgrp under SunOS          */
+#endif
+#define __NR_setitimer           83 /* Common                                      */
+#ifdef __32bit_syscall_numbers__
+#define __NR_ftruncate64        84 /* Linux sparc32 Specific                      */
+#endif
+#define __NR_swapon              85 /* Common                                      */
+#define __NR_getitimer           86 /* Common                                      */
+#ifdef __32bit_syscall_numbers__
+#define __NR_setuid32            87 /* Linux sparc32, gethostname under SunOS      */
+#endif
+#define __NR_sethostname         88 /* Common                                      */
+#ifdef __32bit_syscall_numbers__
+#define __NR_setgid32            89 /* Linux sparc32, getdtablesize under SunOS    */
+#endif
+#define __NR_dup2                90 /* Common                                      */
+#ifdef __32bit_syscall_numbers__
+#define __NR_setfsuid32          91 /* Linux sparc32, getdopt under SunOS          */
+#endif
+#define __NR_fcntl               92 /* Common                                      */
+#define __NR_select              93 /* Common                                      */
+#ifdef __32bit_syscall_numbers__
+#define __NR_setfsgid32          94 /* Linux sparc32, setdopt under SunOS          */
+#endif
+#define __NR_fsync               95 /* Common                                      */
+#define __NR_setpriority         96 /* Common                                      */
+#define __NR_socket              97 /* Common                                      */
+#define __NR_connect             98 /* Common                                      */
+#define __NR_accept              99 /* Common                                      */
+#define __NR_getpriority        100 /* Common                                      */
+#define __NR_rt_sigreturn       101 /* Linux Specific                              */
+#define __NR_rt_sigaction       102 /* Linux Specific                              */
+#define __NR_rt_sigprocmask     103 /* Linux Specific                              */
+#define __NR_rt_sigpending      104 /* Linux Specific                              */
+#define __NR_rt_sigtimedwait    105 /* Linux Specific                              */
+#define __NR_rt_sigqueueinfo    106 /* Linux Specific                              */
+#define __NR_rt_sigsuspend      107 /* Linux Specific                              */
+#ifdef __32bit_syscall_numbers__
+#define __NR_setresuid32        108 /* Linux Specific, sigvec under SunOS         */
+#define __NR_getresuid32        109 /* Linux Specific, sigblock under SunOS       */
+#define __NR_setresgid32        110 /* Linux Specific, sigsetmask under SunOS     */
+#define __NR_getresgid32        111 /* Linux Specific, sigpause under SunOS       */
+#define __NR_setregid32         112 /* Linux sparc32, sigstack under SunOS         */
+#else
+#define __NR_setresuid          108 /* Linux Specific, sigvec under SunOS         */
+#define __NR_getresuid          109 /* Linux Specific, sigblock under SunOS       */
+#define __NR_setresgid          110 /* Linux Specific, sigsetmask under SunOS     */
+#define __NR_getresgid          111 /* Linux Specific, sigpause under SunOS       */
+#endif
+#define __NR_recvmsg            113 /* Common                                      */
+#define __NR_sendmsg            114 /* Common                                      */
+#ifdef __32bit_syscall_numbers__
+#define __NR_getgroups32        115 /* Linux sparc32, vtrace under SunOS           */
+#endif
+#define __NR_gettimeofday       116 /* Common                                      */
+#define __NR_getrusage          117 /* Common                                      */
+#define __NR_getsockopt         118 /* Common                                      */
+#define __NR_getcwd            119 /* Linux Specific                              */
+#define __NR_readv              120 /* Common                                      */
+#define __NR_writev             121 /* Common                                      */
+#define __NR_settimeofday       122 /* Common                                      */
+#define __NR_fchown             123 /* Common                                      */
+#define __NR_fchmod             124 /* Common                                      */
+#define __NR_recvfrom           125 /* Common                                      */
+#define __NR_setreuid           126 /* Common                                      */
+#define __NR_setregid           127 /* Common                                      */
+#define __NR_rename             128 /* Common                                      */
+#define __NR_truncate           129 /* Common                                      */
+#define __NR_ftruncate          130 /* Common                                      */
+#define __NR_flock              131 /* Common                                      */
+#define __NR_lstat64           132 /* Linux Specific                              */
+#define __NR_sendto             133 /* Common                                      */
+#define __NR_shutdown           134 /* Common                                      */
+#define __NR_socketpair         135 /* Common                                      */
+#define __NR_mkdir              136 /* Common                                      */
+#define __NR_rmdir              137 /* Common                                      */
+#define __NR_utimes             138 /* SunOS Specific                              */
+#define __NR_stat64            139 /* Linux Specific                              */
+#define __NR_sendfile64         140 /* adjtime under SunOS                         */
+#define __NR_getpeername        141 /* Common                                      */
+#define __NR_futex              142 /* gethostid under SunOS                       */
+#define __NR_gettid             143 /* ENOSYS under SunOS                          */
+#define __NR_getrlimit         144 /* Common                                      */
+#define __NR_setrlimit          145 /* Common                                      */
+#define __NR_pivot_root                146 /* Linux Specific, killpg under SunOS          */
+#define __NR_prctl             147 /* ENOSYS under SunOS                          */
+#define __NR_pciconfig_read    148 /* ENOSYS under SunOS                          */
+#define __NR_pciconfig_write   149 /* ENOSYS under SunOS                          */
+#define __NR_getsockname        150 /* Common                                      */
+#define __NR_inotify_init       151 /* Linux specific                              */
+#define __NR_inotify_add_watch  152 /* Linux specific                              */
+#define __NR_poll               153 /* Common                                      */
+#define __NR_getdents64                154 /* Linux specific                              */
+#ifdef __32bit_syscall_numbers__
+#define __NR_fcntl64           155 /* Linux sparc32 Specific                      */
+#endif
+#define __NR_inotify_rm_watch   156 /* Linux specific                             */
+#define __NR_statfs             157 /* Common                                      */
+#define __NR_fstatfs            158 /* Common                                      */
+#define __NR_umount             159 /* Common                                      */
+#define __NR_sched_set_affinity 160 /* Linux specific, async_daemon under SunOS    */
+#define __NR_sched_get_affinity 161 /* Linux specific, getfh under SunOS           */
+#define __NR_getdomainname      162 /* SunOS Specific                              */
+#define __NR_setdomainname      163 /* Common                                      */
+#ifndef __32bit_syscall_numbers__
+#define __NR_utrap_install     164 /* SYSV ABI/v9 required                        */
+#endif
+#define __NR_quotactl           165 /* Common                                      */
+#define __NR_set_tid_address    166 /* Linux specific, exportfs under SunOS        */
+#define __NR_mount              167 /* Common                                      */
+#define __NR_ustat              168 /* Common                                      */
+#define __NR_setxattr           169 /* SunOS: semsys                               */
+#define __NR_lsetxattr          170 /* SunOS: msgsys                               */
+#define __NR_fsetxattr          171 /* SunOS: shmsys                               */
+#define __NR_getxattr           172 /* SunOS: auditsys                             */
+#define __NR_lgetxattr          173 /* SunOS: rfssys                               */
+#define __NR_getdents           174 /* Common                                      */
+#define __NR_setsid             175 /* Common                                      */
+#define __NR_fchdir             176 /* Common                                      */
+#define __NR_fgetxattr          177 /* SunOS: fchroot                              */
+#define __NR_listxattr          178 /* SunOS: vpixsys                              */
+#define __NR_llistxattr         179 /* SunOS: aioread                              */
+#define __NR_flistxattr         180 /* SunOS: aiowrite                             */
+#define __NR_removexattr        181 /* SunOS: aiowait                              */
+#define __NR_lremovexattr       182 /* SunOS: aiocancel                            */
+#define __NR_sigpending         183 /* Common                                      */
+#define __NR_query_module      184 /* Linux Specific                              */
+#define __NR_setpgid            185 /* Common                                      */
+#define __NR_fremovexattr       186 /* SunOS: pathconf                             */
+#define __NR_tkill              187 /* SunOS: fpathconf                            */
+#define __NR_exit_group                188 /* Linux specific, sysconf undef SunOS         */
+#define __NR_uname              189 /* Linux Specific                              */
+#define __NR_init_module        190 /* Linux Specific                              */
+#define __NR_personality        191 /* Linux Specific                              */
+#define __NR_remap_file_pages   192 /* Linux Specific                              */
+#define __NR_epoll_create       193 /* Linux Specific                              */
+#define __NR_epoll_ctl          194 /* Linux Specific                              */
+#define __NR_epoll_wait         195 /* Linux Specific                              */
+#define __NR_ioprio_set         196 /* Linux Specific                              */
+#define __NR_getppid            197 /* Linux Specific                              */
+#define __NR_sigaction          198 /* Linux Specific                              */
+#define __NR_sgetmask           199 /* Linux Specific                              */
+#define __NR_ssetmask           200 /* Linux Specific                              */
+#define __NR_sigsuspend         201 /* Linux Specific                              */
+#define __NR_oldlstat           202 /* Linux Specific                              */
+#define __NR_uselib             203 /* Linux Specific                              */
+#define __NR_readdir            204 /* Linux Specific                              */
+#define __NR_readahead          205 /* Linux Specific                              */
+#define __NR_socketcall         206 /* Linux Specific                              */
+#define __NR_syslog             207 /* Linux Specific                              */
+#define __NR_lookup_dcookie     208 /* Linux Specific                              */
+#define __NR_fadvise64          209 /* Linux Specific                              */
+#define __NR_fadvise64_64       210 /* Linux Specific                              */
+#define __NR_tgkill             211 /* Linux Specific                              */
+#define __NR_waitpid            212 /* Linux Specific                              */
+#define __NR_swapoff            213 /* Linux Specific                              */
+#define __NR_sysinfo            214 /* Linux Specific                              */
+#define __NR_ipc                215 /* Linux Specific                              */
+#define __NR_sigreturn          216 /* Linux Specific                              */
+#define __NR_clone              217 /* Linux Specific                              */
+#define __NR_ioprio_get         218 /* Linux Specific                              */
+#define __NR_adjtimex           219 /* Linux Specific                              */
+#define __NR_sigprocmask        220 /* Linux Specific                              */
+#define __NR_create_module      221 /* Linux Specific                              */
+#define __NR_delete_module      222 /* Linux Specific                              */
+#define __NR_get_kernel_syms    223 /* Linux Specific                              */
+#define __NR_getpgid            224 /* Linux Specific                              */
+#define __NR_bdflush            225 /* Linux Specific                              */
+#define __NR_sysfs              226 /* Linux Specific                              */
+#define __NR_afs_syscall        227 /* Linux Specific                              */
+#define __NR_setfsuid           228 /* Linux Specific                              */
+#define __NR_setfsgid           229 /* Linux Specific                              */
+#define __NR__newselect         230 /* Linux Specific                              */
+#ifdef __32bit_syscall_numbers__
+#define __NR_time               231 /* Linux Specific                              */
 #else
-#include <asm/unistd_32.h>
+#ifdef __KERNEL__
+#define __NR_time              231 /* Linux sparc32                               */
+#endif
+#endif
+#define __NR_splice             232 /* Linux Specific                              */
+#define __NR_stime              233 /* Linux Specific                              */
+#define __NR_statfs64           234 /* Linux Specific                              */
+#define __NR_fstatfs64          235 /* Linux Specific                              */
+#define __NR__llseek            236 /* Linux Specific                              */
+#define __NR_mlock              237
+#define __NR_munlock            238
+#define __NR_mlockall           239
+#define __NR_munlockall         240
+#define __NR_sched_setparam     241
+#define __NR_sched_getparam     242
+#define __NR_sched_setscheduler 243
+#define __NR_sched_getscheduler 244
+#define __NR_sched_yield        245
+#define __NR_sched_get_priority_max 246
+#define __NR_sched_get_priority_min 247
+#define __NR_sched_rr_get_interval  248
+#define __NR_nanosleep          249
+#define __NR_mremap             250
+#define __NR__sysctl            251
+#define __NR_getsid             252
+#define __NR_fdatasync          253
+#define __NR_nfsservctl         254
+#define __NR_sync_file_range   255
+#define __NR_clock_settime     256
+#define __NR_clock_gettime     257
+#define __NR_clock_getres      258
+#define __NR_clock_nanosleep   259
+#define __NR_sched_getaffinity 260
+#define __NR_sched_setaffinity 261
+#define __NR_timer_settime     262
+#define __NR_timer_gettime     263
+#define __NR_timer_getoverrun  264
+#define __NR_timer_delete      265
+#define __NR_timer_create      266
+/* #define __NR_vserver                267 Reserved for VSERVER */
+#define __NR_io_setup          268
+#define __NR_io_destroy                269
+#define __NR_io_submit         270
+#define __NR_io_cancel         271
+#define __NR_io_getevents      272
+#define __NR_mq_open           273
+#define __NR_mq_unlink         274
+#define __NR_mq_timedsend      275
+#define __NR_mq_timedreceive   276
+#define __NR_mq_notify         277
+#define __NR_mq_getsetattr     278
+#define __NR_waitid            279
+#define __NR_tee               280
+#define __NR_add_key           281
+#define __NR_request_key       282
+#define __NR_keyctl            283
+#define __NR_openat            284
+#define __NR_mkdirat           285
+#define __NR_mknodat           286
+#define __NR_fchownat          287
+#define __NR_futimesat         288
+#define __NR_fstatat64         289
+#define __NR_unlinkat          290
+#define __NR_renameat          291
+#define __NR_linkat            292
+#define __NR_symlinkat         293
+#define __NR_readlinkat                294
+#define __NR_fchmodat          295
+#define __NR_faccessat         296
+#define __NR_pselect6          297
+#define __NR_ppoll             298
+#define __NR_unshare           299
+#define __NR_set_robust_list   300
+#define __NR_get_robust_list   301
+#define __NR_migrate_pages     302
+#define __NR_mbind             303
+#define __NR_get_mempolicy     304
+#define __NR_set_mempolicy     305
+#define __NR_kexec_load                306
+#define __NR_move_pages                307
+#define __NR_getcpu            308
+#define __NR_epoll_pwait       309
+#define __NR_utimensat         310
+#define __NR_signalfd          311
+#define __NR_timerfd_create    312
+#define __NR_eventfd           313
+#define __NR_fallocate         314
+#define __NR_timerfd_settime   315
+#define __NR_timerfd_gettime   316
+#define __NR_signalfd4         317
+#define __NR_eventfd2          318
+#define __NR_epoll_create1     319
+#define __NR_dup3              320
+#define __NR_pipe2             321
+#define __NR_inotify_init1     322
+#define __NR_accept4           323
+
+#define NR_SYSCALLS            324
+
+#ifdef __32bit_syscall_numbers__
+/* Sparc 32-bit only has the "setresuid32", "getresuid32" variants,
+ * it never had the plain ones and there is no value to adding those
+ * old versions into the syscall table.
+ */
+#define __IGNORE_setresuid
+#define __IGNORE_getresuid
+#define __IGNORE_setresgid
+#define __IGNORE_getresgid
 #endif
+
+#ifdef __KERNEL__
+#define __ARCH_WANT_IPC_PARSE_VERSION
+#define __ARCH_WANT_OLD_READDIR
+#define __ARCH_WANT_STAT64
+#define __ARCH_WANT_SYS_ALARM
+#define __ARCH_WANT_SYS_GETHOSTNAME
+#define __ARCH_WANT_SYS_PAUSE
+#define __ARCH_WANT_SYS_SGETMASK
+#define __ARCH_WANT_SYS_SIGNAL
+#define __ARCH_WANT_SYS_TIME
+#define __ARCH_WANT_SYS_UTIME
+#define __ARCH_WANT_SYS_WAITPID
+#define __ARCH_WANT_SYS_SOCKETCALL
+#define __ARCH_WANT_SYS_FADVISE64
+#define __ARCH_WANT_SYS_GETPGRP
+#define __ARCH_WANT_SYS_LLSEEK
+#define __ARCH_WANT_SYS_NICE
+#define __ARCH_WANT_SYS_OLDUMOUNT
+#define __ARCH_WANT_SYS_SIGPENDING
+#define __ARCH_WANT_SYS_SIGPROCMASK
+#define __ARCH_WANT_SYS_RT_SIGSUSPEND
+#ifndef __32bit_syscall_numbers__
+#define __ARCH_WANT_COMPAT_SYS_TIME
+#define __ARCH_WANT_COMPAT_SYS_RT_SIGSUSPEND
 #endif
+
+/*
+ * "Conditional" syscalls
+ *
+ * What we want is __attribute__((weak,alias("sys_ni_syscall"))),
+ * but it doesn't work on all toolchains, so we just do it by hand
+ */
+#define cond_syscall(x) asm(".weak\t" #x "\n\t.set\t" #x ",sys_ni_syscall")
+
+#endif /* __KERNEL__ */
+#endif /* _SPARC_UNISTD_H */
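With the unified header above, sparc32 and sparc64 builds share one set of
__NR_* names; the 32-bit-only numbers are guarded by
__32bit_syscall_numbers__, which is defined automatically whenever
__arch64__ is not. A short usage sketch of how these numbers are consumed
from userspace (ordinary demo code, not part of the kernel):

    /* Demo: invoke a syscall by its __NR_* number through the generic
     * syscall() wrapper.  __NR_gettid (143 in the table above) has the same
     * value in both the 32-bit and 64-bit sparc tables. */
    #include <stdio.h>
    #include <unistd.h>
    #include <sys/syscall.h>     /* pulls in the architecture's __NR_* list */

    int main(void)
    {
            long tid = syscall(__NR_gettid);

            printf("tid=%ld\n", tid);
            return 0;
    }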
diff --git a/arch/sparc/include/asm/unistd_32.h b/arch/sparc/include/asm/unistd_32.h
deleted file mode 100644 (file)
index 0d13d2a..0000000
+++ /dev/null
@@ -1,385 +0,0 @@
-#ifndef _SPARC_UNISTD_H
-#define _SPARC_UNISTD_H
-
-/*
- * System calls under the Sparc.
- *
- * Don't be scared by the ugly clobbers, it is the only way I can
- * think of right now to force the arguments into fixed registers
- * before the trap into the system call with gcc 'asm' statements.
- *
- * Copyright (C) 1995, 2007 David S. Miller (davem@davemloft.net)
- *
- * SunOS compatibility based upon preliminary work which is:
- *
- * Copyright (C) 1995 Adrian M. Rodriguez (adrian@remus.rutgers.edu)
- */
-
-#define __NR_restart_syscall      0 /* Linux Specific                             */
-#define __NR_exit                 1 /* Common                                      */
-#define __NR_fork                 2 /* Common                                      */
-#define __NR_read                 3 /* Common                                      */
-#define __NR_write                4 /* Common                                      */
-#define __NR_open                 5 /* Common                                      */
-#define __NR_close                6 /* Common                                      */
-#define __NR_wait4                7 /* Common                                      */
-#define __NR_creat                8 /* Common                                      */
-#define __NR_link                 9 /* Common                                      */
-#define __NR_unlink              10 /* Common                                      */
-#define __NR_execv               11 /* SunOS Specific                              */
-#define __NR_chdir               12 /* Common                                      */
-#define __NR_chown              13 /* Common                                      */
-#define __NR_mknod               14 /* Common                                      */
-#define __NR_chmod               15 /* Common                                      */
-#define __NR_lchown              16 /* Common                                      */
-#define __NR_brk                 17 /* Common                                      */
-#define __NR_perfctr             18 /* Performance counter operations              */
-#define __NR_lseek               19 /* Common                                      */
-#define __NR_getpid              20 /* Common                                      */
-#define __NR_capget             21 /* Linux Specific                              */
-#define __NR_capset             22 /* Linux Specific                              */
-#define __NR_setuid              23 /* Implemented via setreuid in SunOS           */
-#define __NR_getuid              24 /* Common                                      */
-#define __NR_vmsplice           25 /* ENOSYS under SunOS                          */
-#define __NR_ptrace              26 /* Common                                      */
-#define __NR_alarm               27 /* Implemented via setitimer in SunOS          */
-#define __NR_sigaltstack        28 /* Common                                      */
-#define __NR_pause               29 /* Is sigblock(0)->sigpause() in SunOS         */
-#define __NR_utime               30 /* Implemented via utimes() under SunOS        */
-#define __NR_lchown32            31 /* Linux sparc32 specific                      */
-#define __NR_fchown32            32 /* Linux sparc32 specific                      */
-#define __NR_access              33 /* Common                                      */
-#define __NR_nice                34 /* Implemented via get/setpriority() in SunOS  */
-#define __NR_chown32             35 /* Linux sparc32 specific                      */
-#define __NR_sync                36 /* Common                                      */
-#define __NR_kill                37 /* Common                                      */
-#define __NR_stat                38 /* Common                                      */
-#define __NR_sendfile           39 /* Linux Specific                              */
-#define __NR_lstat               40 /* Common                                      */
-#define __NR_dup                 41 /* Common                                      */
-#define __NR_pipe                42 /* Common                                      */
-#define __NR_times               43 /* Implemented via getrusage() in SunOS        */
-#define __NR_getuid32            44 /* Linux sparc32 specific                      */
-#define __NR_umount2             45 /* Linux Specific                              */
-#define __NR_setgid              46 /* Implemented via setregid() in SunOS         */
-#define __NR_getgid              47 /* Common                                      */
-#define __NR_signal              48 /* Implemented via sigvec() in SunOS           */
-#define __NR_geteuid             49 /* SunOS calls getuid()                        */
-#define __NR_getegid             50 /* SunOS calls getgid()                        */
-#define __NR_acct                51 /* Common                                      */
-/* #define __NR_memory_ordering  52    Linux sparc64 specific                     */
-#define __NR_getgid32            53 /* Linux sparc32 specific                      */
-#define __NR_ioctl               54 /* Common                                      */
-#define __NR_reboot              55 /* Common                                      */
-#define __NR_mmap2              56 /* Linux sparc32 Specific                      */
-#define __NR_symlink             57 /* Common                                      */
-#define __NR_readlink            58 /* Common                                      */
-#define __NR_execve              59 /* Common                                      */
-#define __NR_umask               60 /* Common                                      */
-#define __NR_chroot              61 /* Common                                      */
-#define __NR_fstat               62 /* Common                                      */
-#define __NR_fstat64            63 /* Linux Specific                              */
-#define __NR_getpagesize         64 /* Common                                      */
-#define __NR_msync               65 /* Common in newer 1.3.x revs...               */
-#define __NR_vfork               66 /* Common                                      */
-#define __NR_pread64             67 /* Linux Specific                              */
-#define __NR_pwrite64            68 /* Linux Specific                              */
-#define __NR_geteuid32           69 /* Linux sparc32, sbrk under SunOS             */
-#define __NR_getegid32           70 /* Linux sparc32, sstk under SunOS             */
-#define __NR_mmap                71 /* Common                                      */
-#define __NR_setreuid32          72 /* Linux sparc32, vadvise under SunOS          */
-#define __NR_munmap              73 /* Common                                      */
-#define __NR_mprotect            74 /* Common                                      */
-#define __NR_madvise             75 /* Common                                      */
-#define __NR_vhangup             76 /* Common                                      */
-#define __NR_truncate64                 77 /* Linux sparc32 Specific                      */
-#define __NR_mincore             78 /* Common                                      */
-#define __NR_getgroups           79 /* Common                                      */
-#define __NR_setgroups           80 /* Common                                      */
-#define __NR_getpgrp             81 /* Common                                      */
-#define __NR_setgroups32         82 /* Linux sparc32, setpgrp under SunOS          */
-#define __NR_setitimer           83 /* Common                                      */
-#define __NR_ftruncate64        84 /* Linux sparc32 Specific                      */
-#define __NR_swapon              85 /* Common                                      */
-#define __NR_getitimer           86 /* Common                                      */
-#define __NR_setuid32            87 /* Linux sparc32, gethostname under SunOS      */
-#define __NR_sethostname         88 /* Common                                      */
-#define __NR_setgid32            89 /* Linux sparc32, getdtablesize under SunOS    */
-#define __NR_dup2                90 /* Common                                      */
-#define __NR_setfsuid32          91 /* Linux sparc32, getdopt under SunOS          */
-#define __NR_fcntl               92 /* Common                                      */
-#define __NR_select              93 /* Common                                      */
-#define __NR_setfsgid32          94 /* Linux sparc32, setdopt under SunOS          */
-#define __NR_fsync               95 /* Common                                      */
-#define __NR_setpriority         96 /* Common                                      */
-#define __NR_socket              97 /* Common                                      */
-#define __NR_connect             98 /* Common                                      */
-#define __NR_accept              99 /* Common                                      */
-#define __NR_getpriority        100 /* Common                                      */
-#define __NR_rt_sigreturn       101 /* Linux Specific                              */
-#define __NR_rt_sigaction       102 /* Linux Specific                              */
-#define __NR_rt_sigprocmask     103 /* Linux Specific                              */
-#define __NR_rt_sigpending      104 /* Linux Specific                              */
-#define __NR_rt_sigtimedwait    105 /* Linux Specific                              */
-#define __NR_rt_sigqueueinfo    106 /* Linux Specific                              */
-#define __NR_rt_sigsuspend      107 /* Linux Specific                              */
-#define __NR_setresuid32        108 /* Linux Specific, sigvec under SunOS         */
-#define __NR_getresuid32        109 /* Linux Specific, sigblock under SunOS       */
-#define __NR_setresgid32        110 /* Linux Specific, sigsetmask under SunOS     */
-#define __NR_getresgid32        111 /* Linux Specific, sigpause under SunOS       */
-#define __NR_setregid32         112 /* Linux sparc32, sigstack under SunOS         */
-#define __NR_recvmsg            113 /* Common                                      */
-#define __NR_sendmsg            114 /* Common                                      */
-#define __NR_getgroups32        115 /* Linux sparc32, vtrace under SunOS           */
-#define __NR_gettimeofday       116 /* Common                                      */
-#define __NR_getrusage          117 /* Common                                      */
-#define __NR_getsockopt         118 /* Common                                      */
-#define __NR_getcwd            119 /* Linux Specific                              */
-#define __NR_readv              120 /* Common                                      */
-#define __NR_writev             121 /* Common                                      */
-#define __NR_settimeofday       122 /* Common                                      */
-#define __NR_fchown             123 /* Common                                      */
-#define __NR_fchmod             124 /* Common                                      */
-#define __NR_recvfrom           125 /* Common                                      */
-#define __NR_setreuid           126 /* Common                                      */
-#define __NR_setregid           127 /* Common                                      */
-#define __NR_rename             128 /* Common                                      */
-#define __NR_truncate           129 /* Common                                      */
-#define __NR_ftruncate          130 /* Common                                      */
-#define __NR_flock              131 /* Common                                      */
-#define __NR_lstat64           132 /* Linux Specific                              */
-#define __NR_sendto             133 /* Common                                      */
-#define __NR_shutdown           134 /* Common                                      */
-#define __NR_socketpair         135 /* Common                                      */
-#define __NR_mkdir              136 /* Common                                      */
-#define __NR_rmdir              137 /* Common                                      */
-#define __NR_utimes             138 /* SunOS Specific                              */
-#define __NR_stat64            139 /* Linux Specific                              */
-#define __NR_sendfile64         140 /* adjtime under SunOS                         */
-#define __NR_getpeername        141 /* Common                                      */
-#define __NR_futex              142 /* gethostid under SunOS                       */
-#define __NR_gettid             143 /* ENOSYS under SunOS                          */
-#define __NR_getrlimit          144 /* Common                                      */
-#define __NR_setrlimit          145 /* Common                                      */
-#define __NR_pivot_root                146 /* Linux Specific, killpg under SunOS          */
-#define __NR_prctl             147 /* ENOSYS under SunOS                          */
-#define __NR_pciconfig_read    148 /* ENOSYS under SunOS                          */
-#define __NR_pciconfig_write   149 /* ENOSYS under SunOS                          */
-#define __NR_getsockname        150 /* Common                                      */
-#define __NR_inotify_init       151 /* Linux specific                              */
-#define __NR_inotify_add_watch  152 /* Linux specific                              */
-#define __NR_poll               153 /* Common                                      */
-#define __NR_getdents64                154 /* Linux specific                              */
-#define __NR_fcntl64           155 /* Linux sparc32 Specific                      */
-#define __NR_inotify_rm_watch   156 /* Linux specific                             */
-#define __NR_statfs             157 /* Common                                      */
-#define __NR_fstatfs            158 /* Common                                      */
-#define __NR_umount             159 /* Common                                      */
-#define __NR_sched_set_affinity 160 /* Linux specific, async_daemon under SunOS    */
-#define __NR_sched_get_affinity 161 /* Linux specific, getfh under SunOS           */
-#define __NR_getdomainname      162 /* SunOS Specific                              */
-#define __NR_setdomainname      163 /* Common                                      */
-/* #define __NR_utrap_install   164    Linux sparc64 specific                     */
-#define __NR_quotactl           165 /* Common                                      */
-#define __NR_set_tid_address    166 /* Linux specific, exportfs under SunOS        */
-#define __NR_mount              167 /* Common                                      */
-#define __NR_ustat              168 /* Common                                      */
-#define __NR_setxattr           169 /* SunOS: semsys                               */
-#define __NR_lsetxattr          170 /* SunOS: msgsys                               */
-#define __NR_fsetxattr          171 /* SunOS: shmsys                               */
-#define __NR_getxattr           172 /* SunOS: auditsys                             */
-#define __NR_lgetxattr          173 /* SunOS: rfssys                               */
-#define __NR_getdents           174 /* Common                                      */
-#define __NR_setsid             175 /* Common                                      */
-#define __NR_fchdir             176 /* Common                                      */
-#define __NR_fgetxattr          177 /* SunOS: fchroot                              */
-#define __NR_listxattr          178 /* SunOS: vpixsys                              */
-#define __NR_llistxattr         179 /* SunOS: aioread                              */
-#define __NR_flistxattr         180 /* SunOS: aiowrite                             */
-#define __NR_removexattr        181 /* SunOS: aiowait                              */
-#define __NR_lremovexattr       182 /* SunOS: aiocancel                            */
-#define __NR_sigpending         183 /* Common                                      */
-#define __NR_query_module      184 /* Linux Specific                              */
-#define __NR_setpgid            185 /* Common                                      */
-#define __NR_fremovexattr       186 /* SunOS: pathconf                             */
-#define __NR_tkill              187 /* SunOS: fpathconf                            */
-#define __NR_exit_group                188 /* Linux specific, sysconf undef SunOS         */
-#define __NR_uname              189 /* Linux Specific                              */
-#define __NR_init_module        190 /* Linux Specific                              */
-#define __NR_personality        191 /* Linux Specific                              */
-#define __NR_remap_file_pages   192 /* Linux Specific                              */
-#define __NR_epoll_create       193 /* Linux Specific                              */
-#define __NR_epoll_ctl          194 /* Linux Specific                              */
-#define __NR_epoll_wait         195 /* Linux Specific                              */
-#define __NR_ioprio_set         196 /* Linux Specific                              */
-#define __NR_getppid            197 /* Linux Specific                              */
-#define __NR_sigaction          198 /* Linux Specific                              */
-#define __NR_sgetmask           199 /* Linux Specific                              */
-#define __NR_ssetmask           200 /* Linux Specific                              */
-#define __NR_sigsuspend         201 /* Linux Specific                              */
-#define __NR_oldlstat           202 /* Linux Specific                              */
-#define __NR_uselib             203 /* Linux Specific                              */
-#define __NR_readdir            204 /* Linux Specific                              */
-#define __NR_readahead          205 /* Linux Specific                              */
-#define __NR_socketcall         206 /* Linux Specific                              */
-#define __NR_syslog             207 /* Linux Specific                              */
-#define __NR_lookup_dcookie     208 /* Linux Specific                              */
-#define __NR_fadvise64          209 /* Linux Specific                              */
-#define __NR_fadvise64_64       210 /* Linux Specific                              */
-#define __NR_tgkill             211 /* Linux Specific                              */
-#define __NR_waitpid            212 /* Linux Specific                              */
-#define __NR_swapoff            213 /* Linux Specific                              */
-#define __NR_sysinfo            214 /* Linux Specific                              */
-#define __NR_ipc                215 /* Linux Specific                              */
-#define __NR_sigreturn          216 /* Linux Specific                              */
-#define __NR_clone              217 /* Linux Specific                              */
-#define __NR_ioprio_get         218 /* Linux Specific                              */
-#define __NR_adjtimex           219 /* Linux Specific                              */
-#define __NR_sigprocmask        220 /* Linux Specific                              */
-#define __NR_create_module      221 /* Linux Specific                              */
-#define __NR_delete_module      222 /* Linux Specific                              */
-#define __NR_get_kernel_syms    223 /* Linux Specific                              */
-#define __NR_getpgid            224 /* Linux Specific                              */
-#define __NR_bdflush            225 /* Linux Specific                              */
-#define __NR_sysfs              226 /* Linux Specific                              */
-#define __NR_afs_syscall        227 /* Linux Specific                              */
-#define __NR_setfsuid           228 /* Linux Specific                              */
-#define __NR_setfsgid           229 /* Linux Specific                              */
-#define __NR__newselect         230 /* Linux Specific                              */
-#define __NR_time               231 /* Linux Specific                              */
-#define __NR_splice             232 /* Linux Specific                              */
-#define __NR_stime              233 /* Linux Specific                              */
-#define __NR_statfs64           234 /* Linux Specific                              */
-#define __NR_fstatfs64          235 /* Linux Specific                              */
-#define __NR__llseek            236 /* Linux Specific                              */
-#define __NR_mlock              237
-#define __NR_munlock            238
-#define __NR_mlockall           239
-#define __NR_munlockall         240
-#define __NR_sched_setparam     241
-#define __NR_sched_getparam     242
-#define __NR_sched_setscheduler 243
-#define __NR_sched_getscheduler 244
-#define __NR_sched_yield        245
-#define __NR_sched_get_priority_max 246
-#define __NR_sched_get_priority_min 247
-#define __NR_sched_rr_get_interval  248
-#define __NR_nanosleep          249
-#define __NR_mremap             250
-#define __NR__sysctl            251
-#define __NR_getsid             252
-#define __NR_fdatasync          253
-#define __NR_nfsservctl         254
-#define __NR_sync_file_range   255
-#define __NR_clock_settime     256
-#define __NR_clock_gettime     257
-#define __NR_clock_getres      258
-#define __NR_clock_nanosleep   259
-#define __NR_sched_getaffinity 260
-#define __NR_sched_setaffinity 261
-#define __NR_timer_settime     262
-#define __NR_timer_gettime     263
-#define __NR_timer_getoverrun  264
-#define __NR_timer_delete      265
-#define __NR_timer_create      266
-/* #define __NR_vserver                267 Reserved for VSERVER */
-#define __NR_io_setup          268
-#define __NR_io_destroy                269
-#define __NR_io_submit         270
-#define __NR_io_cancel         271
-#define __NR_io_getevents      272
-#define __NR_mq_open           273
-#define __NR_mq_unlink         274
-#define __NR_mq_timedsend      275
-#define __NR_mq_timedreceive   276
-#define __NR_mq_notify         277
-#define __NR_mq_getsetattr     278
-#define __NR_waitid            279
-#define __NR_tee               280
-#define __NR_add_key           281
-#define __NR_request_key       282
-#define __NR_keyctl            283
-#define __NR_openat            284
-#define __NR_mkdirat           285
-#define __NR_mknodat           286
-#define __NR_fchownat          287
-#define __NR_futimesat         288
-#define __NR_fstatat64         289
-#define __NR_unlinkat          290
-#define __NR_renameat          291
-#define __NR_linkat            292
-#define __NR_symlinkat         293
-#define __NR_readlinkat                294
-#define __NR_fchmodat          295
-#define __NR_faccessat         296
-#define __NR_pselect6          297
-#define __NR_ppoll             298
-#define __NR_unshare           299
-#define __NR_set_robust_list   300
-#define __NR_get_robust_list   301
-#define __NR_migrate_pages     302
-#define __NR_mbind             303
-#define __NR_get_mempolicy     304
-#define __NR_set_mempolicy     305
-#define __NR_kexec_load                306
-#define __NR_move_pages                307
-#define __NR_getcpu            308
-#define __NR_epoll_pwait       309
-#define __NR_utimensat         310
-#define __NR_signalfd          311
-#define __NR_timerfd_create    312
-#define __NR_eventfd           313
-#define __NR_fallocate         314
-#define __NR_timerfd_settime   315
-#define __NR_timerfd_gettime   316
-#define __NR_signalfd4         317
-#define __NR_eventfd2          318
-#define __NR_epoll_create1     319
-#define __NR_dup3              320
-#define __NR_pipe2             321
-#define __NR_inotify_init1     322
-#define __NR_accept4           323
-
-#define NR_SYSCALLS            324
-
-/* Sparc 32-bit only has the "setresuid32", "getresuid32" variants,
- * it never had the plain ones and there is no value to adding those
- * old versions into the syscall table.
- */
-#define __IGNORE_setresuid
-#define __IGNORE_getresuid
-#define __IGNORE_setresgid
-#define __IGNORE_getresgid
-
-#ifdef __KERNEL__
-#define __ARCH_WANT_IPC_PARSE_VERSION
-#define __ARCH_WANT_OLD_READDIR
-#define __ARCH_WANT_STAT64
-#define __ARCH_WANT_SYS_ALARM
-#define __ARCH_WANT_SYS_GETHOSTNAME
-#define __ARCH_WANT_SYS_PAUSE
-#define __ARCH_WANT_SYS_SGETMASK
-#define __ARCH_WANT_SYS_SIGNAL
-#define __ARCH_WANT_SYS_TIME
-#define __ARCH_WANT_SYS_UTIME
-#define __ARCH_WANT_SYS_WAITPID
-#define __ARCH_WANT_SYS_SOCKETCALL
-#define __ARCH_WANT_SYS_FADVISE64
-#define __ARCH_WANT_SYS_GETPGRP
-#define __ARCH_WANT_SYS_LLSEEK
-#define __ARCH_WANT_SYS_NICE
-#define __ARCH_WANT_SYS_OLDUMOUNT
-#define __ARCH_WANT_SYS_SIGPENDING
-#define __ARCH_WANT_SYS_SIGPROCMASK
-#define __ARCH_WANT_SYS_RT_SIGSUSPEND
-
-/*
- * "Conditional" syscalls
- *
- * What we want is __attribute__((weak,alias("sys_ni_syscall"))),
- * but it doesn't work on all toolchains, so we just do it by hand
- */
-#define cond_syscall(x) asm(".weak\t" #x "\n\t.set\t" #x ",sys_ni_syscall")
-
-#endif /* __KERNEL__ */
-#endif /* _SPARC_UNISTD_H */
diff --git a/arch/sparc/include/asm/unistd_64.h b/arch/sparc/include/asm/unistd_64.h
deleted file mode 100644 (file)
index fa5d3c0..0000000
+++ /dev/null
@@ -1,380 +0,0 @@
-#ifndef _SPARC64_UNISTD_H
-#define _SPARC64_UNISTD_H
-
-/*
- * System calls under the Sparc.
- *
- * Don't be scared by the ugly clobbers, it is the only way I can
- * think of right now to force the arguments into fixed registers
- * before the trap into the system call with gcc 'asm' statements.
- *
- * Copyright (C) 1995, 2007 David S. Miller (davem@davemloft.net)
- *
- * SunOS compatibility based upon preliminary work which is:
- *
- * Copyright (C) 1995 Adrian M. Rodriguez (adrian@remus.rutgers.edu)
- */
-
-#define __NR_restart_syscall      0 /* Linux Specific                             */
-#define __NR_exit                 1 /* Common                                      */
-#define __NR_fork                 2 /* Common                                      */
-#define __NR_read                 3 /* Common                                      */
-#define __NR_write                4 /* Common                                      */
-#define __NR_open                 5 /* Common                                      */
-#define __NR_close                6 /* Common                                      */
-#define __NR_wait4                7 /* Common                                      */
-#define __NR_creat                8 /* Common                                      */
-#define __NR_link                 9 /* Common                                      */
-#define __NR_unlink              10 /* Common                                      */
-#define __NR_execv               11 /* SunOS Specific                              */
-#define __NR_chdir               12 /* Common                                      */
-#define __NR_chown              13 /* Common                                      */
-#define __NR_mknod               14 /* Common                                      */
-#define __NR_chmod               15 /* Common                                      */
-#define __NR_lchown              16 /* Common                                      */
-#define __NR_brk                 17 /* Common                                      */
-#define __NR_perfctr             18 /* Performance counter operations              */
-#define __NR_lseek               19 /* Common                                      */
-#define __NR_getpid              20 /* Common                                      */
-#define __NR_capget             21 /* Linux Specific                              */
-#define __NR_capset             22 /* Linux Specific                              */
-#define __NR_setuid              23 /* Implemented via setreuid in SunOS           */
-#define __NR_getuid              24 /* Common                                      */
-#define __NR_vmsplice           25 /* ENOSYS under SunOS                          */
-#define __NR_ptrace              26 /* Common                                      */
-#define __NR_alarm               27 /* Implemented via setitimer in SunOS          */
-#define __NR_sigaltstack        28 /* Common                                      */
-#define __NR_pause               29 /* Is sigblock(0)->sigpause() in SunOS         */
-#define __NR_utime               30 /* Implemented via utimes() under SunOS        */
-/* #define __NR_lchown32         31    Linux sparc32 specific                      */
-/* #define __NR_fchown32         32    Linux sparc32 specific                      */
-#define __NR_access              33 /* Common                                      */
-#define __NR_nice                34 /* Implemented via get/setpriority() in SunOS  */
-/* #define __NR_chown32          35    Linux sparc32 specific                      */
-#define __NR_sync                36 /* Common                                      */
-#define __NR_kill                37 /* Common                                      */
-#define __NR_stat                38 /* Common                                      */
-#define __NR_sendfile           39 /* Linux Specific                              */
-#define __NR_lstat               40 /* Common                                      */
-#define __NR_dup                 41 /* Common                                      */
-#define __NR_pipe                42 /* Common                                      */
-#define __NR_times               43 /* Implemented via getrusage() in SunOS        */
-/* #define __NR_getuid32         44    Linux sparc32 specific                      */
-#define __NR_umount2             45 /* Linux Specific                              */
-#define __NR_setgid              46 /* Implemented via setregid() in SunOS         */
-#define __NR_getgid              47 /* Common                                      */
-#define __NR_signal              48 /* Implemented via sigvec() in SunOS           */
-#define __NR_geteuid             49 /* SunOS calls getuid()                        */
-#define __NR_getegid             50 /* SunOS calls getgid()                        */
-#define __NR_acct                51 /* Common                                      */
-#define __NR_memory_ordering    52 /* Linux Specific                              */
-/* #define __NR_getgid32         53    Linux sparc32 specific                      */
-#define __NR_ioctl               54 /* Common                                      */
-#define __NR_reboot              55 /* Common                                      */
-/* #define __NR_mmap2           56    Linux sparc32 Specific                      */
-#define __NR_symlink             57 /* Common                                      */
-#define __NR_readlink            58 /* Common                                      */
-#define __NR_execve              59 /* Common                                      */
-#define __NR_umask               60 /* Common                                      */
-#define __NR_chroot              61 /* Common                                      */
-#define __NR_fstat               62 /* Common                                      */
-#define __NR_fstat64             63 /* Linux Specific                              */
-#define __NR_getpagesize         64 /* Common                                      */
-#define __NR_msync               65 /* Common in newer 1.3.x revs...               */
-#define __NR_vfork               66 /* Common                                      */
-#define __NR_pread64             67 /* Linux Specific                              */
-#define __NR_pwrite64            68 /* Linux Specific                              */
-/* #define __NR_geteuid32        69    Linux sparc32, sbrk under SunOS             */
-/* #define __NR_getegid32        70    Linux sparc32, sstk under SunOS             */
-#define __NR_mmap                71 /* Common                                      */
-/* #define __NR_setreuid32       72    Linux sparc32, vadvise under SunOS          */
-#define __NR_munmap              73 /* Common                                      */
-#define __NR_mprotect            74 /* Common                                      */
-#define __NR_madvise             75 /* Common                                      */
-#define __NR_vhangup             76 /* Common                                      */
-/* #define __NR_truncate64       77    Linux sparc32 Specific                     */
-#define __NR_mincore             78 /* Common                                      */
-#define __NR_getgroups           79 /* Common                                      */
-#define __NR_setgroups           80 /* Common                                      */
-#define __NR_getpgrp             81 /* Common                                      */
-/* #define __NR_setgroups32      82    Linux sparc32, setpgrp under SunOS          */
-#define __NR_setitimer           83 /* Common                                      */
-/* #define __NR_ftruncate64      84    Linux sparc32 Specific                     */
-#define __NR_swapon              85 /* Common                                      */
-#define __NR_getitimer           86 /* Common                                      */
-/* #define __NR_setuid32         87    Linux sparc32, gethostname under SunOS      */
-#define __NR_sethostname         88 /* Common                                      */
-/* #define __NR_setgid32         89    Linux sparc32, getdtablesize under SunOS    */
-#define __NR_dup2                90 /* Common                                      */
-/* #define __NR_setfsuid32       91    Linux sparc32, getdopt under SunOS          */
-#define __NR_fcntl               92 /* Common                                      */
-#define __NR_select              93 /* Common                                      */
-/* #define __NR_setfsgid32       94    Linux sparc32, setdopt under SunOS          */
-#define __NR_fsync               95 /* Common                                      */
-#define __NR_setpriority         96 /* Common                                      */
-#define __NR_socket              97 /* Common                                      */
-#define __NR_connect             98 /* Common                                      */
-#define __NR_accept              99 /* Common                                      */
-#define __NR_getpriority        100 /* Common                                      */
-#define __NR_rt_sigreturn       101 /* Linux Specific                              */
-#define __NR_rt_sigaction       102 /* Linux Specific                              */
-#define __NR_rt_sigprocmask     103 /* Linux Specific                              */
-#define __NR_rt_sigpending      104 /* Linux Specific                              */
-#define __NR_rt_sigtimedwait    105 /* Linux Specific                              */
-#define __NR_rt_sigqueueinfo    106 /* Linux Specific                              */
-#define __NR_rt_sigsuspend      107 /* Linux Specific                              */
-#define __NR_setresuid          108 /* Linux Specific, sigvec under SunOS         */
-#define __NR_getresuid          109 /* Linux Specific, sigblock under SunOS       */
-#define __NR_setresgid          110 /* Linux Specific, sigsetmask under SunOS     */
-#define __NR_getresgid          111 /* Linux Specific, sigpause under SunOS       */
-/* #define __NR_setregid32       75    Linux sparc32, sigstack under SunOS         */
-#define __NR_recvmsg            113 /* Common                                      */
-#define __NR_sendmsg            114 /* Common                                      */
-/* #define __NR_getgroups32     115    Linux sparc32, vtrace under SunOS           */
-#define __NR_gettimeofday       116 /* Common                                      */
-#define __NR_getrusage          117 /* Common                                      */
-#define __NR_getsockopt         118 /* Common                                      */
-#define __NR_getcwd            119 /* Linux Specific                              */
-#define __NR_readv              120 /* Common                                      */
-#define __NR_writev             121 /* Common                                      */
-#define __NR_settimeofday       122 /* Common                                      */
-#define __NR_fchown             123 /* Common                                      */
-#define __NR_fchmod             124 /* Common                                      */
-#define __NR_recvfrom           125 /* Common                                      */
-#define __NR_setreuid           126 /* Common                                      */
-#define __NR_setregid           127 /* Common                                      */
-#define __NR_rename             128 /* Common                                      */
-#define __NR_truncate           129 /* Common                                      */
-#define __NR_ftruncate          130 /* Common                                      */
-#define __NR_flock              131 /* Common                                      */
-#define __NR_lstat64           132 /* Linux Specific                              */
-#define __NR_sendto             133 /* Common                                      */
-#define __NR_shutdown           134 /* Common                                      */
-#define __NR_socketpair         135 /* Common                                      */
-#define __NR_mkdir              136 /* Common                                      */
-#define __NR_rmdir              137 /* Common                                      */
-#define __NR_utimes             138 /* SunOS Specific                              */
-#define __NR_stat64            139 /* Linux Specific                              */
-#define __NR_sendfile64         140 /* adjtime under SunOS                         */
-#define __NR_getpeername        141 /* Common                                      */
-#define __NR_futex              142 /* gethostid under SunOS                       */
-#define __NR_gettid             143 /* ENOSYS under SunOS                          */
-#define __NR_getrlimit         144 /* Common                                      */
-#define __NR_setrlimit          145 /* Common                                      */
-#define __NR_pivot_root                146 /* Linux Specific, killpg under SunOS          */
-#define __NR_prctl             147 /* ENOSYS under SunOS                          */
-#define __NR_pciconfig_read    148 /* ENOSYS under SunOS                          */
-#define __NR_pciconfig_write   149 /* ENOSYS under SunOS                          */
-#define __NR_getsockname        150 /* Common                                      */
-#define __NR_inotify_init       151 /* Linux specific                              */
-#define __NR_inotify_add_watch  152 /* Linux specific                              */
-#define __NR_poll               153 /* Common                                      */
-#define __NR_getdents64                154 /* Linux specific                              */
-/* #define __NR_fcntl64         155    Linux sparc32 Specific                      */
-#define __NR_inotify_rm_watch   156 /* Linux specific                             */
-#define __NR_statfs             157 /* Common                                      */
-#define __NR_fstatfs            158 /* Common                                      */
-#define __NR_umount             159 /* Common                                      */
-#define __NR_sched_set_affinity 160 /* Linux specific, async_daemon under SunOS    */
-#define __NR_sched_get_affinity 161 /* Linux specific, getfh under SunOS           */
-#define __NR_getdomainname      162 /* SunOS Specific                              */
-#define __NR_setdomainname      163 /* Common                                      */
-#define __NR_utrap_install     164 /* SYSV ABI/v9 required                        */
-#define __NR_quotactl           165 /* Common                                      */
-#define __NR_set_tid_address    166 /* Linux specific, exportfs under SunOS        */
-#define __NR_mount              167 /* Common                                      */
-#define __NR_ustat              168 /* Common                                      */
-#define __NR_setxattr           169 /* SunOS: semsys                               */
-#define __NR_lsetxattr          170 /* SunOS: msgsys                               */
-#define __NR_fsetxattr          171 /* SunOS: shmsys                               */
-#define __NR_getxattr           172 /* SunOS: auditsys                             */
-#define __NR_lgetxattr          173 /* SunOS: rfssys                               */
-#define __NR_getdents           174 /* Common                                      */
-#define __NR_setsid             175 /* Common                                      */
-#define __NR_fchdir             176 /* Common                                      */
-#define __NR_fgetxattr          177 /* SunOS: fchroot                              */
-#define __NR_listxattr          178 /* SunOS: vpixsys                              */
-#define __NR_llistxattr         179 /* SunOS: aioread                              */
-#define __NR_flistxattr         180 /* SunOS: aiowrite                             */
-#define __NR_removexattr        181 /* SunOS: aiowait                              */
-#define __NR_lremovexattr       182 /* SunOS: aiocancel                            */
-#define __NR_sigpending         183 /* Common                                      */
-#define __NR_query_module      184 /* Linux Specific                              */
-#define __NR_setpgid            185 /* Common                                      */
-#define __NR_fremovexattr       186 /* SunOS: pathconf                             */
-#define __NR_tkill              187 /* SunOS: fpathconf                            */
-#define __NR_exit_group                188 /* Linux specific, sysconf undef SunOS         */
-#define __NR_uname              189 /* Linux Specific                              */
-#define __NR_init_module        190 /* Linux Specific                              */
-#define __NR_personality        191 /* Linux Specific                              */
-#define __NR_remap_file_pages   192 /* Linux Specific                              */
-#define __NR_epoll_create       193 /* Linux Specific                              */
-#define __NR_epoll_ctl          194 /* Linux Specific                              */
-#define __NR_epoll_wait         195 /* Linux Specific                              */
-#define __NR_ioprio_set         196 /* Linux Specific                              */
-#define __NR_getppid            197 /* Linux Specific                              */
-#define __NR_sigaction          198 /* Linux Specific                              */
-#define __NR_sgetmask           199 /* Linux Specific                              */
-#define __NR_ssetmask           200 /* Linux Specific                              */
-#define __NR_sigsuspend         201 /* Linux Specific                              */
-#define __NR_oldlstat           202 /* Linux Specific                              */
-#define __NR_uselib             203 /* Linux Specific                              */
-#define __NR_readdir            204 /* Linux Specific                              */
-#define __NR_readahead          205 /* Linux Specific                              */
-#define __NR_socketcall         206 /* Linux Specific                              */
-#define __NR_syslog             207 /* Linux Specific                              */
-#define __NR_lookup_dcookie     208 /* Linux Specific                              */
-#define __NR_fadvise64          209 /* Linux Specific                              */
-#define __NR_fadvise64_64       210 /* Linux Specific                              */
-#define __NR_tgkill             211 /* Linux Specific                              */
-#define __NR_waitpid            212 /* Linux Specific                              */
-#define __NR_swapoff            213 /* Linux Specific                              */
-#define __NR_sysinfo            214 /* Linux Specific                              */
-#define __NR_ipc                215 /* Linux Specific                              */
-#define __NR_sigreturn          216 /* Linux Specific                              */
-#define __NR_clone              217 /* Linux Specific                              */
-#define __NR_ioprio_get         218 /* Linux Specific                              */
-#define __NR_adjtimex           219 /* Linux Specific                              */
-#define __NR_sigprocmask        220 /* Linux Specific                              */
-#define __NR_create_module      221 /* Linux Specific                              */
-#define __NR_delete_module      222 /* Linux Specific                              */
-#define __NR_get_kernel_syms    223 /* Linux Specific                              */
-#define __NR_getpgid            224 /* Linux Specific                              */
-#define __NR_bdflush            225 /* Linux Specific                              */
-#define __NR_sysfs              226 /* Linux Specific                              */
-#define __NR_afs_syscall        227 /* Linux Specific                              */
-#define __NR_setfsuid           228 /* Linux Specific                              */
-#define __NR_setfsgid           229 /* Linux Specific                              */
-#define __NR__newselect         230 /* Linux Specific                              */
-#ifdef __KERNEL__
-#define __NR_time              231 /* Linux sparc32                               */
-#endif
-#define __NR_splice             232 /* Linux Specific                              */
-#define __NR_stime              233 /* Linux Specific                              */
-#define __NR_statfs64           234 /* Linux Specific                              */
-#define __NR_fstatfs64          235 /* Linux Specific                              */
-#define __NR__llseek            236 /* Linux Specific                              */
-#define __NR_mlock              237
-#define __NR_munlock            238
-#define __NR_mlockall           239
-#define __NR_munlockall         240
-#define __NR_sched_setparam     241
-#define __NR_sched_getparam     242
-#define __NR_sched_setscheduler 243
-#define __NR_sched_getscheduler 244
-#define __NR_sched_yield        245
-#define __NR_sched_get_priority_max 246
-#define __NR_sched_get_priority_min 247
-#define __NR_sched_rr_get_interval  248
-#define __NR_nanosleep          249
-#define __NR_mremap             250
-#define __NR__sysctl            251
-#define __NR_getsid             252
-#define __NR_fdatasync          253
-#define __NR_nfsservctl         254
-#define __NR_sync_file_range   255
-#define __NR_clock_settime     256
-#define __NR_clock_gettime     257
-#define __NR_clock_getres      258
-#define __NR_clock_nanosleep   259
-#define __NR_sched_getaffinity 260
-#define __NR_sched_setaffinity 261
-#define __NR_timer_settime     262
-#define __NR_timer_gettime     263
-#define __NR_timer_getoverrun  264
-#define __NR_timer_delete      265
-#define __NR_timer_create      266
-/* #define __NR_vserver                267 Reserved for VSERVER */
-#define __NR_io_setup          268
-#define __NR_io_destroy                269
-#define __NR_io_submit         270
-#define __NR_io_cancel         271
-#define __NR_io_getevents      272
-#define __NR_mq_open           273
-#define __NR_mq_unlink         274
-#define __NR_mq_timedsend      275
-#define __NR_mq_timedreceive   276
-#define __NR_mq_notify         277
-#define __NR_mq_getsetattr     278
-#define __NR_waitid            279
-#define __NR_tee               280
-#define __NR_add_key           281
-#define __NR_request_key       282
-#define __NR_keyctl            283
-#define __NR_openat            284
-#define __NR_mkdirat           285
-#define __NR_mknodat           286
-#define __NR_fchownat          287
-#define __NR_futimesat         288
-#define __NR_fstatat64         289
-#define __NR_unlinkat          290
-#define __NR_renameat          291
-#define __NR_linkat            292
-#define __NR_symlinkat         293
-#define __NR_readlinkat                294
-#define __NR_fchmodat          295
-#define __NR_faccessat         296
-#define __NR_pselect6          297
-#define __NR_ppoll             298
-#define __NR_unshare           299
-#define __NR_set_robust_list   300
-#define __NR_get_robust_list   301
-#define __NR_migrate_pages     302
-#define __NR_mbind             303
-#define __NR_get_mempolicy     304
-#define __NR_set_mempolicy     305
-#define __NR_kexec_load                306
-#define __NR_move_pages                307
-#define __NR_getcpu            308
-#define __NR_epoll_pwait       309
-#define __NR_utimensat         310
-#define __NR_signalfd          311
-#define __NR_timerfd_create    312
-#define __NR_eventfd           313
-#define __NR_fallocate         314
-#define __NR_timerfd_settime   315
-#define __NR_timerfd_gettime   316
-#define __NR_signalfd4         317
-#define __NR_eventfd2          318
-#define __NR_epoll_create1     319
-#define __NR_dup3              320
-#define __NR_pipe2             321
-#define __NR_inotify_init1     322
-#define __NR_accept4           323
-
-#define NR_SYSCALLS            324
-
-#ifdef __KERNEL__
-#define __ARCH_WANT_IPC_PARSE_VERSION
-#define __ARCH_WANT_OLD_READDIR
-#define __ARCH_WANT_STAT64
-#define __ARCH_WANT_SYS_ALARM
-#define __ARCH_WANT_SYS_GETHOSTNAME
-#define __ARCH_WANT_SYS_PAUSE
-#define __ARCH_WANT_SYS_SGETMASK
-#define __ARCH_WANT_SYS_SIGNAL
-#define __ARCH_WANT_SYS_TIME
-#define __ARCH_WANT_COMPAT_SYS_TIME
-#define __ARCH_WANT_SYS_UTIME
-#define __ARCH_WANT_SYS_WAITPID
-#define __ARCH_WANT_SYS_SOCKETCALL
-#define __ARCH_WANT_SYS_FADVISE64
-#define __ARCH_WANT_SYS_GETPGRP
-#define __ARCH_WANT_SYS_LLSEEK
-#define __ARCH_WANT_SYS_NICE
-#define __ARCH_WANT_SYS_OLDUMOUNT
-#define __ARCH_WANT_SYS_SIGPENDING
-#define __ARCH_WANT_SYS_SIGPROCMASK
-#define __ARCH_WANT_SYS_RT_SIGSUSPEND
-#define __ARCH_WANT_COMPAT_SYS_RT_SIGSUSPEND
-
-/*
- * "Conditional" syscalls
- *
- * What we want is __attribute__((weak,alias("sys_ni_syscall"))),
- * but it doesn't work on all toolchains, so we just do it by hand
- */
-#define cond_syscall(x) asm(".weak\t" #x "\n\t.set\t" #x ",sys_ni_syscall")
-
-#endif /* __KERNEL__ */
-#endif /* _SPARC64_UNISTD_H */
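Both of the headers deleted above end with the same hand-rolled cond_syscall() macro. As a rough illustration of the two spellings its comment contrasts (sys_example and sys_example2 are made-up names, and sys_ni_syscall is given a stub body only so the snippet is self-contained):

/* Illustration only: in the kernel, sys_ni_syscall() is the real
 * "not implemented" handler defined elsewhere; the stub here just makes
 * the alias target visible in this translation unit.
 */
long sys_ni_syscall(void) { return -38; /* -ENOSYS */ }

/* The spelling the comment says is wanted, but which misbehaves on some
 * toolchains:
 */
long sys_example(void) __attribute__((weak, alias("sys_ni_syscall")));

/* The portable equivalent that cond_syscall(sys_example2) expands to: */
asm(".weak\tsys_example2\n\t.set\tsys_example2,sys_ni_syscall");

Either way, a syscall that is configured out still resolves (weakly) to sys_ni_syscall, so references from the syscall tables keep linking.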
diff --git a/arch/sparc/kernel/.gitignore b/arch/sparc/kernel/.gitignore
new file mode 100644 (file)
index 0000000..c5f676c
--- /dev/null
@@ -0,0 +1 @@
+vmlinux.lds
index 2d658209509943ce5d29fd0d6ebd2553097d4d76..53adcaa0348ba8f50d60c1a7baa1951a858882da 100644 (file)
@@ -2,25 +2,98 @@
 # Makefile for the linux kernel.
 #
 
-extra-y                := head.o init_task.o vmlinux.lds
-
-EXTRA_AFLAGS   := -ansi
-
-IRQ_OBJS := irq.o sun4m_irq.o sun4c_irq.o sun4d_irq.o
-obj-y    := entry.o wof.o wuf.o etrap.o rtrap.o traps.o $(IRQ_OBJS) \
-           process.o signal.o ioport.o setup.o idprom.o \
-           sys_sparc.o systbls.o \
-           time.o windows.o cpu.o devices.o \
-           tadpole.o tick14.o ptrace.o \
-           unaligned.o una_asm.o muldiv.o \
-           prom.o of_device.o devres.o dma.o
-
-devres-y = ../../../kernel/irq/devres.o
-
-obj-$(CONFIG_PCI) += pcic.o
-obj-$(CONFIG_SMP) += trampoline.o smp.o sun4m_smp.o sun4d_smp.o
-obj-$(CONFIG_SUN_AUXIO) += auxio.o
-obj-$(CONFIG_SUN_PM) += apc.o pmc.o
-obj-$(CONFIG_MODULES) += module.o sparc_ksyms.o
-obj-$(CONFIG_SPARC_LED) += led.o
-obj-$(CONFIG_KGDB) += kgdb.o
+asflags-y := -ansi
+ccflags-y := -Werror
+
+extra-y     := head_$(BITS).o
+extra-y     += init_task.o
+extra-y     += vmlinux.lds
+
+obj-$(CONFIG_SPARC32)   += entry.o wof.o wuf.o
+obj-$(CONFIG_SPARC32)   += etrap_32.o
+obj-$(CONFIG_SPARC32)   += rtrap_32.o
+obj-y                   += traps_$(BITS).o
+
+# IRQ
+obj-y                   += irq_$(BITS).o
+obj-$(CONFIG_SPARC32)   += sun4m_irq.o sun4c_irq.o sun4d_irq.o
+
+obj-y                   += process_$(BITS).o
+obj-y                   += signal_$(BITS).o
+obj-$(CONFIG_SPARC32)   += ioport.o
+obj-y                   += setup_$(BITS).o
+obj-y                   += idprom.o
+obj-y                   += sys_sparc_$(BITS).o
+obj-$(CONFIG_SPARC32)   += systbls_32.o
+obj-y                   += time_$(BITS).o
+obj-$(CONFIG_SPARC32)   += windows.o
+obj-y                   += cpu.o
+obj-$(CONFIG_SPARC32)   += devices.o
+obj-$(CONFIG_SPARC32)   += tadpole.o
+obj-$(CONFIG_SPARC32)   += tick14.o
+obj-y                   += ptrace_$(BITS).o
+obj-y                   += unaligned_$(BITS).o
+obj-y                   += una_asm_$(BITS).o
+obj-$(CONFIG_SPARC32)   += muldiv.o
+obj-y                   += prom_common.o
+obj-y                   += prom_$(BITS).o
+obj-y                   += of_device_$(BITS).o
+obj-$(CONFIG_SPARC64)   += prom_irqtrans.o
+
+obj-$(CONFIG_SPARC64)   += reboot.o
+obj-$(CONFIG_SPARC64)   += sysfs.o
+obj-$(CONFIG_SPARC64)   += iommu.o
+obj-$(CONFIG_SPARC64)   += central.o
+obj-$(CONFIG_SPARC64)   += starfire.o
+obj-$(CONFIG_SPARC64)   += power.o
+obj-$(CONFIG_SPARC64)   += sbus.o
+obj-$(CONFIG_SPARC64)   += ebus.o
+obj-$(CONFIG_SPARC64)   += visemul.o
+obj-$(CONFIG_SPARC64)   += hvapi.o
+obj-$(CONFIG_SPARC64)   += sstate.o
+obj-$(CONFIG_SPARC64)   += mdesc.o
+
+# sparc32 does not use GENERIC_HARDIRQS but uses the generic devres implementation
+obj-$(CONFIG_SPARC32)     += devres.o
+devres-y                  := ../../../kernel/irq/devres.o
+
+obj-$(CONFIG_SPARC32)     += dma.o
+
+obj-$(CONFIG_SPARC32_PCI) += pcic.o
+
+obj-$(CONFIG_SMP)         += trampoline_$(BITS).o smp_$(BITS).o
+obj-$(CONFIG_SPARC32_SMP) += sun4m_smp.o sun4d_smp.o
+obj-$(CONFIG_SPARC64_SMP) += hvtramp.o
+
+obj-y                     += auxio_$(BITS).o
+obj-$(CONFIG_SUN_PM)      += apc.o pmc.o
+
+obj-$(CONFIG_MODULES)     += module.o
+obj-$(CONFIG_MODULES)     += sparc_ksyms_$(BITS).o
+obj-$(CONFIG_SPARC_LED)   += led.o
+obj-$(CONFIG_KGDB)        += kgdb_$(BITS).o
+
+
+obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o
+CFLAGS_REMOVE_ftrace.o := -pg
+
+obj-$(CONFIG_STACKTRACE)     += stacktrace.o
+# sparc64 PCI
+obj-$(CONFIG_SPARC64_PCI)    += pci.o pci_common.o psycho_common.o
+obj-$(CONFIG_SPARC64_PCI)    += pci_psycho.o pci_sabre.o pci_schizo.o
+obj-$(CONFIG_SPARC64_PCI)    += pci_sun4v.o pci_sun4v_asm.o pci_fire.o
+obj-$(CONFIG_PCI_MSI)        += pci_msi.o
+
+obj-$(CONFIG_COMPAT)         += sys32.o sys_sparc32.o signal32.o
+
+# sparc64 cpufreq
+obj-$(CONFIG_US3_FREQ)  += us3_cpufreq.o
+obj-$(CONFIG_US2E_FREQ) += us2e_cpufreq.o
+obj-$(CONFIG_US3_MC)    += chmc.o
+
+obj-$(CONFIG_KPROBES)   += kprobes.o
+obj-$(CONFIG_SUN_LDOMS) += ldc.o vio.o viohs.o ds.o
+
+obj-$(CONFIG_AUDIT)     += audit.o
+audit--$(CONFIG_AUDIT)  := compat_audit.o
+obj-$(CONFIG_COMPAT)    += $(audit--y)
index b5bb99ed892cc459b5f38f919750c8e33e87bc13..68f7e1118e9b3a7ab845282e1378304630da87df 100644 (file)
 // #include <linux/mm.h>
 #include <linux/kbuild.h>
 
-int foo(void)
+#ifdef CONFIG_SPARC32
+int sparc32_foo(void)
 {
-       DEFINE(AOFF_task_thread, offsetof(struct task_struct, thread));
-       BLANK();
        DEFINE(AOFF_thread_fork_kpsr,
                        offsetof(struct thread_struct, fork_kpsr));
+       return 0;
+}
+#else
+int sparc64_foo(void)
+{
+       return 0;
+}
+#endif
+
+int foo(void)
+{
+       BLANK();
+       DEFINE(AOFF_task_thread, offsetof(struct task_struct, thread));
        BLANK();
        DEFINE(AOFF_mm_context, offsetof(struct mm_struct, context));
 
        /* DEFINE(NUM_USER_SEGMENTS, TASK_SIZE>>28); */
        return 0;
 }
+
similarity index 67%
rename from arch/sparc64/kernel/auxio.c
rename to arch/sparc/kernel/auxio_64.c
index 858beda86524c96fc7c0dfba4b91736104ff4798..8b67347d4221b42dcf6e6e643b7ec338aac9733a 100644 (file)
@@ -27,73 +27,55 @@ enum auxio_type {
 static enum auxio_type auxio_devtype = AUXIO_TYPE_NODEV;
 static DEFINE_SPINLOCK(auxio_lock);
 
-static void __auxio_sbus_set(u8 bits_on, u8 bits_off)
+static void __auxio_rmw(u8 bits_on, u8 bits_off, int ebus)
 {
        if (auxio_register) {
-               unsigned char regval;
                unsigned long flags;
-               unsigned char newval;
+               u8 regval, newval;
 
                spin_lock_irqsave(&auxio_lock, flags);
 
-               regval =  sbus_readb(auxio_register);
+               regval = (ebus ?
+                         (u8) readl(auxio_register) :
+                         sbus_readb(auxio_register));
                newval =  regval | bits_on;
                newval &= ~bits_off;
-               newval &= ~AUXIO_AUX1_MASK;
-               sbus_writeb(newval, auxio_register);
+               if (!ebus)
+                       newval &= ~AUXIO_AUX1_MASK;
+               if (ebus)
+                       writel((u32) newval, auxio_register);
+               else
+                       sbus_writeb(newval, auxio_register);
                
                spin_unlock_irqrestore(&auxio_lock, flags);
        }
 }
 
-static void __auxio_ebus_set(u8 bits_on, u8 bits_off)
+static void __auxio_set_bit(u8 bit, int on, int ebus)
 {
-       if (auxio_register) {
-               unsigned char regval;
-               unsigned long flags;
-               unsigned char newval;
-
-               spin_lock_irqsave(&auxio_lock, flags);
-
-               regval =  (u8)readl(auxio_register);
-               newval =  regval | bits_on;
-               newval &= ~bits_off;
-               writel((u32)newval, auxio_register);
+       u8 bits_on = bit;
+       u8 bits_off = 0;
 
-               spin_unlock_irqrestore(&auxio_lock, flags);
+       if (!on) {
+               u8 tmp = bits_off;
+               bits_off = bits_on;
+               bits_on = tmp;
        }
-}
-
-static inline void __auxio_ebus_set_led(int on)
-{
-       (on) ? __auxio_ebus_set(AUXIO_PCIO_LED, 0) :
-               __auxio_ebus_set(0, AUXIO_PCIO_LED) ;
-}
-
-static inline void __auxio_sbus_set_led(int on)
-{
-       (on) ? __auxio_sbus_set(AUXIO_AUX1_LED, 0) :
-               __auxio_sbus_set(0, AUXIO_AUX1_LED) ;
+       __auxio_rmw(bits_on, bits_off, ebus);
 }
 
 void auxio_set_led(int on)
 {
-       switch(auxio_devtype) {
-       case AUXIO_TYPE_SBUS:
-               __auxio_sbus_set_led(on);
-               break;
-       case AUXIO_TYPE_EBUS:
-               __auxio_ebus_set_led(on);
-               break;
-       default:
-               break;
-       }
+       int ebus = auxio_devtype == AUXIO_TYPE_EBUS;
+       u8 bit;
+
+       bit = (ebus ? AUXIO_PCIO_LED : AUXIO_AUX1_LED);
+       __auxio_set_bit(bit, on, ebus);
 }
 
-static inline void __auxio_sbus_set_lte(int on)
+static void __auxio_sbus_set_lte(int on)
 {
-       (on) ? __auxio_sbus_set(AUXIO_AUX1_LTE, 0) : 
-               __auxio_sbus_set(0, AUXIO_AUX1_LTE) ;
+       __auxio_set_bit(AUXIO_AUX1_LTE, on, 0);
 }
 
 void auxio_set_lte(int on)
similarity index 98%
rename from arch/sparc64/kernel/cherrs.S
rename to arch/sparc/kernel/cherrs.S
index 89afebd7eca08ef7f00343a4a4484b699242373f..4ee1ad420862d425cff03ba7aad8e395fcb75907 100644 (file)
@@ -102,7 +102,7 @@ cheetah_plus_dcpe_trap_vector:
        .type           do_cheetah_plus_data_parity,#function
 do_cheetah_plus_data_parity:
        rdpr            %pil, %g2
-       wrpr            %g0, 15, %pil
+       wrpr            %g0, PIL_NORMAL_MAX, %pil
        ba,pt           %xcc, etrap_irq
         rd             %pc, %g7
 #ifdef CONFIG_TRACE_IRQFLAGS
@@ -144,7 +144,7 @@ cheetah_plus_icpe_trap_vector:
        .type           do_cheetah_plus_insn_parity,#function
 do_cheetah_plus_insn_parity:
        rdpr            %pil, %g2
-       wrpr            %g0, 15, %pil
+       wrpr            %g0, PIL_NORMAL_MAX, %pil
        ba,pt           %xcc, etrap_irq
         rd             %pc, %g7
 #ifdef CONFIG_TRACE_IRQFLAGS
@@ -492,7 +492,7 @@ cheetah_fast_ecc:
        .type           c_fast_ecc,#function
 c_fast_ecc:
        rdpr            %pil, %g2
-       wrpr            %g0, 15, %pil
+       wrpr            %g0, PIL_NORMAL_MAX, %pil
        ba,pt           %xcc, etrap_irq
         rd             %pc, %g7
 #ifdef CONFIG_TRACE_IRQFLAGS
@@ -528,7 +528,7 @@ cheetah_cee:
        .type           c_cee,#function
 c_cee:
        rdpr            %pil, %g2
-       wrpr            %g0, 15, %pil
+       wrpr            %g0, PIL_NORMAL_MAX, %pil
        ba,pt           %xcc, etrap_irq
         rd             %pc, %g7
 #ifdef CONFIG_TRACE_IRQFLAGS
@@ -564,7 +564,7 @@ cheetah_deferred_trap:
        .type           c_deferred,#function
 c_deferred:
        rdpr            %pil, %g2
-       wrpr            %g0, 15, %pil
+       wrpr            %g0, PIL_NORMAL_MAX, %pil
        ba,pt           %xcc, etrap_irq
         rd             %pc, %g7
 #ifdef CONFIG_TRACE_IRQFLAGS
similarity index 91%
rename from arch/sparc64/kernel/compat_audit.c
rename to arch/sparc/kernel/compat_audit.c
index c831b0a4e660b69d1221230a2d5460db62459fef..d865575b25bf5b2162adfbc293aee40628312888 100644 (file)
@@ -1,4 +1,5 @@
-#include <asm/unistd_32.h>
+#define __32bit_syscall_numbers__
+#include <asm/unistd.h>
 
 unsigned sparc32_dir_class[] = {
 #include <asm-generic/audit_dir_write.h>
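compat_audit.c now defines __32bit_syscall_numbers__ and includes the unified <asm/unistd.h> instead of the deleted unistd_32.h, so the compat audit tables keep seeing sparc32 syscall numbers inside a 64-bit kernel. The merged header is not part of this diff, so the gate below is only a plausible sketch of how that selection works:

/* Sketch (assumed) of the selection logic in the merged asm/unistd.h */
#ifndef __32bit_syscall_numbers__
# ifndef __arch64__                 /* native 32-bit sparc build */
#  define __32bit_syscall_numbers__
# endif
#endif

#ifdef __32bit_syscall_numbers__
/* ... the numbers formerly in unistd_32.h ... */
#else
/* ... the numbers formerly in unistd_64.h, deleted above ... */
#endif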
index 1fc17f59c6bffc24758d3f44d3e8c71a66f1c8f6..6c2da2420f767d879358c98cfd2e04b3bee06767 100644 (file)
@@ -8,6 +8,8 @@
 #include <linux/init.h>
 #include <linux/smp.h>
 #include <linux/threads.h>
+
+#include <asm/spitfire.h>
 #include <asm/oplib.h>
 #include <asm/page.h>
 #include <asm/head.h>
 #include <asm/mbus.h>
 #include <asm/cpudata.h>
 
+#include "kernel.h"
+
 DEFINE_PER_CPU(cpuinfo_sparc, __cpu_data) = { 0 };
 
-struct cpu_iu_info {
-  int psr_impl;
-  int psr_vers;
-  char* cpu_name;   /* should be enough I hope... */
+struct cpu_info {
+       int psr_vers;
+       const char *name;
+};
+
+struct fpu_info {
+       int fp_vers;
+       const char *name;
 };
 
-struct cpu_fp_info {
-  int psr_impl;
-  int fp_vers;
-  char* fp_name;
+#define NOCPU 8
+#define NOFPU 8
+
+struct manufacturer_info {
+       int psr_impl;
+       struct cpu_info cpu_info[NOCPU];
+       struct fpu_info fpu_info[NOFPU];
 };
 
+#define CPU(ver, _name) \
+{ .psr_vers = ver, .name = _name }
+
+#define FPU(ver, _name) \
+{ .fp_vers = ver, .name = _name }
+
+static const struct manufacturer_info __initconst manufacturer_info[] = {
+{
+       0,
+       /* Sun4/100, 4/200, SLC */
+       .cpu_info = {
+               CPU(0, "Fujitsu  MB86900/1A or LSI L64831 SparcKIT-40"),
+               /* borned STP1012PGA */
+               CPU(4,  "Fujitsu  MB86904"),
+               CPU(5, "Fujitsu TurboSparc MB86907"),
+               CPU(-1, NULL)
+       },
+       .fpu_info = {
+               FPU(0, "Fujitsu MB86910 or Weitek WTL1164/5"),
+               FPU(1, "Fujitsu MB86911 or Weitek WTL1164/5 or LSI L64831"),
+               FPU(2, "LSI Logic L64802 or Texas Instruments ACT8847"),
+               /* SparcStation SLC, SparcStation1 */
+               FPU(3, "Weitek WTL3170/2"),
+               /* SPARCstation-5 */
+               FPU(4, "Lsi Logic/Meiko L64804 or compatible"),
+               FPU(-1, NULL)
+       }
+},{
+       1,
+       .cpu_info = {
+               /* SparcStation2, SparcServer 490 & 690 */
+               CPU(0, "LSI Logic Corporation - L64811"),
+               /* SparcStation2 */
+               CPU(1, "Cypress/ROSS CY7C601"),
+               /* Embedded controller */
+               CPU(3, "Cypress/ROSS CY7C611"),
+               /* Ross Technologies HyperSparc */
+               CPU(0xf, "ROSS HyperSparc RT620"),
+               CPU(0xe, "ROSS HyperSparc RT625 or RT626"),
+               CPU(-1, NULL)
+       },
+       .fpu_info = {
+               FPU(0, "ROSS HyperSparc combined IU/FPU"),
+               FPU(1, "Lsi Logic L64814"),
+               FPU(2, "Texas Instruments TMS390-C602A"),
+               FPU(3, "Cypress CY7C602 FPU"),
+               FPU(-1, NULL)
+       }
+},{
+       2,
+       .cpu_info = {
+               /* ECL Implementation, CRAY S-MP Supercomputer... AIEEE! */
+               /* Someone please write the code to support this beast! ;) */
+               CPU(0, "Bipolar Integrated Technology - B5010"),
+               CPU(-1, NULL)
+       },
+       .fpu_info = {
+               FPU(-1, NULL)
+       }
+},{
+       3,
+       .cpu_info = {
+               CPU(0, "LSI Logic Corporation - unknown-type"),
+               CPU(-1, NULL)
+       },
+       .fpu_info = {
+               FPU(-1, NULL)
+       }
+},{
+       4,
+       .cpu_info = {
+               CPU(0, "Texas Instruments, Inc. - SuperSparc-(II)"),
+               /* SparcClassic  --  borned STP1010TAB-50*/
+               CPU(1, "Texas Instruments, Inc. - MicroSparc"),
+               CPU(2, "Texas Instruments, Inc. - MicroSparc II"),
+               CPU(3, "Texas Instruments, Inc. - SuperSparc 51"),
+               CPU(4, "Texas Instruments, Inc. - SuperSparc 61"),
+               CPU(5, "Texas Instruments, Inc. - unknown"),
+               CPU(-1, NULL)
+       },
+       .fpu_info = {
+               /* SuperSparc 50 module */
+               FPU(0, "SuperSparc on-chip FPU"),
+               /* SparcClassic */
+               FPU(4, "TI MicroSparc on chip FPU"),
+               FPU(-1, NULL)
+       }
+},{
+       5,
+       .cpu_info = {
+               CPU(0, "Matsushita - MN10501"),
+               CPU(-1, NULL)
+       },
+       .fpu_info = {
+               FPU(0, "Matsushita MN10501"),
+               FPU(-1, NULL)
+       }
+},{
+       6,
+       .cpu_info = {
+               CPU(0, "Philips Corporation - unknown"),
+               CPU(-1, NULL)
+       },
+       .fpu_info = {
+               FPU(-1, NULL)
+       }
+},{
+       7,
+       .cpu_info = {
+               CPU(0, "Harvest VLSI Design Center, Inc. - unknown"),
+               CPU(-1, NULL)
+       },
+       .fpu_info = {
+               FPU(-1, NULL)
+       }
+},{
+       8,
+       .cpu_info = {
+               CPU(0, "Systems and Processes Engineering Corporation (SPEC)"),
+               CPU(-1, NULL)
+       },
+       .fpu_info = {
+               FPU(-1, NULL)
+       }
+},{
+       9,
+       .cpu_info = {
+               /* Gallium arsenide 200MHz, BOOOOGOOOOMIPS!!! */
+               CPU(0, "Fujitsu or Weitek Power-UP"),
+               CPU(1, "Fujitsu or Weitek Power-UP"),
+               CPU(2, "Fujitsu or Weitek Power-UP"),
+               CPU(3, "Fujitsu or Weitek Power-UP"),
+               CPU(-1, NULL)
+       },
+       .fpu_info = {
+               FPU(3, "Fujitsu or Weitek on-chip FPU"),
+               FPU(-1, NULL)
+       }
+},{
+       0x17,
+       .cpu_info = {
+               CPU(0x10, "TI UltraSparc I   (SpitFire)"),
+               CPU(0x11, "TI UltraSparc II  (BlackBird)"),
+               CPU(0x12, "TI UltraSparc IIi (Sabre)"),
+               CPU(0x13, "TI UltraSparc IIe (Hummingbird)"),
+               CPU(-1, NULL)
+       },
+       .fpu_info = {
+               FPU(0x10, "UltraSparc I integrated FPU"),
+               FPU(0x11, "UltraSparc II integrated FPU"),
+               FPU(0x12, "UltraSparc IIi integrated FPU"),
+               FPU(0x13, "UltraSparc IIe integrated FPU"),
+               FPU(-1, NULL)
+       }
+},{
+       0x22,
+       .cpu_info = {
+               CPU(0x10, "TI UltraSparc I   (SpitFire)"),
+               CPU(-1, NULL)
+       },
+       .fpu_info = {
+               FPU(0x10, "UltraSparc I integrated FPU"),
+               FPU(-1, NULL)
+       }
+},{
+       0x3e,
+       .cpu_info = {
+               CPU(0x14, "TI UltraSparc III (Cheetah)"),
+               CPU(0x15, "TI UltraSparc III+ (Cheetah+)"),
+               CPU(0x16, "TI UltraSparc IIIi (Jalapeno)"),
+               CPU(0x18, "TI UltraSparc IV (Jaguar)"),
+               CPU(0x19, "TI UltraSparc IV+ (Panther)"),
+               CPU(0x22, "TI UltraSparc IIIi+ (Serrano)"),
+               CPU(-1, NULL)
+       },
+       .fpu_info = {
+               FPU(0x14, "UltraSparc III integrated FPU"),
+               FPU(0x15, "UltraSparc III+ integrated FPU"),
+               FPU(0x16, "UltraSparc IIIi integrated FPU"),
+               FPU(0x18, "UltraSparc IV integrated FPU"),
+               FPU(0x19, "UltraSparc IV+ integrated FPU"),
+               FPU(0x22, "UltraSparc IIIi+ integrated FPU"),
+               FPU(-1, NULL)
+       }
+}};
+
 /* In order to get the fpu type correct, you need to take the IDPROM's
  * machine type value into consideration too.  I will fix this.
  */
-static struct cpu_fp_info linux_sparc_fpu[] = {
-  { 0, 0, "Fujitsu MB86910 or Weitek WTL1164/5"},
-  { 0, 1, "Fujitsu MB86911 or Weitek WTL1164/5 or LSI L64831"},
-  { 0, 2, "LSI Logic L64802 or Texas Instruments ACT8847"},
-  /* SparcStation SLC, SparcStation1 */
-  { 0, 3, "Weitek WTL3170/2"},
-  /* SPARCstation-5 */
-  { 0, 4, "Lsi Logic/Meiko L64804 or compatible"},
-  { 0, 5, "reserved"},
-  { 0, 6, "reserved"},
-  { 0, 7, "No FPU"},
-  { 1, 0, "ROSS HyperSparc combined IU/FPU"},
-  { 1, 1, "Lsi Logic L64814"},
-  { 1, 2, "Texas Instruments TMS390-C602A"},
-  { 1, 3, "Cypress CY7C602 FPU"},
-  { 1, 4, "reserved"},
-  { 1, 5, "reserved"},
-  { 1, 6, "reserved"},
-  { 1, 7, "No FPU"},
-  { 2, 0, "BIT B5010 or B5110/20 or B5210"},
-  { 2, 1, "reserved"},
-  { 2, 2, "reserved"},
-  { 2, 3, "reserved"},
-  { 2, 4, "reserved"},
-  { 2, 5, "reserved"},
-  { 2, 6, "reserved"},
-  { 2, 7, "No FPU"},
-  /* SuperSparc 50 module */
-  { 4, 0, "SuperSparc on-chip FPU"},
-  /* SparcClassic */
-  { 4, 4, "TI MicroSparc on chip FPU"},
-  { 5, 0, "Matsushita MN10501"},
-  { 5, 1, "reserved"},
-  { 5, 2, "reserved"},
-  { 5, 3, "reserved"},
-  { 5, 4, "reserved"},
-  { 5, 5, "reserved"},
-  { 5, 6, "reserved"},
-  { 5, 7, "No FPU"},
-  { 9, 3, "Fujitsu or Weitek on-chip FPU"},
-};
 
-#define NSPARCFPU  ARRAY_SIZE(linux_sparc_fpu)
-
-static struct cpu_iu_info linux_sparc_chips[] = {
-  /* Sun4/100, 4/200, SLC */
-  { 0, 0, "Fujitsu  MB86900/1A or LSI L64831 SparcKIT-40"},
-  /* borned STP1012PGA */
-  { 0, 4, "Fujitsu  MB86904"},
-  { 0, 5, "Fujitsu TurboSparc MB86907"},
-  /* SparcStation2, SparcServer 490 & 690 */
-  { 1, 0, "LSI Logic Corporation - L64811"},
-  /* SparcStation2 */
-  { 1, 1, "Cypress/ROSS CY7C601"},
-  /* Embedded controller */
-  { 1, 3, "Cypress/ROSS CY7C611"},
-  /* Ross Technologies HyperSparc */
-  { 1, 0xf, "ROSS HyperSparc RT620"},
-  { 1, 0xe, "ROSS HyperSparc RT625 or RT626"},
-  /* ECL Implementation, CRAY S-MP Supercomputer... AIEEE! */
-  /* Someone please write the code to support this beast! ;) */
-  { 2, 0, "Bipolar Integrated Technology - B5010"},
-  { 3, 0, "LSI Logic Corporation - unknown-type"},
-  { 4, 0, "Texas Instruments, Inc. - SuperSparc-(II)"},
-  /* SparcClassic  --  borned STP1010TAB-50*/
-  { 4, 1, "Texas Instruments, Inc. - MicroSparc"},
-  { 4, 2, "Texas Instruments, Inc. - MicroSparc II"},
-  { 4, 3, "Texas Instruments, Inc. - SuperSparc 51"},
-  { 4, 4, "Texas Instruments, Inc. - SuperSparc 61"},
-  { 4, 5, "Texas Instruments, Inc. - unknown"},
-  { 5, 0, "Matsushita - MN10501"},
-  { 6, 0, "Philips Corporation - unknown"},
-  { 7, 0, "Harvest VLSI Design Center, Inc. - unknown"},
-  /* Gallium arsenide 200MHz, BOOOOGOOOOMIPS!!! */
-  { 8, 0, "Systems and Processes Engineering Corporation (SPEC)"},
-  { 9, 0, "Fujitsu or Weitek Power-UP"},
-  { 9, 1, "Fujitsu or Weitek Power-UP"},
-  { 9, 2, "Fujitsu or Weitek Power-UP"},
-  { 9, 3, "Fujitsu or Weitek Power-UP"},
-  { 0xa, 0, "UNKNOWN CPU-VENDOR/TYPE"},
-  { 0xb, 0, "UNKNOWN CPU-VENDOR/TYPE"},
-  { 0xc, 0, "UNKNOWN CPU-VENDOR/TYPE"},
-  { 0xd, 0, "UNKNOWN CPU-VENDOR/TYPE"},
-  { 0xe, 0, "UNKNOWN CPU-VENDOR/TYPE"},
-  { 0xf, 0, "UNKNOWN CPU-VENDOR/TYPE"},
-};
+const char *sparc_cpu_type;
+const char *sparc_fpu_type;
 
-#define NSPARCCHIPS  ARRAY_SIZE(linux_sparc_chips)
+unsigned int fsr_storage;
 
-char *sparc_cpu_type;
-char *sparc_fpu_type;
+static void set_cpu_and_fpu(int psr_impl, int psr_vers, int fpu_vers)
+{
+       sparc_cpu_type = NULL;
+       sparc_fpu_type = NULL;
+       if (psr_impl < ARRAY_SIZE(manufacturer_info))
+       {
+               const struct cpu_info *cpu;
+               const struct fpu_info *fpu;
 
-unsigned int fsr_storage;
+               cpu = &manufacturer_info[psr_impl].cpu_info[0];
+               while (cpu->psr_vers != -1)
+               {
+                       if (cpu->psr_vers == psr_vers) {
+                               sparc_cpu_type = cpu->name;
+                               sparc_fpu_type = "No FPU";
+                               break;
+                       }
+                       cpu++;
+               }
+               fpu =  &manufacturer_info[psr_impl].fpu_info[0];
+               while (fpu->fp_vers != -1)
+               {
+                       if (fpu->fp_vers == fpu_vers) {
+                               sparc_fpu_type = fpu->name;
+                               break;
+                       }
+                       fpu++;
+               }
+       }
+       if (sparc_cpu_type == NULL)
+       {
+               printk(KERN_ERR "CPU: Unknown chip, impl[0x%x] vers[0x%x]\n",
+                      psr_impl, psr_vers);
+               sparc_cpu_type = "Unknown CPU";
+       }
+       if (sparc_fpu_type == NULL)
+       {
+               printk(KERN_ERR "FPU: Unknown chip, impl[0x%x] vers[0x%x]\n",
+                      psr_impl, fpu_vers);
+               sparc_fpu_type = "Unknown FPU";
+       }
+}
 
+#ifdef CONFIG_SPARC32
 void __cpuinit cpu_probe(void)
 {
        int psr_impl, psr_vers, fpu_vers;
-       int i, psr;
+       int psr;
 
-       psr_impl = ((get_psr()>>28)&0xf);
-       psr_vers = ((get_psr()>>24)&0xf);
+       psr_impl = ((get_psr() >> 28) & 0xf);
+       psr_vers = ((get_psr() >> 24) & 0xf);
 
        psr = get_psr();
        put_psr(psr | PSR_EF);
-       fpu_vers = ((get_fsr()>>17)&0x7);
+       fpu_vers = ((get_fsr() >> 17) & 0x7);
        put_psr(psr);
 
-       for(i = 0; i<NSPARCCHIPS; i++) {
-               if(linux_sparc_chips[i].psr_impl == psr_impl)
-                       if(linux_sparc_chips[i].psr_vers == psr_vers) {
-                               sparc_cpu_type = linux_sparc_chips[i].cpu_name;
-                               break;
-                       }
-       }
+       set_cpu_and_fpu(psr_impl, psr_vers, fpu_vers);
+}
+#else
+static void __init sun4v_cpu_probe(void)
+{
+       switch (sun4v_chip_type) {
+       case SUN4V_CHIP_NIAGARA1:
+               sparc_cpu_type = "UltraSparc T1 (Niagara)";
+               sparc_fpu_type = "UltraSparc T1 integrated FPU";
+               break;
 
-       if(i==NSPARCCHIPS)
-               printk("DEBUG: psr.impl = 0x%x   psr.vers = 0x%x\n", psr_impl, 
-                           psr_vers);
+       case SUN4V_CHIP_NIAGARA2:
+               sparc_cpu_type = "UltraSparc T2 (Niagara2)";
+               sparc_fpu_type = "UltraSparc T2 integrated FPU";
+               break;
 
-       for(i = 0; i<NSPARCFPU; i++) {
-               if(linux_sparc_fpu[i].psr_impl == psr_impl)
-                       if(linux_sparc_fpu[i].fp_vers == fpu_vers) {
-                               sparc_fpu_type = linux_sparc_fpu[i].fp_name;
-                               break;
-                       }
+       default:
+               printk(KERN_WARNING "CPU: Unknown sun4v cpu type [%s]\n",
+                      prom_cpu_compatible);
+               sparc_cpu_type = "Unknown SUN4V CPU";
+               sparc_fpu_type = "Unknown SUN4V FPU";
+               break;
        }
+}
+
+static int __init cpu_type_probe(void)
+{
+       if (tlb_type == hypervisor) {
+               sun4v_cpu_probe();
+       } else {
+               unsigned long ver;
+               int manuf, impl;
 
-       if(i == NSPARCFPU) {
-               printk("DEBUG: psr.impl = 0x%x  fsr.vers = 0x%x\n", psr_impl,
-                           fpu_vers);
-               sparc_fpu_type = linux_sparc_fpu[31].fp_name;
+               __asm__ __volatile__("rdpr %%ver, %0" : "=r" (ver));
+
+               manuf = ((ver >> 48) & 0xffff);
+               impl = ((ver >> 32) & 0xffff);
+               set_cpu_and_fpu(manuf, impl, impl);
        }
+       return 0;
 }
+
+arch_initcall(cpu_type_probe);
+#endif
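A quick worked example of the new lookup, using only entries visible in the table above:

/* Per the manufacturer_info table above, a part reporting psr_impl = 4,
 * psr_vers = 0 and fp_vers = 0 resolves as:
 *
 *     set_cpu_and_fpu(4, 0, 0);
 *     // sparc_cpu_type == "Texas Instruments, Inc. - SuperSparc-(II)"
 *     // sparc_fpu_type == "SuperSparc on-chip FPU"
 *
 * If fp_vers has no match, sparc_fpu_type keeps the "No FPU" default that
 * was set when the CPU entry matched.
 */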
index ad656b044b8c2b9a560af4cdfaf2a05e65634f2a..b171ae8de90dc5b49aa410c03c166603c37b4d02 100644 (file)
@@ -133,14 +133,12 @@ void __init device_scan(void)
 #endif /* !CONFIG_SMP */
 
        cpu_probe();
-#ifdef CONFIG_SUN_AUXIO
        {
                extern void auxio_probe(void);
                extern void auxio_power_probe(void);
                auxio_probe();
                auxio_power_probe();
        }
-#endif
        clock_stop_probe();
 
        if (ARCH_SUN4C)
similarity index 79%
rename from arch/sparc64/kernel/entry.h
rename to arch/sparc/kernel/entry.h
index 34d7ab5e10d23c7e0023fa31af791814e341b95d..4f53a2395ac6364ed3f52f8226dc94ecb7e5fc4e 100644 (file)
@@ -5,9 +5,43 @@
 #include <linux/types.h>
 #include <linux/init.h>
 
-extern const char *sparc_cpu_type;
-extern const char *sparc_fpu_type;
+/* irq */
+extern void handler_irq(int irq, struct pt_regs *regs);
 
+#ifdef CONFIG_SPARC32
+/* traps */
+extern void do_hw_interrupt(struct pt_regs *regs, unsigned long type);
+extern void do_illegal_instruction(struct pt_regs *regs, unsigned long pc,
+                                   unsigned long npc, unsigned long psr);
+
+extern void do_priv_instruction(struct pt_regs *regs, unsigned long pc,
+                                unsigned long npc, unsigned long psr);
+extern void do_memaccess_unaligned(struct pt_regs *regs, unsigned long pc,
+                                   unsigned long npc,
+                                   unsigned long psr);
+extern void do_fpd_trap(struct pt_regs *regs, unsigned long pc,
+                        unsigned long npc, unsigned long psr);
+extern void do_fpe_trap(struct pt_regs *regs, unsigned long pc,
+                        unsigned long npc, unsigned long psr);
+extern void handle_tag_overflow(struct pt_regs *regs, unsigned long pc,
+                                unsigned long npc, unsigned long psr);
+extern void handle_watchpoint(struct pt_regs *regs, unsigned long pc,
+                              unsigned long npc, unsigned long psr);
+extern void handle_reg_access(struct pt_regs *regs, unsigned long pc,
+                              unsigned long npc, unsigned long psr);
+extern void handle_cp_disabled(struct pt_regs *regs, unsigned long pc,
+                               unsigned long npc, unsigned long psr);
+extern void handle_cp_exception(struct pt_regs *regs, unsigned long pc,
+                                unsigned long npc, unsigned long psr);
+
+
+
+/* entry.S */
+extern void fpsave(unsigned long *fpregs, unsigned long *fsr,
+                   void *fpqueue, unsigned long *fpqdepth);
+extern void fpload(unsigned long *fpregs, unsigned long *fsr);
+
+#else /* CONFIG_SPARC32 */
 extern void __init per_cpu_patch(void);
 extern void __init sun4v_patch(void);
 extern void __init boot_cpu_id_too_large(int cpu);
@@ -188,8 +222,8 @@ struct ino_bucket {
 extern struct ino_bucket *ivector_table;
 extern unsigned long ivector_table_pa;
 
-extern void handler_irq(int irq, struct pt_regs *regs);
 extern void init_irqwork_curcpu(void);
 extern void __cpuinit sun4v_register_mondo_queues(int this_cpu);
 
+#endif /* CONFIG_SPARC32 */
 #endif /* _ENTRY_H */
similarity index 97%
rename from arch/sparc64/kernel/etrap.S
rename to arch/sparc/kernel/etrap_64.S
index 29ce489bc1889d2b01cf59fc432562098b574e55..786b185e6e3fa8d37e6cac8b820b0654b1b787c3 100644 (file)
@@ -16,9 +16,9 @@
 #include <asm/mmu.h>
 
 #define                TASK_REGOFF             (THREAD_SIZE-TRACEREG_SZ-STACKFRAME_SZ)
-#define                ETRAP_PSTATE1           (PSTATE_RMO | PSTATE_PRIV)
+#define                ETRAP_PSTATE1           (PSTATE_TSO | PSTATE_PRIV)
 #define                ETRAP_PSTATE2           \
-               (PSTATE_RMO | PSTATE_PEF | PSTATE_PRIV | PSTATE_IE)
+               (PSTATE_TSO | PSTATE_PEF | PSTATE_PRIV | PSTATE_IE)
 
 /*
  * On entry, %g7 is return address - 0x4.
@@ -130,7 +130,7 @@ etrap_save: save    %g2, -STACK_BIAS, %sp
                stx     %g6, [%sp + PTREGS_OFF + PT_V9_G6]
                stx     %g7, [%sp + PTREGS_OFF + PT_V9_G7]
                or      %l7, %l0, %l7
-               sethi   %hi(TSTATE_RMO | TSTATE_PEF), %l0
+               sethi   %hi(TSTATE_TSO | TSTATE_PEF), %l0
                or      %l7, %l0, %l7
                wrpr    %l2, %tnpc
                wrpr    %l7, (TSTATE_PRIV | TSTATE_IE), %tstate
similarity index 99%
rename from arch/sparc/kernel/head.S
rename to arch/sparc/kernel/head_32.S
index 51b40426f9c68e1ba0cde5fd4061c19efa02e9cf..f0b4b516304f48cb1863b1ebda7fafa80a0e5ab7 100644 (file)
@@ -990,7 +990,7 @@ sun4c_continue_boot:
 
                /* Zero out our BSS section. */
                set     __bss_start , %o0       ! First address of BSS
-               set     end , %o1               ! Last address of BSS
+               set     _end , %o1              ! Last address of BSS
                add     %o0, 0x1, %o0
 1:     
                stb     %g0, [%o0]
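The BSS-clearing loop above now ends at _end rather than the bare end symbol. The linker-script markers it relies on are conventionally declared in C roughly as follows (a sketch; the actual declarations live in the sections headers, which are not part of this hunk):

/* Conventional declarations for the linker-provided symbols used above. */
extern char __bss_start[];   /* first byte of .bss, zeroed during early boot */
extern char _end[];          /* first byte past the image (end of .bss) */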
similarity index 99%
rename from arch/sparc64/kernel/head.S
rename to arch/sparc/kernel/head_64.S
index 353226fa023991240653fadf8504b958dc4529ae..8ffee714f932a026fdeea5d2c2973dbcd724bdb0 100644 (file)
@@ -706,7 +706,7 @@ setup_trap_table:
        andn    %l0, PSTATE_IE, %o1
        wrpr    %o1, 0x0, %pstate
        rdpr    %pil, %l1
-       wrpr    %g0, 15, %pil
+       wrpr    %g0, PIL_NORMAL_MAX, %pil
 
        /* Make the firmware call to jump over to the Linux trap table.  */
        sethi   %hi(is_sun4v), %o0
@@ -825,8 +825,8 @@ setup_tba:
         restore
 sparc64_boot_end:
 
-#include "etrap.S"
-#include "rtrap.S"
+#include "etrap_64.S"
+#include "rtrap_64.S"
 #include "winfixup.S"
 #include "fpu_traps.S"
 #include "ivec.S"
@@ -882,7 +882,7 @@ swapper_4m_tsb:
 
 ! 0x0000000000428000
 
-#include "systbls.S"
+#include "systbls_64.S"
 
        .data
        .align  8
similarity index 96%
rename from arch/sparc64/kernel/hvcalls.S
rename to arch/sparc/kernel/hvcalls.S
index e066269d1594191c00c9405e21b8eb24ae915a3d..8a5f35ffb15ef622ddc9a0e8ecdc5c2a26142c01 100644 (file)
@@ -766,3 +766,35 @@ ENTRY(sun4v_mmu_demap_all)
        retl
         nop
 ENDPROC(sun4v_mmu_demap_all)
+
+ENTRY(sun4v_niagara_getperf)
+       mov     %o0, %o4
+       mov     HV_FAST_GET_PERFREG, %o5
+       ta      HV_FAST_TRAP
+       stx     %o1, [%o4]
+       retl
+        nop
+ENDPROC(sun4v_niagara_getperf)
+
+ENTRY(sun4v_niagara_setperf)
+       mov     HV_FAST_SET_PERFREG, %o5
+       ta      HV_FAST_TRAP
+       retl
+        nop
+ENDPROC(sun4v_niagara_setperf)
+
+ENTRY(sun4v_niagara2_getperf)
+       mov     %o0, %o4
+       mov     HV_FAST_N2_GET_PERFREG, %o5
+       ta      HV_FAST_TRAP
+       stx     %o1, [%o4]
+       retl
+        nop
+ENDPROC(sun4v_niagara2_getperf)
+
+ENTRY(sun4v_niagara2_setperf)
+       mov     HV_FAST_N2_SET_PERFREG, %o5
+       ta      HV_FAST_TRAP
+       retl
+        nop
+ENDPROC(sun4v_niagara2_setperf)
similarity index 95%
rename from arch/sparc64/kernel/hvtramp.S
rename to arch/sparc/kernel/hvtramp.S
index 0236c43772faf50385fd5348fb4f16d34dfcc98b..9365432904d64eb7ef017506991eab7bcbea4f2f 100644 (file)
@@ -1,6 +1,6 @@
 /* hvtramp.S: Hypervisor start-cpu trampoline code.
  *
- * Copyright (C) 2007 David S. Miller <davem@davemloft.net>
+ * Copyright (C) 2007, 2008 David S. Miller <davem@davemloft.net>
  */
 
 #include <linux/init.h>
@@ -14,6 +14,7 @@
 #include <asm/ptrace.h>
 #include <asm/head.h>
 #include <asm/asi.h>
+#include <asm/pil.h>
 
        __CPUINIT
        .align          8
@@ -32,7 +33,7 @@
         */
 hv_cpu_startup:
        SET_GL(0)
-       wrpr            %g0, 15, %pil
+       wrpr            %g0, PIL_NORMAL_MAX, %pil
        wrpr            %g0, 0, %canrestore
        wrpr            %g0, 0, %otherwin
        wrpr            %g0, 6, %cansave
index 223a6582e1e208c70e622c61443196c1d5de9a61..c16135e0c151bea8337c605d391b5fbf6828c0b1 100644 (file)
 
 #include <asm/oplib.h>
 #include <asm/idprom.h>
-#include <asm/machines.h>  /* Fun with Sun released architectures. */
 
 struct idprom *idprom;
 static struct idprom idprom_buffer;
 
+#ifdef CONFIG_SPARC32
+#include <asm/machines.h>  /* Fun with Sun released architectures. */
+
 /* Here is the master table of Sun machines which use some implementation
  * of the Sparc CPU and have a meaningful IDPROM machtype value that we
  * know about.  See asm-sparc/machines.h for empirical constants.
  */
 static struct Sun_Machine_Models Sun_Machines[NUM_SUN_MACHINES] = {
 /* First, Sun4's */
-{ "Sun 4/100 Series", (SM_SUN4 | SM_4_110) },
-{ "Sun 4/200 Series", (SM_SUN4 | SM_4_260) },
-{ "Sun 4/300 Series", (SM_SUN4 | SM_4_330) },
-{ "Sun 4/400 Series", (SM_SUN4 | SM_4_470) },
+{ .name = "Sun 4/100 Series",        .id_machtype = (SM_SUN4 | SM_4_110) },
+{ .name = "Sun 4/200 Series",        .id_machtype = (SM_SUN4 | SM_4_260) },
+{ .name = "Sun 4/300 Series",        .id_machtype = (SM_SUN4 | SM_4_330) },
+{ .name = "Sun 4/400 Series",        .id_machtype = (SM_SUN4 | SM_4_470) },
 /* Now, Sun4c's */
-{ "Sun4c SparcStation 1", (SM_SUN4C | SM_4C_SS1) },
-{ "Sun4c SparcStation IPC", (SM_SUN4C | SM_4C_IPC) },
-{ "Sun4c SparcStation 1+", (SM_SUN4C | SM_4C_SS1PLUS) },
-{ "Sun4c SparcStation SLC", (SM_SUN4C | SM_4C_SLC) },
-{ "Sun4c SparcStation 2", (SM_SUN4C | SM_4C_SS2) },
-{ "Sun4c SparcStation ELC", (SM_SUN4C | SM_4C_ELC) },
-{ "Sun4c SparcStation IPX", (SM_SUN4C | SM_4C_IPX) },
+{ .name = "Sun4c SparcStation 1",    .id_machtype = (SM_SUN4C | SM_4C_SS1) },
+{ .name = "Sun4c SparcStation IPC",  .id_machtype = (SM_SUN4C | SM_4C_IPC) },
+{ .name = "Sun4c SparcStation 1+",   .id_machtype = (SM_SUN4C | SM_4C_SS1PLUS) },
+{ .name = "Sun4c SparcStation SLC",  .id_machtype = (SM_SUN4C | SM_4C_SLC) },
+{ .name = "Sun4c SparcStation 2",    .id_machtype = (SM_SUN4C | SM_4C_SS2) },
+{ .name = "Sun4c SparcStation ELC",  .id_machtype = (SM_SUN4C | SM_4C_ELC) },
+{ .name = "Sun4c SparcStation IPX",  .id_machtype = (SM_SUN4C | SM_4C_IPX) },
 /* Finally, early Sun4m's */
-{ "Sun4m SparcSystem600", (SM_SUN4M | SM_4M_SS60) },
-{ "Sun4m SparcStation10/20", (SM_SUN4M | SM_4M_SS50) },
-{ "Sun4m SparcStation5", (SM_SUN4M | SM_4M_SS40) },
+{ .name = "Sun4m SparcSystem600",    .id_machtype = (SM_SUN4M | SM_4M_SS60) },
+{ .name = "Sun4m SparcStation10/20", .id_machtype = (SM_SUN4M | SM_4M_SS50) },
+{ .name = "Sun4m SparcStation5",     .id_machtype = (SM_SUN4M | SM_4M_SS40) },
 /* One entry for the OBP arch's which are sun4d, sun4e, and newer sun4m's */
-{ "Sun4M OBP based system", (SM_SUN4M_OBP | 0x0) } };
+{ .name = "Sun4M OBP based system",  .id_machtype = (SM_SUN4M_OBP | 0x0) } };
 
 static void __init display_system_type(unsigned char machtype)
 {
@@ -47,21 +49,25 @@ static void __init display_system_type(unsigned char machtype)
        register int i;
 
        for (i = 0; i < NUM_SUN_MACHINES; i++) {
-               if(Sun_Machines[i].id_machtype == machtype) {
+               if (Sun_Machines[i].id_machtype == machtype) {
                        if (machtype != (SM_SUN4M_OBP | 0x00) ||
                            prom_getproperty(prom_root_node, "banner-name",
                                             sysname, sizeof(sysname)) <= 0)
-                               printk("TYPE: %s\n", Sun_Machines[i].name);
+                               printk(KERN_WARNING "TYPE: %s\n",
+                                      Sun_Machines[i].name);
                        else
-                               printk("TYPE: %s\n", sysname);
+                               printk(KERN_WARNING "TYPE: %s\n", sysname);
                        return;
                }
        }
 
-       prom_printf("IDPROM: Bogus id_machtype value, 0x%x\n", machtype);
-       prom_halt();
+       prom_printf("IDPROM: Warning, bogus id_machtype value, 0x%x\n", machtype);
 }
-
+#else
+static void __init display_system_type(unsigned char machtype)
+{
+}
+#endif
 /* Calculate the IDPROM checksum (xor of the data bytes). */
 static unsigned char __init calc_idprom_cksum(struct idprom *idprom)
 {
@@ -80,21 +86,14 @@ void __init idprom_init(void)
 
        idprom = &idprom_buffer;
 
-       if (idprom->id_format != 0x01)  {
-               prom_printf("IDPROM: Unknown format type!\n");
-               prom_halt();
-       }
+       if (idprom->id_format != 0x01)
+               prom_printf("IDPROM: Warning, unknown format type!\n");
 
-       if (idprom->id_cksum != calc_idprom_cksum(idprom)) {
-               prom_printf("IDPROM: Checksum failure (nvram=%x, calc=%x)!\n",
+       if (idprom->id_cksum != calc_idprom_cksum(idprom))
+               prom_printf("IDPROM: Warning, checksum failure (nvram=%x, calc=%x)!\n",
                            idprom->id_cksum, calc_idprom_cksum(idprom));
-               prom_halt();
-       }
 
        display_system_type(idprom->id_machtype);
 
-       printk("Ethernet address: %x:%x:%x:%x:%x:%x\n",
-                   idprom->id_ethaddr[0], idprom->id_ethaddr[1],
-                   idprom->id_ethaddr[2], idprom->id_ethaddr[3],
-                   idprom->id_ethaddr[4], idprom->id_ethaddr[5]);
+       printk(KERN_WARNING "Ethernet address: %pM\n", idprom->id_ethaddr);
 }
index 8e64ebc445ef8c158f8fd594ed424fd24efda6e6..62126e4cec54c53c2572a93d3bf4055fe22ba709 100644 (file)
@@ -23,6 +23,5 @@ EXPORT_SYMBOL(init_task);
  * in etrap.S which assumes it.
  */
 union thread_union init_thread_union
-       __attribute__((section (".text\"\n\t#")))
-       __attribute__((aligned (THREAD_SIZE)))
+       __attribute__((section (".data.init_task")))
        = { INIT_THREAD_INFO(init_task) };
index 4f025b36934b2e53fb833c5e94f9c071fb4efbda..7ce14f05eb484f2fca5eb51f7dc193025de90cfa 100644 (file)
@@ -552,8 +552,8 @@ int pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sgl, int nents,
        /* IIep is write-through, not flushing. */
        for_each_sg(sgl, sg, nents, n) {
                BUG_ON(page_address(sg_page(sg)) == NULL);
-               sg->dvma_address = virt_to_phys(sg_virt(sg));
-               sg->dvma_length = sg->length;
+               sg->dma_address = virt_to_phys(sg_virt(sg));
+               sg->dma_length = sg->length;
        }
        return nents;
 }
similarity index 99%
rename from arch/sparc/kernel/irq.c
rename to arch/sparc/kernel/irq_32.c
index 93e1d1c65290b8008aef53d02d4de1f0e24c250a..f3488c45d57a0d7867b9292af1f2a01c2cbc6c56 100644 (file)
@@ -46,6 +46,7 @@
 #include <asm/cacheflush.h>
 #include <asm/irq_regs.h>
 
+#include "kernel.h"
 #include "irq.h"
 
 #ifdef CONFIG_SMP
@@ -592,19 +593,19 @@ EXPORT_SYMBOL(request_irq);
 
 void disable_irq_nosync(unsigned int irq)
 {
-       return __disable_irq(irq);
+       __disable_irq(irq);
 }
 EXPORT_SYMBOL(disable_irq_nosync);
 
 void disable_irq(unsigned int irq)
 {
-       return __disable_irq(irq);
+       __disable_irq(irq);
 }
 EXPORT_SYMBOL(disable_irq);
 
 void enable_irq(unsigned int irq)
 {
-       return __enable_irq(irq);
+       __enable_irq(irq);
 }
 
 EXPORT_SYMBOL(enable_irq);
similarity index 94%
rename from arch/sparc64/kernel/irq.c
rename to arch/sparc/kernel/irq_64.c
index 52fc836f464d979655c330b43b15736fc6c840a2..a3ea2bcb95de6a39ebb7bf297ee2355f507f178c 100644 (file)
@@ -775,6 +775,69 @@ void do_softirq(void)
        local_irq_restore(flags);
 }
 
+static void unhandled_perf_irq(struct pt_regs *regs)
+{
+       unsigned long pcr, pic;
+
+       read_pcr(pcr);
+       read_pic(pic);
+
+       write_pcr(0);
+
+       printk(KERN_EMERG "CPU %d: Got unexpected perf counter IRQ.\n",
+              smp_processor_id());
+       printk(KERN_EMERG "CPU %d: PCR[%016lx] PIC[%016lx]\n",
+              smp_processor_id(), pcr, pic);
+}
+
+/* Almost a direct copy of the powerpc PMC code.  */
+static DEFINE_SPINLOCK(perf_irq_lock);
+static void *perf_irq_owner_caller; /* mostly for debugging */
+static void (*perf_irq)(struct pt_regs *regs) = unhandled_perf_irq;
+
+/* Invoked from level 15 PIL handler in trap table.  */
+void perfctr_irq(int irq, struct pt_regs *regs)
+{
+       clear_softint(1 << irq);
+       perf_irq(regs);
+}
+
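+/* Claim the performance counter interrupt for a single owner; returns
+ * -EBUSY if another handler is already registered.
+ */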
+int register_perfctr_intr(void (*handler)(struct pt_regs *))
+{
+       int ret;
+
+       if (!handler)
+               return -EINVAL;
+
+       spin_lock(&perf_irq_lock);
+       if (perf_irq != unhandled_perf_irq) {
+               printk(KERN_WARNING "register_perfctr_intr: "
+                      "perf IRQ busy (reserved by caller %p)\n",
+                      perf_irq_owner_caller);
+               ret = -EBUSY;
+               goto out;
+       }
+
+       perf_irq_owner_caller = __builtin_return_address(0);
+       perf_irq = handler;
+
+       ret = 0;
+out:
+       spin_unlock(&perf_irq_lock);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(register_perfctr_intr);
+
+void release_perfctr_intr(void (*handler)(struct pt_regs *))
+{
+       spin_lock(&perf_irq_lock);
+       perf_irq_owner_caller = NULL;
+       perf_irq = unhandled_perf_irq;
+       spin_unlock(&perf_irq_lock);
+}
+EXPORT_SYMBOL_GPL(release_perfctr_intr);
+
 #ifdef CONFIG_HOTPLUG_CPU
 void fixup_irqs(void)
 {
diff --git a/arch/sparc/kernel/kernel.h b/arch/sparc/kernel/kernel.h
new file mode 100644 (file)
index 0000000..81a972e
--- /dev/null
@@ -0,0 +1,31 @@
+#ifndef __SPARC_KERNEL_H
+#define __SPARC_KERNEL_H
+
+#include <linux/interrupt.h>
+
+/* cpu.c */
+extern const char *sparc_cpu_type;
+extern const char *sparc_fpu_type;
+
+extern unsigned int fsr_storage;
+
+#ifdef CONFIG_SPARC32
+/* cpu.c */
+extern void cpu_probe(void);
+
+/* traps_32.c */
+extern void handle_hw_divzero(struct pt_regs *regs, unsigned long pc,
+                              unsigned long npc, unsigned long psr);
+/* muldiv.c */
+extern int do_user_muldiv (struct pt_regs *, unsigned long);
+
+/* irq_32.c */
+extern struct irqaction static_irqaction[];
+extern int static_irq_count;
+extern spinlock_t irq_action_lock;
+
+extern void unexpected_irq(int irq, void *dev_id, struct pt_regs * regs);
+
+#else /* CONFIG_SPARC32 */
+#endif /* CONFIG_SPARC32 */
+#endif /* !(__SPARC_KERNEL_H) */
similarity index 99%
rename from arch/sparc64/kernel/mdesc.c
rename to arch/sparc/kernel/mdesc.c
index dde52bcf5c64e11daa1cd22558238ebd45eb46b9..3c539a6d7c18095a68532221007e2a43dc4aee9d 100644 (file)
@@ -11,6 +11,7 @@
 #include <linux/mm.h>
 #include <linux/miscdevice.h>
 
+#include <asm/cpudata.h>
 #include <asm/hypervisor.h>
 #include <asm/mdesc.h>
 #include <asm/prom.h>
index 598682f31ebfd942eec586fc23fd6919ce42d710..90273765e81f95b7d07250fe35dc29cc5d122ecc 100644 (file)
@@ -1,4 +1,4 @@
-/* Kernel module help for sparc32.
+/* Kernel module help for sparc64.
  *
  * Copyright (C) 2001 Rusty Russell.
  * Copyright (C) 2002 David S. Miller.
 #include <linux/fs.h>
 #include <linux/string.h>
 #include <linux/ctype.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+
+#include <asm/processor.h>
+#include <asm/spitfire.h>
+
+#ifdef CONFIG_SPARC64
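+/* On sparc64 modules live in the dedicated MODULES_VADDR..MODULES_END
+ * window in the low 32-bit address space, which the relocation code
+ * below relies on; sparc32 simply uses vmalloc().
+ */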
+static void *module_map(unsigned long size)
+{
+       struct vm_struct *area;
+
+       size = PAGE_ALIGN(size);
+       if (!size || size > MODULES_LEN)
+               return NULL;
+
+       area = __get_vm_area(size, VM_ALLOC, MODULES_VADDR, MODULES_END);
+       if (!area)
+               return NULL;
+
+       return __vmalloc_area(area, GFP_KERNEL, PAGE_KERNEL);
+}
+
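+/* No symbol renaming is needed on sparc64; names are used as-is. */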
+static char *dot2underscore(char *name)
+{
+       return name;
+}
+#else
+static void *module_map(unsigned long size)
+{
+       return vmalloc(size);
+}
+
+/* Replace references to .func with _Func */
+static char *dot2underscore(char *name)
+{
+       if (name[0] == '.') {
+               name[0] = '_';
+               name[1] = toupper(name[1]);
+       }
+       return name;
+}
+#endif /* CONFIG_SPARC64 */
 
 void *module_alloc(unsigned long size)
 {
@@ -20,7 +62,7 @@ void *module_alloc(unsigned long size)
        if (size == 0)
                return NULL;
 
-       ret = vmalloc(size);
+       ret = module_map(size);
        if (!ret)
                ret = ERR_PTR(-ENOMEM);
        else
@@ -37,16 +79,14 @@ void module_free(struct module *mod, void *module_region)
            table entries. */
 }
 
-/* Make generic code ignore STT_REGISTER dummy undefined symbols,
- * and replace references to .func with _Func
- */
+/* Make generic code ignore STT_REGISTER dummy undefined symbols.  */
 int module_frob_arch_sections(Elf_Ehdr *hdr,
                              Elf_Shdr *sechdrs,
                              char *secstrings,
                              struct module *mod)
 {
        unsigned int symidx;
-       Elf32_Sym *sym;
+       Elf_Sym *sym;
        char *strtab;
        int i;
 
@@ -56,26 +96,23 @@ int module_frob_arch_sections(Elf_Ehdr *hdr,
                        return -ENOEXEC;
                }
        }
-       sym = (Elf32_Sym *)sechdrs[symidx].sh_addr;
+       sym = (Elf_Sym *)sechdrs[symidx].sh_addr;
        strtab = (char *)sechdrs[sechdrs[symidx].sh_link].sh_addr;
 
        for (i = 1; i < sechdrs[symidx].sh_size / sizeof(Elf_Sym); i++) {
                if (sym[i].st_shndx == SHN_UNDEF) {
-                       if (ELF32_ST_TYPE(sym[i].st_info) == STT_REGISTER)
+                       if (ELF_ST_TYPE(sym[i].st_info) == STT_REGISTER) {
                                sym[i].st_shndx = SHN_ABS;
-                       else {
+                       } else {
                                char *name = strtab + sym[i].st_name;
-                               if (name[0] == '.') {
-                                       name[0] = '_';
-                                       name[1] = toupper(name[1]);
-                               }
+                               dot2underscore(name);
                        }
                }
        }
        return 0;
 }
 
-int apply_relocate(Elf32_Shdr *sechdrs,
+int apply_relocate(Elf_Shdr *sechdrs,
                   const char *strtab,
                   unsigned int symindex,
                   unsigned int relsec,
@@ -86,32 +123,68 @@ int apply_relocate(Elf32_Shdr *sechdrs,
        return -ENOEXEC;
 }
 
-int apply_relocate_add(Elf32_Shdr *sechdrs,
+int apply_relocate_add(Elf_Shdr *sechdrs,
                       const char *strtab,
                       unsigned int symindex,
                       unsigned int relsec,
                       struct module *me)
 {
        unsigned int i;
-       Elf32_Rela *rel = (void *)sechdrs[relsec].sh_addr;
-       Elf32_Sym *sym;
+       Elf_Rela *rel = (void *)sechdrs[relsec].sh_addr;
+       Elf_Sym *sym;
        u8 *location;
        u32 *loc32;
 
        for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
-               Elf32_Addr v;
+               Elf_Addr v;
 
                /* This is where to make the change */
                location = (u8 *)sechdrs[sechdrs[relsec].sh_info].sh_addr
                        + rel[i].r_offset;
                loc32 = (u32 *) location;
+
+#ifdef CONFIG_SPARC64
+               BUG_ON(((u64)location >> (u64)32) != (u64)0);
+#endif /* CONFIG_SPARC64 */
+
                /* This is the symbol it is referring to.  Note that all
                   undefined symbols have been resolved.  */
-               sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
-                       + ELF32_R_SYM(rel[i].r_info);
+               sym = (Elf_Sym *)sechdrs[symindex].sh_addr
+                       + ELF_R_SYM(rel[i].r_info);
                v = sym->st_value + rel[i].r_addend;
 
-               switch (ELF32_R_TYPE(rel[i].r_info)) {
+               switch (ELF_R_TYPE(rel[i].r_info) & 0xff) {
+#ifdef CONFIG_SPARC64
+               case R_SPARC_64:
+                       location[0] = v >> 56;
+                       location[1] = v >> 48;
+                       location[2] = v >> 40;
+                       location[3] = v >> 32;
+                       location[4] = v >> 24;
+                       location[5] = v >> 16;
+                       location[6] = v >>  8;
+                       location[7] = v >>  0;
+                       break;
+
+               case R_SPARC_DISP32:
+                       v -= (Elf_Addr) location;
+                       *loc32 = v;
+                       break;
+
+               case R_SPARC_WDISP19:
+                       v -= (Elf_Addr) location;
+                       *loc32 = (*loc32 & ~0x7ffff) |
+                               ((v >> 2) & 0x7ffff);
+                       break;
+
+               case R_SPARC_OLO10:
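+                       /* OLO10 carries a second addend in the upper
+                        * bits of the relocation type field.
+                        */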
+                       *loc32 = (*loc32 & ~0x1fff) |
+                               (((v & 0x3ff) +
+                                 (ELF_R_TYPE(rel[i].r_info) >> 8))
+                                & 0x1fff);
+                       break;
+#endif /* CONFIG_SPARC64 */
+
                case R_SPARC_32:
                case R_SPARC_UA32:
                        location[0] = v >> 24;
@@ -121,13 +194,13 @@ int apply_relocate_add(Elf32_Shdr *sechdrs,
                        break;
 
                case R_SPARC_WDISP30:
-                       v -= (Elf32_Addr) location;
+                       v -= (Elf_Addr) location;
                        *loc32 = (*loc32 & ~0x3fffffff) |
                                ((v >> 2) & 0x3fffffff);
                        break;
 
                case R_SPARC_WDISP22:
-                       v -= (Elf32_Addr) location;
+                       v -= (Elf_Addr) location;
                        *loc32 = (*loc32 & ~0x3fffff) |
                                ((v >> 2) & 0x3fffff);
                        break;
@@ -144,19 +217,38 @@ int apply_relocate_add(Elf32_Shdr *sechdrs,
                default:
                        printk(KERN_ERR "module %s: Unknown relocation: %x\n",
                               me->name,
-                              (int) (ELF32_R_TYPE(rel[i].r_info) & 0xff));
+                              (int) (ELF_R_TYPE(rel[i].r_info) & 0xff));
                        return -ENOEXEC;
                };
        }
        return 0;
 }
 
+#ifdef CONFIG_SPARC64
 int module_finalize(const Elf_Ehdr *hdr,
                    const Elf_Shdr *sechdrs,
                    struct module *me)
 {
+       /* Cheetah's I-cache is fully coherent.  */
+       if (tlb_type == spitfire) {
+               unsigned long va;
+
+               flushw_all();
+               for (va =  0; va < (PAGE_SIZE << 1); va += 32)
+                       spitfire_put_icache_tag(va, 0x0);
+               __asm__ __volatile__("flush %g6");
+       }
+
        return 0;
 }
+#else
+int module_finalize(const Elf_Ehdr *hdr,
+                    const Elf_Shdr *sechdrs,
+                    struct module *me)
+{
+       return 0;
+}
+#endif /* CONFIG_SPARC64 */
 
 void module_arch_cleanup(struct module *mod)
 {
index e352239e72c8f7423a7a70110f52bd8136ca5a2a..ba960c02bb55eedeb3b7c15cdf6e37e59cd8dd58 100644 (file)
@@ -17,6 +17,8 @@
 #include <asm/system.h>
 #include <asm/uaccess.h>
 
+#include "kernel.h"
+
 /* #define DEBUG_MULDIV */
 
 static inline int has_imm13(int insn)
@@ -88,9 +90,6 @@ store_reg(unsigned int result, unsigned int reg, struct pt_regs *regs)
                return (put_user(result, &win->locals[reg - 16]));
        }
 }
-               
-extern void handle_hw_divzero (struct pt_regs *regs, unsigned long pc,
-                              unsigned long npc, unsigned long psr);
 
 /* Should return 0 if mul/div emulation succeeded and SIGILL should
  * not be issued.
similarity index 98%
rename from arch/sparc64/kernel/of_device.c
rename to arch/sparc/kernel/of_device_64.c
index 0f616ae3246c5a9bc4be334f52c2879fead932f2..46e231f7c5ce2c37cf51b6111a0175f9dcc4280e 100644 (file)
@@ -811,20 +811,20 @@ static struct of_device * __init scan_one_device(struct device_node *dp,
 
        irq = of_get_property(dp, "interrupts", &len);
        if (irq) {
-               memcpy(op->irqs, irq, len);
                op->num_irqs = len / 4;
+
+               /* Prevent overrunning the op->irqs[] array.  */
+               if (op->num_irqs > PROMINTR_MAX) {
+                       printk(KERN_WARNING "%s: Too many irqs (%d), "
+                              "limiting to %d.\n",
+                              dp->full_name, op->num_irqs, PROMINTR_MAX);
+                       op->num_irqs = PROMINTR_MAX;
+               }
+               memcpy(op->irqs, irq, op->num_irqs * 4);
        } else {
                op->num_irqs = 0;
        }
 
-       /* Prevent overrunning the op->irqs[] array.  */
-       if (op->num_irqs > PROMINTR_MAX) {
-               printk(KERN_WARNING "%s: Too many irqs (%d), "
-                      "limiting to %d.\n",
-                      dp->full_name, op->num_irqs, PROMINTR_MAX);
-               op->num_irqs = PROMINTR_MAX;
-       }
-
        build_device_resources(op, parent);
        for (i = 0; i < op->num_irqs; i++)
                op->irqs[i] = build_one_device_irq(op, parent, op->irqs[i]);
index 462584e55fba13bff47a39ab03ca533b8797cd05..75ed98be3edfe21bf08bb4eb2d9a39a7fca87945 100644 (file)
@@ -436,7 +436,7 @@ int pcic_present(void)
        return pcic0_up;
 }
 
-static int __init pdev_to_pnode(struct linux_pbm_info *pbm, 
+static int __devinit pdev_to_pnode(struct linux_pbm_info *pbm,
                                    struct pci_dev *pdev)
 {
        struct linux_prom_pci_registers regs[PROMREG_MAX];
index 2afcfab4f11cc011585ddc9901b64da953ed9b87..5e4563d86f191d39c2cf6ab386f233a13a877585 100644 (file)
  */
 
 #define PMC_OBPNAME    "SUNW,pmc"
-#define PMC_DEVNAME "pmc"
+#define PMC_DEVNAME    "pmc"
 
 #define PMC_IDLE_REG   0x00
-#define PMC_IDLE_ON            0x01
+#define PMC_IDLE_ON    0x01
 
 static u8 __iomem *regs;
 
 #define pmc_readb(offs)                (sbus_readb(regs+offs))
-#define pmc_writeb(val, offs)  (sbus_writeb(val, regs+offs))
+#define pmc_writeb(val, offs)  (sbus_writeb(val, regs+offs))
 
-/* 
+/*
  * CPU idle callback function
  * See .../arch/sparc/kernel/process.c
  */
-void pmc_swift_idle(void)
+static void pmc_swift_idle(void)
 {
 #ifdef PMC_DEBUG_LED
-       set_auxio(0x00, AUXIO_LED); 
+       set_auxio(0x00, AUXIO_LED);
 #endif
 
        pmc_writeb(pmc_readb(PMC_IDLE_REG) | PMC_IDLE_ON, PMC_IDLE_REG);
 
 #ifdef PMC_DEBUG_LED
-       set_auxio(AUXIO_LED, 0x00); 
+       set_auxio(AUXIO_LED, 0x00);
 #endif
-} 
+}
 
 static int __devinit pmc_probe(struct of_device *op,
                               const struct of_device_id *match)
@@ -63,7 +63,7 @@ static int __devinit pmc_probe(struct of_device *op,
 
 #ifndef PMC_NO_IDLE
        /* Assign power management IDLE handler */
-       pm_idle = pmc_swift_idle;       
+       pm_idle = pmc_swift_idle;
 #endif
 
        printk(KERN_INFO "%s: power management initialized\n", PMC_DEVNAME);
similarity index 99%
rename from arch/sparc/kernel/process.c
rename to arch/sparc/kernel/process_32.c
index e8c43ffe317ef120d52094122e5cdab86e8b87d9..69d9315f4a93e10797a92ac46ad3006a2dc24f18 100644 (file)
@@ -168,11 +168,9 @@ void machine_restart(char * cmd)
 
 void machine_power_off(void)
 {
-#ifdef CONFIG_SUN_AUXIO
        if (auxio_power_register &&
            (strcmp(of_console_device->type, "serial") || scons_pwroff))
                *auxio_power_register |= AUXIO_POWER_OFF;
-#endif
        machine_halt();
 }
 
diff --git a/arch/sparc/kernel/prom.h b/arch/sparc/kernel/prom.h
new file mode 100644 (file)
index 0000000..bb0f0fd
--- /dev/null
@@ -0,0 +1,29 @@
+#ifndef __PROM_H
+#define __PROM_H
+
+#include <linux/spinlock.h>
+#include <asm/prom.h>
+
+extern struct device_node *allnodes;   /* temporary while merging */
+extern rwlock_t devtree_lock;  /* temporary while merging */
+
+extern void * prom_early_alloc(unsigned long size);
+extern void irq_trans_init(struct device_node *dp);
+
+extern unsigned int prom_unique_id;
+
+static inline int is_root_node(const struct device_node *dp)
+{
+       if (!dp)
+               return 0;
+
+       return (dp->parent == NULL);
+}
+
+extern char *build_path_component(struct device_node *dp);
+extern void of_console_init(void);
+extern void of_fill_in_cpu_data(void);
+
+extern unsigned int prom_early_allocated;
+
+#endif /* __PROM_H */
similarity index 51%
rename from arch/sparc/kernel/prom.c
rename to arch/sparc/kernel/prom_32.c
index eee5efcfe50eea300a42d43e887bf4a7b5a54c6a..fe43e80772dbf597e201b7badd87521b364c6e53 100644 (file)
 #include <asm/prom.h>
 #include <asm/oplib.h>
 
-extern struct device_node *allnodes;   /* temporary while merging */
+#include "prom.h"
 
-extern rwlock_t devtree_lock;  /* temporary while merging */
-
-struct device_node *of_find_node_by_phandle(phandle handle)
-{
-       struct device_node *np;
-
-       for (np = allnodes; np != 0; np = np->allnext)
-               if (np->node == handle)
-                       break;
-
-       return np;
-}
-EXPORT_SYMBOL(of_find_node_by_phandle);
-
-int of_getintprop_default(struct device_node *np, const char *name, int def)
-{
-       struct property *prop;
-       int len;
-
-       prop = of_find_property(np, name, &len);
-       if (!prop || len != 4)
-               return def;
-
-       return *(int *) prop->value;
-}
-EXPORT_SYMBOL(of_getintprop_default);
-
-DEFINE_MUTEX(of_set_property_mutex);
-EXPORT_SYMBOL(of_set_property_mutex);
-
-int of_set_property(struct device_node *dp, const char *name, void *val, int len)
-{
-       struct property **prevp;
-       void *new_val;
-       int err;
-
-       new_val = kmalloc(len, GFP_KERNEL);
-       if (!new_val)
-               return -ENOMEM;
-
-       memcpy(new_val, val, len);
-
-       err = -ENODEV;
-
-       write_lock(&devtree_lock);
-       prevp = &dp->properties;
-       while (*prevp) {
-               struct property *prop = *prevp;
-
-               if (!strcasecmp(prop->name, name)) {
-                       void *old_val = prop->value;
-                       int ret;
-
-                       mutex_lock(&of_set_property_mutex);
-                       ret = prom_setprop(dp->node, (char *) name, val, len);
-                       mutex_unlock(&of_set_property_mutex);
-
-                       err = -EINVAL;
-                       if (ret >= 0) {
-                               prop->value = new_val;
-                               prop->length = len;
-
-                               if (OF_IS_DYNAMIC(prop))
-                                       kfree(old_val);
-
-                               OF_MARK_DYNAMIC(prop);
-
-                               err = 0;
-                       }
-                       break;
-               }
-               prevp = &(*prevp)->next;
-       }
-       write_unlock(&devtree_lock);
-
-       /* XXX Upate procfs if necessary... */
-
-       return err;
-}
-EXPORT_SYMBOL(of_set_property);
-
-int of_find_in_proplist(const char *list, const char *match, int len)
-{
-       while (len > 0) {
-               int l;
-
-               if (!strcmp(list, match))
-                       return 1;
-               l = strlen(list) + 1;
-               list += l;
-               len -= l;
-       }
-       return 0;
-}
-EXPORT_SYMBOL(of_find_in_proplist);
-
-static unsigned int prom_early_allocated;
-
-static void * __init prom_early_alloc(unsigned long size)
+void * __init prom_early_alloc(unsigned long size)
 {
        void *ret;
 
@@ -138,14 +40,6 @@ static void * __init prom_early_alloc(unsigned long size)
        return ret;
 }
 
-static int is_root_node(const struct device_node *dp)
-{
-       if (!dp)
-               return 0;
-
-       return (dp->parent == NULL);
-}
-
 /* The following routines deal with the black magic of fully naming a
  * node.
  *
@@ -257,7 +151,7 @@ static void __init __build_path_component(struct device_node *dp, char *tmp_buf)
        return sparc32_path_component(dp, tmp_buf);
 }
 
-static char * __init build_path_component(struct device_node *dp)
+char * __init build_path_component(struct device_node *dp)
 {
        char tmp_buf[64], *n;
 
@@ -272,164 +166,9 @@ static char * __init build_path_component(struct device_node *dp)
        return n;
 }
 
-static char * __init build_full_name(struct device_node *dp)
-{
-       int len, ourlen, plen;
-       char *n;
-
-       plen = strlen(dp->parent->full_name);
-       ourlen = strlen(dp->path_component_name);
-       len = ourlen + plen + 2;
-
-       n = prom_early_alloc(len);
-       strcpy(n, dp->parent->full_name);
-       if (!is_root_node(dp->parent)) {
-               strcpy(n + plen, "/");
-               plen++;
-       }
-       strcpy(n + plen, dp->path_component_name);
-
-       return n;
-}
-
-static unsigned int unique_id;
-
-static struct property * __init build_one_prop(phandle node, char *prev, char *special_name, void *special_val, int special_len)
-{
-       static struct property *tmp = NULL;
-       struct property *p;
-       int len;
-       const char *name;
-
-       if (tmp) {
-               p = tmp;
-               memset(p, 0, sizeof(*p) + 32);
-               tmp = NULL;
-       } else {
-               p = prom_early_alloc(sizeof(struct property) + 32);
-               p->unique_id = unique_id++;
-       }
-
-       p->name = (char *) (p + 1);
-       if (special_name) {
-               strcpy(p->name, special_name);
-               p->length = special_len;
-               p->value = prom_early_alloc(special_len);
-               memcpy(p->value, special_val, special_len);
-       } else {
-               if (prev == NULL) {
-                       name = prom_firstprop(node, NULL);
-               } else {
-                       name = prom_nextprop(node, prev, NULL);
-               }
-               if (strlen(name) == 0) {
-                       tmp = p;
-                       return NULL;
-               }
-               strcpy(p->name, name);
-               p->length = prom_getproplen(node, p->name);
-               if (p->length <= 0) {
-                       p->length = 0;
-               } else {
-                       p->value = prom_early_alloc(p->length + 1);
-                       len = prom_getproperty(node, p->name, p->value,
-                                              p->length);
-                       if (len <= 0)
-                               p->length = 0;
-                       ((unsigned char *)p->value)[p->length] = '\0';
-               }
-       }
-       return p;
-}
-
-static struct property * __init build_prop_list(phandle node)
-{
-       struct property *head, *tail;
-
-       head = tail = build_one_prop(node, NULL,
-                                    ".node", &node, sizeof(node));
-
-       tail->next = build_one_prop(node, NULL, NULL, NULL, 0);
-       tail = tail->next;
-       while(tail) {
-               tail->next = build_one_prop(node, tail->name,
-                                           NULL, NULL, 0);
-               tail = tail->next;
-       }
-
-       return head;
-}
-
-static char * __init get_one_property(phandle node, char *name)
-{
-       char *buf = "<NULL>";
-       int len;
-
-       len = prom_getproplen(node, name);
-       if (len > 0) {
-               buf = prom_early_alloc(len);
-               len = prom_getproperty(node, name, buf, len);
-       }
-
-       return buf;
-}
-
-static struct device_node * __init create_node(phandle node)
-{
-       struct device_node *dp;
-
-       if (!node)
-               return NULL;
-
-       dp = prom_early_alloc(sizeof(*dp));
-       dp->unique_id = unique_id++;
-
-       kref_init(&dp->kref);
-
-       dp->name = get_one_property(node, "name");
-       dp->type = get_one_property(node, "device_type");
-       dp->node = node;
-
-       /* Build interrupts later... */
-
-       dp->properties = build_prop_list(node);
-
-       return dp;
-}
-
-static struct device_node * __init build_tree(struct device_node *parent, phandle node, struct device_node ***nextp)
-{
-       struct device_node *dp;
-
-       dp = create_node(node);
-       if (dp) {
-               *(*nextp) = dp;
-               *nextp = &dp->allnext;
-
-               dp->parent = parent;
-               dp->path_component_name = build_path_component(dp);
-               dp->full_name = build_full_name(dp);
-
-               dp->child = build_tree(dp, prom_getchild(node), nextp);
-
-               dp->sibling = build_tree(parent, prom_getsibling(node), nextp);
-       }
-
-       return dp;
-}
-
-struct device_node *of_console_device;
-EXPORT_SYMBOL(of_console_device);
-
-char *of_console_path;
-EXPORT_SYMBOL(of_console_path);
-
-char *of_console_options;
-EXPORT_SYMBOL(of_console_options);
-
 extern void restore_current(void);
 
-static void __init of_console_init(void)
+void __init of_console_init(void)
 {
        char *msg = "OF stdout device is: %s\n";
        struct device_node *dp;
@@ -547,20 +286,10 @@ static void __init of_console_init(void)
        printk(msg, of_console_path);
 }
 
-void __init prom_build_devicetree(void)
+void __init of_fill_in_cpu_data(void)
 {
-       struct device_node **nextp;
-
-       allnodes = create_node(prom_root_node);
-       allnodes->path_component_name = "";
-       allnodes->full_name = "/";
-
-       nextp = &allnodes->allnext;
-       allnodes->child = build_tree(allnodes,
-                                    prom_getchild(allnodes->node),
-                                    &nextp);
-       of_console_init();
+}
 
-       printk("PROM: Built device tree with %u bytes of memory.\n",
-              prom_early_allocated);
+void __init irq_trans_init(struct device_node *dp)
+{
 }
diff --git a/arch/sparc/kernel/prom_64.c b/arch/sparc/kernel/prom_64.c
new file mode 100644 (file)
index 0000000..edecca7
--- /dev/null
@@ -0,0 +1,571 @@
+/*
+ * Procedures for creating, accessing and interpreting the device tree.
+ *
+ * Paul Mackerras      August 1996.
+ * Copyright (C) 1996-2005 Paul Mackerras.
+ * 
+ *  Adapted for 64bit PowerPC by Dave Engebretsen and Peter Bergner.
+ *    {engebret|bergner}@us.ibm.com 
+ *
+ *  Adapted for sparc64 by David S. Miller davem@davemloft.net
+ *
+ *      This program is free software; you can redistribute it and/or
+ *      modify it under the terms of the GNU General Public License
+ *      as published by the Free Software Foundation; either version
+ *      2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/lmb.h>
+#include <linux/of_device.h>
+
+#include <asm/prom.h>
+#include <asm/oplib.h>
+#include <asm/irq.h>
+#include <asm/asi.h>
+#include <asm/upa.h>
+#include <asm/smp.h>
+
+#include "prom.h"
+
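+/* Early boot allocator backed by lmb, used while the device tree is
+ * built before the normal page allocator is available.
+ */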
+void * __init prom_early_alloc(unsigned long size)
+{
+       unsigned long paddr = lmb_alloc(size, SMP_CACHE_BYTES);
+       void *ret;
+
+       if (!paddr) {
+               prom_printf("prom_early_alloc(%lu) failed\n", size);
+               prom_halt();
+       }
+
+       ret = __va(paddr);
+       memset(ret, 0, size);
+       prom_early_allocated += size;
+
+       return ret;
+}
+
+/* The following routines deal with the black magic of fully naming a
+ * node.
+ *
+ * Certain well known named nodes are just the simple name string.
+ *
+ * Actual devices have an address specifier appended to the base name
+ * string, like this "foo@addr".  The "addr" can be in any number of
+ * formats, and the platform plus the type of the node determine the
+ * format and how it is constructed.
+ *
+ * For children of the ROOT node, the naming convention is fixed and
+ * determined by whether this is a sun4u or sun4v system.
+ *
+ * For children of other nodes, it is bus type specific.  So
+ * we walk up the tree until we discover a "device_type" property
+ * we recognize and we go from there.
+ *
+ * As an example, the boot device on my workstation has a full path:
+ *
+ *     /pci@1e,600000/ide@d/disk@0,0:c
+ */
+static void __init sun4v_path_component(struct device_node *dp, char *tmp_buf)
+{
+       struct linux_prom64_registers *regs;
+       struct property *rprop;
+       u32 high_bits, low_bits, type;
+
+       rprop = of_find_property(dp, "reg", NULL);
+       if (!rprop)
+               return;
+
+       regs = rprop->value;
+       if (!is_root_node(dp->parent)) {
+               sprintf(tmp_buf, "%s@%x,%x",
+                       dp->name,
+                       (unsigned int) (regs->phys_addr >> 32UL),
+                       (unsigned int) (regs->phys_addr & 0xffffffffUL));
+               return;
+       }
+
+       type = regs->phys_addr >> 60UL;
+       high_bits = (regs->phys_addr >> 32UL) & 0x0fffffffUL;
+       low_bits = (regs->phys_addr & 0xffffffffUL);
+
+       if (type == 0 || type == 8) {
+               const char *prefix = (type == 0) ? "m" : "i";
+
+               if (low_bits)
+                       sprintf(tmp_buf, "%s@%s%x,%x",
+                               dp->name, prefix,
+                               high_bits, low_bits);
+               else
+                       sprintf(tmp_buf, "%s@%s%x",
+                               dp->name,
+                               prefix,
+                               high_bits);
+       } else if (type == 12) {
+               sprintf(tmp_buf, "%s@%x",
+                       dp->name, high_bits);
+       }
+}
+
+static void __init sun4u_path_component(struct device_node *dp, char *tmp_buf)
+{
+       struct linux_prom64_registers *regs;
+       struct property *prop;
+
+       prop = of_find_property(dp, "reg", NULL);
+       if (!prop)
+               return;
+
+       regs = prop->value;
+       if (!is_root_node(dp->parent)) {
+               sprintf(tmp_buf, "%s@%x,%x",
+                       dp->name,
+                       (unsigned int) (regs->phys_addr >> 32UL),
+                       (unsigned int) (regs->phys_addr & 0xffffffffUL));
+               return;
+       }
+
+       prop = of_find_property(dp, "upa-portid", NULL);
+       if (!prop)
+               prop = of_find_property(dp, "portid", NULL);
+       if (prop) {
+               unsigned long mask = 0xffffffffUL;
+
+               if (tlb_type >= cheetah)
+                       mask = 0x7fffff;
+
+               sprintf(tmp_buf, "%s@%x,%x",
+                       dp->name,
+                       *(u32 *)prop->value,
+                       (unsigned int) (regs->phys_addr & mask));
+       }
+}
+
+/* "name@slot,offset"  */
+static void __init sbus_path_component(struct device_node *dp, char *tmp_buf)
+{
+       struct linux_prom_registers *regs;
+       struct property *prop;
+
+       prop = of_find_property(dp, "reg", NULL);
+       if (!prop)
+               return;
+
+       regs = prop->value;
+       sprintf(tmp_buf, "%s@%x,%x",
+               dp->name,
+               regs->which_io,
+               regs->phys_addr);
+}
+
+/* "name@devnum[,func]" */
+static void __init pci_path_component(struct device_node *dp, char *tmp_buf)
+{
+       struct linux_prom_pci_registers *regs;
+       struct property *prop;
+       unsigned int devfn;
+
+       prop = of_find_property(dp, "reg", NULL);
+       if (!prop)
+               return;
+
+       regs = prop->value;
+       devfn = (regs->phys_hi >> 8) & 0xff;
+       if (devfn & 0x07) {
+               sprintf(tmp_buf, "%s@%x,%x",
+                       dp->name,
+                       devfn >> 3,
+                       devfn & 0x07);
+       } else {
+               sprintf(tmp_buf, "%s@%x",
+                       dp->name,
+                       devfn >> 3);
+       }
+}
+
+/* "name@UPA_PORTID,offset" */
+static void __init upa_path_component(struct device_node *dp, char *tmp_buf)
+{
+       struct linux_prom64_registers *regs;
+       struct property *prop;
+
+       prop = of_find_property(dp, "reg", NULL);
+       if (!prop)
+               return;
+
+       regs = prop->value;
+
+       prop = of_find_property(dp, "upa-portid", NULL);
+       if (!prop)
+               return;
+
+       sprintf(tmp_buf, "%s@%x,%x",
+               dp->name,
+               *(u32 *) prop->value,
+               (unsigned int) (regs->phys_addr & 0xffffffffUL));
+}
+
+/* "name@reg" */
+static void __init vdev_path_component(struct device_node *dp, char *tmp_buf)
+{
+       struct property *prop;
+       u32 *regs;
+
+       prop = of_find_property(dp, "reg", NULL);
+       if (!prop)
+               return;
+
+       regs = prop->value;
+
+       sprintf(tmp_buf, "%s@%x", dp->name, *regs);
+}
+
+/* "name@addrhi,addrlo" */
+static void __init ebus_path_component(struct device_node *dp, char *tmp_buf)
+{
+       struct linux_prom64_registers *regs;
+       struct property *prop;
+
+       prop = of_find_property(dp, "reg", NULL);
+       if (!prop)
+               return;
+
+       regs = prop->value;
+
+       sprintf(tmp_buf, "%s@%x,%x",
+               dp->name,
+               (unsigned int) (regs->phys_addr >> 32UL),
+               (unsigned int) (regs->phys_addr & 0xffffffffUL));
+}
+
+/* "name@bus,addr" */
+static void __init i2c_path_component(struct device_node *dp, char *tmp_buf)
+{
+       struct property *prop;
+       u32 *regs;
+
+       prop = of_find_property(dp, "reg", NULL);
+       if (!prop)
+               return;
+
+       regs = prop->value;
+
+       /* This actually isn't right... should look at the #address-cells
+        * property of the i2c bus node etc. etc.
+        */
+       sprintf(tmp_buf, "%s@%x,%x",
+               dp->name, regs[0], regs[1]);
+}
+
+/* "name@reg0[,reg1]" */
+static void __init usb_path_component(struct device_node *dp, char *tmp_buf)
+{
+       struct property *prop;
+       u32 *regs;
+
+       prop = of_find_property(dp, "reg", NULL);
+       if (!prop)
+               return;
+
+       regs = prop->value;
+
+       if (prop->length == sizeof(u32) || regs[1] == 1) {
+               sprintf(tmp_buf, "%s@%x",
+                       dp->name, regs[0]);
+       } else {
+               sprintf(tmp_buf, "%s@%x,%x",
+                       dp->name, regs[0], regs[1]);
+       }
+}
+
+/* "name@reg0reg1[,reg2reg3]" */
+static void __init ieee1394_path_component(struct device_node *dp, char *tmp_buf)
+{
+       struct property *prop;
+       u32 *regs;
+
+       prop = of_find_property(dp, "reg", NULL);
+       if (!prop)
+               return;
+
+       regs = prop->value;
+
+       if (regs[2] || regs[3]) {
+               sprintf(tmp_buf, "%s@%08x%08x,%04x%08x",
+                       dp->name, regs[0], regs[1], regs[2], regs[3]);
+       } else {
+               sprintf(tmp_buf, "%s@%08x%08x",
+                       dp->name, regs[0], regs[1]);
+       }
+}
+
+static void __init __build_path_component(struct device_node *dp, char *tmp_buf)
+{
+       struct device_node *parent = dp->parent;
+
+       if (parent != NULL) {
+               if (!strcmp(parent->type, "pci") ||
+                   !strcmp(parent->type, "pciex")) {
+                       pci_path_component(dp, tmp_buf);
+                       return;
+               }
+               if (!strcmp(parent->type, "sbus")) {
+                       sbus_path_component(dp, tmp_buf);
+                       return;
+               }
+               if (!strcmp(parent->type, "upa")) {
+                       upa_path_component(dp, tmp_buf);
+                       return;
+               }
+               if (!strcmp(parent->type, "ebus")) {
+                       ebus_path_component(dp, tmp_buf);
+                       return;
+               }
+               if (!strcmp(parent->name, "usb") ||
+                   !strcmp(parent->name, "hub")) {
+                       usb_path_component(dp, tmp_buf);
+                       return;
+               }
+               if (!strcmp(parent->type, "i2c")) {
+                       i2c_path_component(dp, tmp_buf);
+                       return;
+               }
+               if (!strcmp(parent->type, "firewire")) {
+                       ieee1394_path_component(dp, tmp_buf);
+                       return;
+               }
+               if (!strcmp(parent->type, "virtual-devices")) {
+                       vdev_path_component(dp, tmp_buf);
+                       return;
+               }
+               /* "isa" is handled with platform naming */
+       }
+
+       /* Use platform naming convention.  */
+       if (tlb_type == hypervisor) {
+               sun4v_path_component(dp, tmp_buf);
+               return;
+       } else {
+               sun4u_path_component(dp, tmp_buf);
+       }
+}
+
+char * __init build_path_component(struct device_node *dp)
+{
+       char tmp_buf[64], *n;
+
+       tmp_buf[0] = '\0';
+       __build_path_component(dp, tmp_buf);
+       if (tmp_buf[0] == '\0')
+               strcpy(tmp_buf, dp->name);
+
+       n = prom_early_alloc(strlen(tmp_buf) + 1);
+       strcpy(n, tmp_buf);
+
+       return n;
+}
+
+static const char *get_mid_prop(void)
+{
+       return (tlb_type == spitfire ? "upa-portid" : "portid");
+}
+
+struct device_node *of_find_node_by_cpuid(int cpuid)
+{
+       struct device_node *dp;
+       const char *mid_prop = get_mid_prop();
+
+       for_each_node_by_type(dp, "cpu") {
+               int id = of_getintprop_default(dp, mid_prop, -1);
+               const char *this_mid_prop = mid_prop;
+
+               if (id < 0) {
+                       this_mid_prop = "cpuid";
+                       id = of_getintprop_default(dp, this_mid_prop, -1);
+               }
+
+               if (id < 0) {
+                       prom_printf("OF: Serious problem, cpu lacks "
+                                   "%s property", this_mid_prop);
+                       prom_halt();
+               }
+               if (cpuid == id)
+                       return dp;
+       }
+       return NULL;
+}
+
+void __init of_fill_in_cpu_data(void)
+{
+       struct device_node *dp;
+       const char *mid_prop;
+
+       if (tlb_type == hypervisor)
+               return;
+
+       mid_prop = get_mid_prop();
+       ncpus_probed = 0;
+       for_each_node_by_type(dp, "cpu") {
+               int cpuid = of_getintprop_default(dp, mid_prop, -1);
+               const char *this_mid_prop = mid_prop;
+               struct device_node *portid_parent;
+               int portid = -1;
+
+               portid_parent = NULL;
+               if (cpuid < 0) {
+                       this_mid_prop = "cpuid";
+                       cpuid = of_getintprop_default(dp, this_mid_prop, -1);
+                       if (cpuid >= 0) {
+                               int limit = 2;
+
+                               portid_parent = dp;
+                               while (limit--) {
+                                       portid_parent = portid_parent->parent;
+                                       if (!portid_parent)
+                                               break;
+                                       portid = of_getintprop_default(portid_parent,
+                                                                      "portid", -1);
+                                       if (portid >= 0)
+                                               break;
+                               }
+                       }
+               }
+
+               if (cpuid < 0) {
+                       prom_printf("OF: Serious problem, cpu lacks "
+                                   "%s property", this_mid_prop);
+                       prom_halt();
+               }
+
+               ncpus_probed++;
+
+#ifdef CONFIG_SMP
+               if (cpuid >= NR_CPUS) {
+                       printk(KERN_WARNING "Ignoring CPU %d which is "
+                              ">= NR_CPUS (%d)\n",
+                              cpuid, NR_CPUS);
+                       continue;
+               }
+#else
+               /* On uniprocessor we only want the values for the
+                * real physical cpu the kernel booted onto, however
+                * cpu_data() only has one entry at index 0.
+                */
+               if (cpuid != real_hard_smp_processor_id())
+                       continue;
+               cpuid = 0;
+#endif
+
+               cpu_data(cpuid).clock_tick =
+                       of_getintprop_default(dp, "clock-frequency", 0);
+
+               if (portid_parent) {
+                       cpu_data(cpuid).dcache_size =
+                               of_getintprop_default(dp, "l1-dcache-size",
+                                                     16 * 1024);
+                       cpu_data(cpuid).dcache_line_size =
+                               of_getintprop_default(dp, "l1-dcache-line-size",
+                                                     32);
+                       cpu_data(cpuid).icache_size =
+                               of_getintprop_default(dp, "l1-icache-size",
+                                                     8 * 1024);
+                       cpu_data(cpuid).icache_line_size =
+                               of_getintprop_default(dp, "l1-icache-line-size",
+                                                     32);
+                       cpu_data(cpuid).ecache_size =
+                               of_getintprop_default(dp, "l2-cache-size", 0);
+                       cpu_data(cpuid).ecache_line_size =
+                               of_getintprop_default(dp, "l2-cache-line-size", 0);
+                       if (!cpu_data(cpuid).ecache_size ||
+                           !cpu_data(cpuid).ecache_line_size) {
+                               cpu_data(cpuid).ecache_size =
+                                       of_getintprop_default(portid_parent,
+                                                             "l2-cache-size",
+                                                             (4 * 1024 * 1024));
+                               cpu_data(cpuid).ecache_line_size =
+                                       of_getintprop_default(portid_parent,
+                                                             "l2-cache-line-size", 64);
+                       }
+
+                       cpu_data(cpuid).core_id = portid + 1;
+                       cpu_data(cpuid).proc_id = portid;
+#ifdef CONFIG_SMP
+                       sparc64_multi_core = 1;
+#endif
+               } else {
+                       cpu_data(cpuid).dcache_size =
+                               of_getintprop_default(dp, "dcache-size", 16 * 1024);
+                       cpu_data(cpuid).dcache_line_size =
+                               of_getintprop_default(dp, "dcache-line-size", 32);
+
+                       cpu_data(cpuid).icache_size =
+                               of_getintprop_default(dp, "icache-size", 16 * 1024);
+                       cpu_data(cpuid).icache_line_size =
+                               of_getintprop_default(dp, "icache-line-size", 32);
+
+                       cpu_data(cpuid).ecache_size =
+                               of_getintprop_default(dp, "ecache-size",
+                                                     (4 * 1024 * 1024));
+                       cpu_data(cpuid).ecache_line_size =
+                               of_getintprop_default(dp, "ecache-line-size", 64);
+
+                       cpu_data(cpuid).core_id = 0;
+                       cpu_data(cpuid).proc_id = -1;
+               }
+
+#ifdef CONFIG_SMP
+               cpu_set(cpuid, cpu_present_map);
+               cpu_set(cpuid, cpu_possible_map);
+#endif
+       }
+
+       smp_fill_in_sib_core_maps();
+}
+
+void __init of_console_init(void)
+{
+       char *msg = "OF stdout device is: %s\n";
+       struct device_node *dp;
+       const char *type;
+       phandle node;
+
+       of_console_path = prom_early_alloc(256);
+       if (prom_ihandle2path(prom_stdout, of_console_path, 256) < 0) {
+               prom_printf("Cannot obtain path of stdout.\n");
+               prom_halt();
+       }
+       of_console_options = strrchr(of_console_path, ':');
+       if (of_console_options) {
+               of_console_options++;
+               if (*of_console_options == '\0')
+                       of_console_options = NULL;
+       }
+
+       node = prom_inst2pkg(prom_stdout);
+       if (!node) {
+               prom_printf("Cannot resolve stdout node from "
+                           "instance %08x.\n", prom_stdout);
+               prom_halt();
+       }
+
+       dp = of_find_node_by_phandle(node);
+       type = of_get_property(dp, "device_type", NULL);
+       if (!type) {
+               prom_printf("Console stdout lacks device_type property.\n");
+               prom_halt();
+       }
+
+       if (strcmp(type, "display") && strcmp(type, "serial")) {
+               prom_printf("Console device_type is neither display "
+                           "nor serial.\n");
+               prom_halt();
+       }
+
+       of_console_device = dp;
+
+       printk(msg, of_console_path);
+}
diff --git a/arch/sparc/kernel/prom_common.c b/arch/sparc/kernel/prom_common.c
new file mode 100644 (file)
index 0000000..4e9af59
--- /dev/null
@@ -0,0 +1,326 @@
+/* prom_common.c: OF device tree support common code.
+ *
+ * Paul Mackerras      August 1996.
+ * Copyright (C) 1996-2005 Paul Mackerras.
+ *
+ *  Adapted for 64bit PowerPC by Dave Engebretsen and Peter Bergner.
+ *    {engebret|bergner}@us.ibm.com
+ *
+ *  Adapted for sparc by David S. Miller davem@davemloft.net
+ *
+ *      This program is free software; you can redistribute it and/or
+ *      modify it under the terms of the GNU General Public License
+ *      as published by the Free Software Foundation; either version
+ *      2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <asm/prom.h>
+#include <asm/oplib.h>
+
+#include "prom.h"
+
+struct device_node *of_console_device;
+EXPORT_SYMBOL(of_console_device);
+
+char *of_console_path;
+EXPORT_SYMBOL(of_console_path);
+
+char *of_console_options;
+EXPORT_SYMBOL(of_console_options);
+
+struct device_node *of_find_node_by_phandle(phandle handle)
+{
+       struct device_node *np;
+
+       for (np = allnodes; np; np = np->allnext)
+               if (np->node == handle)
+                       break;
+
+       return np;
+}
+EXPORT_SYMBOL(of_find_node_by_phandle);
+
+int of_getintprop_default(struct device_node *np, const char *name, int def)
+{
+       struct property *prop;
+       int len;
+
+       prop = of_find_property(np, name, &len);
+       if (!prop || len != 4)
+               return def;
+
+       return *(int *) prop->value;
+}
+EXPORT_SYMBOL(of_getintprop_default);
+
+DEFINE_MUTEX(of_set_property_mutex);
+EXPORT_SYMBOL(of_set_property_mutex);
+
+int of_set_property(struct device_node *dp, const char *name, void *val, int len)
+{
+       struct property **prevp;
+       void *new_val;
+       int err;
+
+       new_val = kmalloc(len, GFP_KERNEL);
+       if (!new_val)
+               return -ENOMEM;
+
+       memcpy(new_val, val, len);
+
+       err = -ENODEV;
+
+       write_lock(&devtree_lock);
+       prevp = &dp->properties;
+       while (*prevp) {
+               struct property *prop = *prevp;
+
+               if (!strcasecmp(prop->name, name)) {
+                       void *old_val = prop->value;
+                       int ret;
+
+                       mutex_lock(&of_set_property_mutex);
+                       ret = prom_setprop(dp->node, name, val, len);
+                       mutex_unlock(&of_set_property_mutex);
+
+                       err = -EINVAL;
+                       if (ret >= 0) {
+                               prop->value = new_val;
+                               prop->length = len;
+
+                               if (OF_IS_DYNAMIC(prop))
+                                       kfree(old_val);
+
+                               OF_MARK_DYNAMIC(prop);
+
+                               err = 0;
+                       }
+                       break;
+               }
+               prevp = &(*prevp)->next;
+       }
+       write_unlock(&devtree_lock);
+
+       /* XXX Update procfs if necessary... */
+
+       return err;
+}
+EXPORT_SYMBOL(of_set_property);
+
+int of_find_in_proplist(const char *list, const char *match, int len)
+{
+       while (len > 0) {
+               int l;
+
+               if (!strcmp(list, match))
+                       return 1;
+               l = strlen(list) + 1;
+               list += l;
+               len -= l;
+       }
+       return 0;
+}
+EXPORT_SYMBOL(of_find_in_proplist);
+
+unsigned int prom_unique_id;
+
+static struct property * __init build_one_prop(phandle node, char *prev,
+                                              char *special_name,
+                                              void *special_val,
+                                              int special_len)
+{
+       static struct property *tmp = NULL;
+       struct property *p;
+       const char *name;
+
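+       /* Reuse a property left over from a previous call that ran off
+        * the end of the node's property list (see the empty-name check
+        * below) instead of leaking it.
+        */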
+       if (tmp) {
+               p = tmp;
+               memset(p, 0, sizeof(*p) + 32);
+               tmp = NULL;
+       } else {
+               p = prom_early_alloc(sizeof(struct property) + 32);
+               p->unique_id = prom_unique_id++;
+       }
+
+       p->name = (char *) (p + 1);
+       if (special_name) {
+               strcpy(p->name, special_name);
+               p->length = special_len;
+               p->value = prom_early_alloc(special_len);
+               memcpy(p->value, special_val, special_len);
+       } else {
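+               /* prom_firstprop()/prom_nextprop() return the property name
+                * on sparc32 but copy it into the supplied buffer on sparc64,
+                * hence the two variants below.
+                */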
+#ifdef CONFIG_SPARC32
+               if (prev == NULL) {
+                       name = prom_firstprop(node, NULL);
+               } else {
+                       name = prom_nextprop(node, prev, NULL);
+               }
+#else
+               if (prev == NULL) {
+                       prom_firstprop(node, p->name);
+               } else {
+                       prom_nextprop(node, prev, p->name);
+               }
+               name = p->name;
+#endif
+               if (strlen(name) == 0) {
+                       tmp = p;
+                       return NULL;
+               }
+#ifdef CONFIG_SPARC32
+               strcpy(p->name, name);
+#endif
+               p->length = prom_getproplen(node, p->name);
+               if (p->length <= 0) {
+                       p->length = 0;
+               } else {
+                       int len;
+
+                       p->value = prom_early_alloc(p->length + 1);
+                       len = prom_getproperty(node, p->name, p->value,
+                                              p->length);
+                       if (len <= 0)
+                               p->length = 0;
+                       ((unsigned char *)p->value)[p->length] = '\0';
+               }
+       }
+       return p;
+}
+
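+/* Build the property list for 'node': a synthetic ".node" property holding
+ * the phandle itself, followed by every firmware property, walked with
+ * prom_firstprop()/prom_nextprop() until an empty name is returned.
+ */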
+static struct property * __init build_prop_list(phandle node)
+{
+       struct property *head, *tail;
+
+       head = tail = build_one_prop(node, NULL,
+                                    ".node", &node, sizeof(node));
+
+       tail->next = build_one_prop(node, NULL, NULL, NULL, 0);
+       tail = tail->next;
+       while(tail) {
+               tail->next = build_one_prop(node, tail->name,
+                                           NULL, NULL, 0);
+               tail = tail->next;
+       }
+
+       return head;
+}
+
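+/* Return a copy of property 'name' on 'node', or the literal string
+ * "<NULL>" if the node has no such property.
+ */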
+static char * __init get_one_property(phandle node, const char *name)
+{
+       char *buf = "<NULL>";
+       int len;
+
+       len = prom_getproplen(node, name);
+       if (len > 0) {
+               buf = prom_early_alloc(len);
+               len = prom_getproperty(node, name, buf, len);
+       }
+
+       return buf;
+}
+
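+/* Allocate and fill in a device_node for firmware node 'node': name,
+ * device_type, phandle, property list and IRQ translation state.
+ */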
+static struct device_node * __init prom_create_node(phandle node,
+                                                   struct device_node *parent)
+{
+       struct device_node *dp;
+
+       if (!node)
+               return NULL;
+
+       dp = prom_early_alloc(sizeof(*dp));
+       dp->unique_id = prom_unique_id++;
+       dp->parent = parent;
+
+       kref_init(&dp->kref);
+
+       dp->name = get_one_property(node, "name");
+       dp->type = get_one_property(node, "device_type");
+       dp->node = node;
+
+       dp->properties = build_prop_list(node);
+
+       irq_trans_init(dp);
+
+       return dp;
+}
+
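+/* Full path name = parent's full name + "/" + this node's path component.
+ * The root's full name is already "/", so no extra separator is added for
+ * its direct children.
+ */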
+static char * __init build_full_name(struct device_node *dp)
+{
+       int len, ourlen, plen;
+       char *n;
+
+       plen = strlen(dp->parent->full_name);
+       ourlen = strlen(dp->path_component_name);
+       len = ourlen + plen + 2;
+
+       n = prom_early_alloc(len);
+       strcpy(n, dp->parent->full_name);
+       if (!is_root_node(dp->parent)) {
+               strcpy(n + plen, "/");
+               plen++;
+       }
+       strcpy(n + plen, dp->path_component_name);
+
+       return n;
+}
+
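+/* Recursively convert 'node' and all of its siblings and children into
+ * device_nodes, threading every node onto the global allnext list via
+ * 'nextp' and returning the first sibling created.
+ */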
+static struct device_node * __init prom_build_tree(struct device_node *parent,
+                                                  phandle node,
+                                                  struct device_node ***nextp)
+{
+       struct device_node *ret = NULL, *prev_sibling = NULL;
+       struct device_node *dp;
+
+       while (1) {
+               dp = prom_create_node(node, parent);
+               if (!dp)
+                       break;
+
+               if (prev_sibling)
+                       prev_sibling->sibling = dp;
+
+               if (!ret)
+                       ret = dp;
+               prev_sibling = dp;
+
+               *(*nextp) = dp;
+               *nextp = &dp->allnext;
+
+               dp->path_component_name = build_path_component(dp);
+               dp->full_name = build_full_name(dp);
+
+               dp->child = prom_build_tree(dp, prom_getchild(node), nextp);
+
+               node = prom_getsibling(node);
+       }
+
+       return ret;
+}
+
+unsigned int prom_early_allocated __initdata;
+
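+/* Entry point: mirror the entire PROM device tree into the kernel, locate
+ * the console device, report how much early memory was used and fill in
+ * the per-cpu data.
+ */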
+void __init prom_build_devicetree(void)
+{
+       struct device_node **nextp;
+
+       allnodes = prom_create_node(prom_root_node, NULL);
+       allnodes->path_component_name = "";
+       allnodes->full_name = "/";
+
+       nextp = &allnodes->allnext;
+       allnodes->child = prom_build_tree(allnodes,
+                                         prom_getchild(allnodes->node),
+                                         &nextp);
+       of_console_init();
+
+       printk("PROM: Built device tree with %u bytes of memory.\n",
+              prom_early_allocated);
+
+       of_fill_in_cpu_data();
+}
similarity index 53%
rename from arch/sparc64/kernel/prom.c
rename to arch/sparc/kernel/prom_irqtrans.c
index dbba82f9b142c3e950911bdcb4a0359707b1b5bd..96958c4dce8ee1d531382b18a369cecba5fa5d40 100644 (file)
-/*
- * Procedures for creating, accessing and interpreting the device tree.
- *
- * Paul Mackerras      August 1996.
- * Copyright (C) 1996-2005 Paul Mackerras.
- * 
- *  Adapted for 64bit PowerPC by Dave Engebretsen and Peter Bergner.
- *    {engebret|bergner}@us.ibm.com 
- *
- *  Adapted for sparc64 by David S. Miller davem@davemloft.net
- *
- *      This program is free software; you can redistribute it and/or
- *      modify it under the terms of the GNU General Public License
- *      as published by the Free Software Foundation; either version
- *      2 of the License, or (at your option) any later version.
- */
-
 #include <linux/kernel.h>
-#include <linux/types.h>
 #include <linux/string.h>
-#include <linux/mm.h>
-#include <linux/module.h>
-#include <linux/lmb.h>
-#include <linux/of_device.h>
+#include <linux/init.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
 
-#include <asm/prom.h>
 #include <asm/oplib.h>
+#include <asm/prom.h>
 #include <asm/irq.h>
-#include <asm/asi.h>
 #include <asm/upa.h>
-#include <asm/smp.h>
-
-extern struct device_node *allnodes;   /* temporary while merging */
-
-extern rwlock_t devtree_lock;  /* temporary while merging */
-
-struct device_node *of_find_node_by_phandle(phandle handle)
-{
-       struct device_node *np;
-
-       for (np = allnodes; np; np = np->allnext)
-               if (np->node == handle)
-                       break;
-
-       return np;
-}
-EXPORT_SYMBOL(of_find_node_by_phandle);
-
-int of_getintprop_default(struct device_node *np, const char *name, int def)
-{
-       struct property *prop;
-       int len;
-
-       prop = of_find_property(np, name, &len);
-       if (!prop || len != 4)
-               return def;
-
-       return *(int *) prop->value;
-}
-EXPORT_SYMBOL(of_getintprop_default);
-
-DEFINE_MUTEX(of_set_property_mutex);
-EXPORT_SYMBOL(of_set_property_mutex);
-
-int of_set_property(struct device_node *dp, const char *name, void *val, int len)
-{
-       struct property **prevp;
-       void *new_val;
-       int err;
-
-       new_val = kmalloc(len, GFP_KERNEL);
-       if (!new_val)
-               return -ENOMEM;
-
-       memcpy(new_val, val, len);
-
-       err = -ENODEV;
-
-       write_lock(&devtree_lock);
-       prevp = &dp->properties;
-       while (*prevp) {
-               struct property *prop = *prevp;
-
-               if (!strcasecmp(prop->name, name)) {
-                       void *old_val = prop->value;
-                       int ret;
-
-                       mutex_lock(&of_set_property_mutex);
-                       ret = prom_setprop(dp->node, name, val, len);
-                       mutex_unlock(&of_set_property_mutex);
-
-                       err = -EINVAL;
-                       if (ret >= 0) {
-                               prop->value = new_val;
-                               prop->length = len;
-
-                               if (OF_IS_DYNAMIC(prop))
-                                       kfree(old_val);
-
-                               OF_MARK_DYNAMIC(prop);
-
-                               err = 0;
-                       }
-                       break;
-               }
-               prevp = &(*prevp)->next;
-       }
-       write_unlock(&devtree_lock);
-
-       /* XXX Upate procfs if necessary... */
 
-       return err;
-}
-EXPORT_SYMBOL(of_set_property);
-
-int of_find_in_proplist(const char *list, const char *match, int len)
-{
-       while (len > 0) {
-               int l;
-
-               if (!strcmp(list, match))
-                       return 1;
-               l = strlen(list) + 1;
-               list += l;
-               len -= l;
-       }
-       return 0;
-}
-EXPORT_SYMBOL(of_find_in_proplist);
-
-static unsigned int prom_early_allocated __initdata;
-
-static void * __init prom_early_alloc(unsigned long size)
-{
-       unsigned long paddr = lmb_alloc(size, SMP_CACHE_BYTES);
-       void *ret;
-
-       if (!paddr) {
-               prom_printf("prom_early_alloc(%lu) failed\n");
-               prom_halt();
-       }
-
-       ret = __va(paddr);
-       memset(ret, 0, size);
-       prom_early_allocated += size;
-
-       return ret;
-}
+#include "prom.h"
 
 #ifdef CONFIG_PCI
 /* PSYCHO interrupt mapping support. */
@@ -936,7 +800,7 @@ static void __init sun4v_vdev_irq_trans_init(struct device_node *dp)
                ((regs->phys_addr >> 32UL) & 0x0fffffff);
 }
 
-static void __init irq_trans_init(struct device_node *dp)
+void __init irq_trans_init(struct device_node *dp)
 {
 #ifdef CONFIG_PCI
        const char *model;
@@ -976,709 +840,3 @@ static void __init irq_trans_init(struct device_node *dp)
                return;
        }
 }
-
-static int is_root_node(const struct device_node *dp)
-{
-       if (!dp)
-               return 0;
-
-       return (dp->parent == NULL);
-}
-
-/* The following routines deal with the black magic of fully naming a
- * node.
- *
- * Certain well known named nodes are just the simple name string.
- *
- * Actual devices have an address specifier appended to the base name
- * string, like this "foo@addr".  The "addr" can be in any number of
- * formats, and the platform plus the type of the node determine the
- * format and how it is constructed.
- *
- * For children of the ROOT node, the naming convention is fixed and
- * determined by whether this is a sun4u or sun4v system.
- *
- * For children of other nodes, it is bus type specific.  So
- * we walk up the tree until we discover a "device_type" property
- * we recognize and we go from there.
- *
- * As an example, the boot device on my workstation has a full path:
- *
- *     /pci@1e,600000/ide@d/disk@0,0:c
- */
-static void __init sun4v_path_component(struct device_node *dp, char *tmp_buf)
-{
-       struct linux_prom64_registers *regs;
-       struct property *rprop;
-       u32 high_bits, low_bits, type;
-
-       rprop = of_find_property(dp, "reg", NULL);
-       if (!rprop)
-               return;
-
-       regs = rprop->value;
-       if (!is_root_node(dp->parent)) {
-               sprintf(tmp_buf, "%s@%x,%x",
-                       dp->name,
-                       (unsigned int) (regs->phys_addr >> 32UL),
-                       (unsigned int) (regs->phys_addr & 0xffffffffUL));
-               return;
-       }
-
-       type = regs->phys_addr >> 60UL;
-       high_bits = (regs->phys_addr >> 32UL) & 0x0fffffffUL;
-       low_bits = (regs->phys_addr & 0xffffffffUL);
-
-       if (type == 0 || type == 8) {
-               const char *prefix = (type == 0) ? "m" : "i";
-
-               if (low_bits)
-                       sprintf(tmp_buf, "%s@%s%x,%x",
-                               dp->name, prefix,
-                               high_bits, low_bits);
-               else
-                       sprintf(tmp_buf, "%s@%s%x",
-                               dp->name,
-                               prefix,
-                               high_bits);
-       } else if (type == 12) {
-               sprintf(tmp_buf, "%s@%x",
-                       dp->name, high_bits);
-       }
-}
-
-static void __init sun4u_path_component(struct device_node *dp, char *tmp_buf)
-{
-       struct linux_prom64_registers *regs;
-       struct property *prop;
-
-       prop = of_find_property(dp, "reg", NULL);
-       if (!prop)
-               return;
-
-       regs = prop->value;
-       if (!is_root_node(dp->parent)) {
-               sprintf(tmp_buf, "%s@%x,%x",
-                       dp->name,
-                       (unsigned int) (regs->phys_addr >> 32UL),
-                       (unsigned int) (regs->phys_addr & 0xffffffffUL));
-               return;
-       }
-
-       prop = of_find_property(dp, "upa-portid", NULL);
-       if (!prop)
-               prop = of_find_property(dp, "portid", NULL);
-       if (prop) {
-               unsigned long mask = 0xffffffffUL;
-
-               if (tlb_type >= cheetah)
-                       mask = 0x7fffff;
-
-               sprintf(tmp_buf, "%s@%x,%x",
-                       dp->name,
-                       *(u32 *)prop->value,
-                       (unsigned int) (regs->phys_addr & mask));
-       }
-}
-
-/* "name@slot,offset"  */
-static void __init sbus_path_component(struct device_node *dp, char *tmp_buf)
-{
-       struct linux_prom_registers *regs;
-       struct property *prop;
-
-       prop = of_find_property(dp, "reg", NULL);
-       if (!prop)
-               return;
-
-       regs = prop->value;
-       sprintf(tmp_buf, "%s@%x,%x",
-               dp->name,
-               regs->which_io,
-               regs->phys_addr);
-}
-
-/* "name@devnum[,func]" */
-static void __init pci_path_component(struct device_node *dp, char *tmp_buf)
-{
-       struct linux_prom_pci_registers *regs;
-       struct property *prop;
-       unsigned int devfn;
-
-       prop = of_find_property(dp, "reg", NULL);
-       if (!prop)
-               return;
-
-       regs = prop->value;
-       devfn = (regs->phys_hi >> 8) & 0xff;
-       if (devfn & 0x07) {
-               sprintf(tmp_buf, "%s@%x,%x",
-                       dp->name,
-                       devfn >> 3,
-                       devfn & 0x07);
-       } else {
-               sprintf(tmp_buf, "%s@%x",
-                       dp->name,
-                       devfn >> 3);
-       }
-}
-
-/* "name@UPA_PORTID,offset" */
-static void __init upa_path_component(struct device_node *dp, char *tmp_buf)
-{
-       struct linux_prom64_registers *regs;
-       struct property *prop;
-
-       prop = of_find_property(dp, "reg", NULL);
-       if (!prop)
-               return;
-
-       regs = prop->value;
-
-       prop = of_find_property(dp, "upa-portid", NULL);
-       if (!prop)
-               return;
-
-       sprintf(tmp_buf, "%s@%x,%x",
-               dp->name,
-               *(u32 *) prop->value,
-               (unsigned int) (regs->phys_addr & 0xffffffffUL));
-}
-
-/* "name@reg" */
-static void __init vdev_path_component(struct device_node *dp, char *tmp_buf)
-{
-       struct property *prop;
-       u32 *regs;
-
-       prop = of_find_property(dp, "reg", NULL);
-       if (!prop)
-               return;
-
-       regs = prop->value;
-
-       sprintf(tmp_buf, "%s@%x", dp->name, *regs);
-}
-
-/* "name@addrhi,addrlo" */
-static void __init ebus_path_component(struct device_node *dp, char *tmp_buf)
-{
-       struct linux_prom64_registers *regs;
-       struct property *prop;
-
-       prop = of_find_property(dp, "reg", NULL);
-       if (!prop)
-               return;
-
-       regs = prop->value;
-
-       sprintf(tmp_buf, "%s@%x,%x",
-               dp->name,
-               (unsigned int) (regs->phys_addr >> 32UL),
-               (unsigned int) (regs->phys_addr & 0xffffffffUL));
-}
-
-/* "name@bus,addr" */
-static void __init i2c_path_component(struct device_node *dp, char *tmp_buf)
-{
-       struct property *prop;
-       u32 *regs;
-
-       prop = of_find_property(dp, "reg", NULL);
-       if (!prop)
-               return;
-
-       regs = prop->value;
-
-       /* This actually isn't right... should look at the #address-cells
-        * property of the i2c bus node etc. etc.
-        */
-       sprintf(tmp_buf, "%s@%x,%x",
-               dp->name, regs[0], regs[1]);
-}
-
-/* "name@reg0[,reg1]" */
-static void __init usb_path_component(struct device_node *dp, char *tmp_buf)
-{
-       struct property *prop;
-       u32 *regs;
-
-       prop = of_find_property(dp, "reg", NULL);
-       if (!prop)
-               return;
-
-       regs = prop->value;
-
-       if (prop->length == sizeof(u32) || regs[1] == 1) {
-               sprintf(tmp_buf, "%s@%x",
-                       dp->name, regs[0]);
-       } else {
-               sprintf(tmp_buf, "%s@%x,%x",
-                       dp->name, regs[0], regs[1]);
-       }
-}
-
-/* "name@reg0reg1[,reg2reg3]" */
-static void __init ieee1394_path_component(struct device_node *dp, char *tmp_buf)
-{
-       struct property *prop;
-       u32 *regs;
-
-       prop = of_find_property(dp, "reg", NULL);
-       if (!prop)
-               return;
-
-       regs = prop->value;
-
-       if (regs[2] || regs[3]) {
-               sprintf(tmp_buf, "%s@%08x%08x,%04x%08x",
-                       dp->name, regs[0], regs[1], regs[2], regs[3]);
-       } else {
-               sprintf(tmp_buf, "%s@%08x%08x",
-                       dp->name, regs[0], regs[1]);
-       }
-}
-
-static void __init __build_path_component(struct device_node *dp, char *tmp_buf)
-{
-       struct device_node *parent = dp->parent;
-
-       if (parent != NULL) {
-               if (!strcmp(parent->type, "pci") ||
-                   !strcmp(parent->type, "pciex")) {
-                       pci_path_component(dp, tmp_buf);
-                       return;
-               }
-               if (!strcmp(parent->type, "sbus")) {
-                       sbus_path_component(dp, tmp_buf);
-                       return;
-               }
-               if (!strcmp(parent->type, "upa")) {
-                       upa_path_component(dp, tmp_buf);
-                       return;
-               }
-               if (!strcmp(parent->type, "ebus")) {
-                       ebus_path_component(dp, tmp_buf);
-                       return;
-               }
-               if (!strcmp(parent->name, "usb") ||
-                   !strcmp(parent->name, "hub")) {
-                       usb_path_component(dp, tmp_buf);
-                       return;
-               }
-               if (!strcmp(parent->type, "i2c")) {
-                       i2c_path_component(dp, tmp_buf);
-                       return;
-               }
-               if (!strcmp(parent->type, "firewire")) {
-                       ieee1394_path_component(dp, tmp_buf);
-                       return;
-               }
-               if (!strcmp(parent->type, "virtual-devices")) {
-                       vdev_path_component(dp, tmp_buf);
-                       return;
-               }
-               /* "isa" is handled with platform naming */
-       }
-
-       /* Use platform naming convention.  */
-       if (tlb_type == hypervisor) {
-               sun4v_path_component(dp, tmp_buf);
-               return;
-       } else {
-               sun4u_path_component(dp, tmp_buf);
-       }
-}
-
-static char * __init build_path_component(struct device_node *dp)
-{
-       char tmp_buf[64], *n;
-
-       tmp_buf[0] = '\0';
-       __build_path_component(dp, tmp_buf);
-       if (tmp_buf[0] == '\0')
-               strcpy(tmp_buf, dp->name);
-
-       n = prom_early_alloc(strlen(tmp_buf) + 1);
-       strcpy(n, tmp_buf);
-
-       return n;
-}
-
-static char * __init build_full_name(struct device_node *dp)
-{
-       int len, ourlen, plen;
-       char *n;
-
-       plen = strlen(dp->parent->full_name);
-       ourlen = strlen(dp->path_component_name);
-       len = ourlen + plen + 2;
-
-       n = prom_early_alloc(len);
-       strcpy(n, dp->parent->full_name);
-       if (!is_root_node(dp->parent)) {
-               strcpy(n + plen, "/");
-               plen++;
-       }
-       strcpy(n + plen, dp->path_component_name);
-
-       return n;
-}
-
-static unsigned int unique_id;
-
-static struct property * __init build_one_prop(phandle node, char *prev, char *special_name, void *special_val, int special_len)
-{
-       static struct property *tmp = NULL;
-       struct property *p;
-
-       if (tmp) {
-               p = tmp;
-               memset(p, 0, sizeof(*p) + 32);
-               tmp = NULL;
-       } else {
-               p = prom_early_alloc(sizeof(struct property) + 32);
-               p->unique_id = unique_id++;
-       }
-
-       p->name = (char *) (p + 1);
-       if (special_name) {
-               strcpy(p->name, special_name);
-               p->length = special_len;
-               p->value = prom_early_alloc(special_len);
-               memcpy(p->value, special_val, special_len);
-       } else {
-               if (prev == NULL) {
-                       prom_firstprop(node, p->name);
-               } else {
-                       prom_nextprop(node, prev, p->name);
-               }
-               if (strlen(p->name) == 0) {
-                       tmp = p;
-                       return NULL;
-               }
-               p->length = prom_getproplen(node, p->name);
-               if (p->length <= 0) {
-                       p->length = 0;
-               } else {
-                       p->value = prom_early_alloc(p->length + 1);
-                       prom_getproperty(node, p->name, p->value, p->length);
-                       ((unsigned char *)p->value)[p->length] = '\0';
-               }
-       }
-       return p;
-}
-
-static struct property * __init build_prop_list(phandle node)
-{
-       struct property *head, *tail;
-
-       head = tail = build_one_prop(node, NULL,
-                                    ".node", &node, sizeof(node));
-
-       tail->next = build_one_prop(node, NULL, NULL, NULL, 0);
-       tail = tail->next;
-       while(tail) {
-               tail->next = build_one_prop(node, tail->name,
-                                           NULL, NULL, 0);
-               tail = tail->next;
-       }
-
-       return head;
-}
-
-static char * __init get_one_property(phandle node, const char *name)
-{
-       char *buf = "<NULL>";
-       int len;
-
-       len = prom_getproplen(node, name);
-       if (len > 0) {
-               buf = prom_early_alloc(len);
-               prom_getproperty(node, name, buf, len);
-       }
-
-       return buf;
-}
-
-static struct device_node * __init create_node(phandle node, struct device_node *parent)
-{
-       struct device_node *dp;
-
-       if (!node)
-               return NULL;
-
-       dp = prom_early_alloc(sizeof(*dp));
-       dp->unique_id = unique_id++;
-       dp->parent = parent;
-
-       kref_init(&dp->kref);
-
-       dp->name = get_one_property(node, "name");
-       dp->type = get_one_property(node, "device_type");
-       dp->node = node;
-
-       dp->properties = build_prop_list(node);
-
-       irq_trans_init(dp);
-
-       return dp;
-}
-
-static struct device_node * __init build_tree(struct device_node *parent, phandle node, struct device_node ***nextp)
-{
-       struct device_node *ret = NULL, *prev_sibling = NULL;
-       struct device_node *dp;
-
-       while (1) {
-               dp = create_node(node, parent);
-               if (!dp)
-                       break;
-
-               if (prev_sibling)
-                       prev_sibling->sibling = dp;
-
-               if (!ret)
-                       ret = dp;
-               prev_sibling = dp;
-
-               *(*nextp) = dp;
-               *nextp = &dp->allnext;
-
-               dp->path_component_name = build_path_component(dp);
-               dp->full_name = build_full_name(dp);
-
-               dp->child = build_tree(dp, prom_getchild(node), nextp);
-
-               node = prom_getsibling(node);
-       }
-
-       return ret;
-}
-
-static const char *get_mid_prop(void)
-{
-       return (tlb_type == spitfire ? "upa-portid" : "portid");
-}
-
-struct device_node *of_find_node_by_cpuid(int cpuid)
-{
-       struct device_node *dp;
-       const char *mid_prop = get_mid_prop();
-
-       for_each_node_by_type(dp, "cpu") {
-               int id = of_getintprop_default(dp, mid_prop, -1);
-               const char *this_mid_prop = mid_prop;
-
-               if (id < 0) {
-                       this_mid_prop = "cpuid";
-                       id = of_getintprop_default(dp, this_mid_prop, -1);
-               }
-
-               if (id < 0) {
-                       prom_printf("OF: Serious problem, cpu lacks "
-                                   "%s property", this_mid_prop);
-                       prom_halt();
-               }
-               if (cpuid == id)
-                       return dp;
-       }
-       return NULL;
-}
-
-static void __init of_fill_in_cpu_data(void)
-{
-       struct device_node *dp;
-       const char *mid_prop = get_mid_prop();
-
-       ncpus_probed = 0;
-       for_each_node_by_type(dp, "cpu") {
-               int cpuid = of_getintprop_default(dp, mid_prop, -1);
-               const char *this_mid_prop = mid_prop;
-               struct device_node *portid_parent;
-               int portid = -1;
-
-               portid_parent = NULL;
-               if (cpuid < 0) {
-                       this_mid_prop = "cpuid";
-                       cpuid = of_getintprop_default(dp, this_mid_prop, -1);
-                       if (cpuid >= 0) {
-                               int limit = 2;
-
-                               portid_parent = dp;
-                               while (limit--) {
-                                       portid_parent = portid_parent->parent;
-                                       if (!portid_parent)
-                                               break;
-                                       portid = of_getintprop_default(portid_parent,
-                                                                      "portid", -1);
-                                       if (portid >= 0)
-                                               break;
-                               }
-                       }
-               }
-
-               if (cpuid < 0) {
-                       prom_printf("OF: Serious problem, cpu lacks "
-                                   "%s property", this_mid_prop);
-                       prom_halt();
-               }
-
-               ncpus_probed++;
-
-#ifdef CONFIG_SMP
-               if (cpuid >= NR_CPUS) {
-                       printk(KERN_WARNING "Ignoring CPU %d which is "
-                              ">= NR_CPUS (%d)\n",
-                              cpuid, NR_CPUS);
-                       continue;
-               }
-#else
-               /* On uniprocessor we only want the values for the
-                * real physical cpu the kernel booted onto, however
-                * cpu_data() only has one entry at index 0.
-                */
-               if (cpuid != real_hard_smp_processor_id())
-                       continue;
-               cpuid = 0;
-#endif
-
-               cpu_data(cpuid).clock_tick =
-                       of_getintprop_default(dp, "clock-frequency", 0);
-
-               if (portid_parent) {
-                       cpu_data(cpuid).dcache_size =
-                               of_getintprop_default(dp, "l1-dcache-size",
-                                                     16 * 1024);
-                       cpu_data(cpuid).dcache_line_size =
-                               of_getintprop_default(dp, "l1-dcache-line-size",
-                                                     32);
-                       cpu_data(cpuid).icache_size =
-                               of_getintprop_default(dp, "l1-icache-size",
-                                                     8 * 1024);
-                       cpu_data(cpuid).icache_line_size =
-                               of_getintprop_default(dp, "l1-icache-line-size",
-                                                     32);
-                       cpu_data(cpuid).ecache_size =
-                               of_getintprop_default(dp, "l2-cache-size", 0);
-                       cpu_data(cpuid).ecache_line_size =
-                               of_getintprop_default(dp, "l2-cache-line-size", 0);
-                       if (!cpu_data(cpuid).ecache_size ||
-                           !cpu_data(cpuid).ecache_line_size) {
-                               cpu_data(cpuid).ecache_size =
-                                       of_getintprop_default(portid_parent,
-                                                             "l2-cache-size",
-                                                             (4 * 1024 * 1024));
-                               cpu_data(cpuid).ecache_line_size =
-                                       of_getintprop_default(portid_parent,
-                                                             "l2-cache-line-size", 64);
-                       }
-
-                       cpu_data(cpuid).core_id = portid + 1;
-                       cpu_data(cpuid).proc_id = portid;
-#ifdef CONFIG_SMP
-                       sparc64_multi_core = 1;
-#endif
-               } else {
-                       cpu_data(cpuid).dcache_size =
-                               of_getintprop_default(dp, "dcache-size", 16 * 1024);
-                       cpu_data(cpuid).dcache_line_size =
-                               of_getintprop_default(dp, "dcache-line-size", 32);
-
-                       cpu_data(cpuid).icache_size =
-                               of_getintprop_default(dp, "icache-size", 16 * 1024);
-                       cpu_data(cpuid).icache_line_size =
-                               of_getintprop_default(dp, "icache-line-size", 32);
-
-                       cpu_data(cpuid).ecache_size =
-                               of_getintprop_default(dp, "ecache-size",
-                                                     (4 * 1024 * 1024));
-                       cpu_data(cpuid).ecache_line_size =
-                               of_getintprop_default(dp, "ecache-line-size", 64);
-
-                       cpu_data(cpuid).core_id = 0;
-                       cpu_data(cpuid).proc_id = -1;
-               }
-
-#ifdef CONFIG_SMP
-               cpu_set(cpuid, cpu_present_map);
-               cpu_set(cpuid, cpu_possible_map);
-#endif
-       }
-
-       smp_fill_in_sib_core_maps();
-}
-
-struct device_node *of_console_device;
-EXPORT_SYMBOL(of_console_device);
-
-char *of_console_path;
-EXPORT_SYMBOL(of_console_path);
-
-char *of_console_options;
-EXPORT_SYMBOL(of_console_options);
-
-static void __init of_console_init(void)
-{
-       char *msg = "OF stdout device is: %s\n";
-       struct device_node *dp;
-       const char *type;
-       phandle node;
-
-       of_console_path = prom_early_alloc(256);
-       if (prom_ihandle2path(prom_stdout, of_console_path, 256) < 0) {
-               prom_printf("Cannot obtain path of stdout.\n");
-               prom_halt();
-       }
-       of_console_options = strrchr(of_console_path, ':');
-       if (of_console_options) {
-               of_console_options++;
-               if (*of_console_options == '\0')
-                       of_console_options = NULL;
-       }
-
-       node = prom_inst2pkg(prom_stdout);
-       if (!node) {
-               prom_printf("Cannot resolve stdout node from "
-                           "instance %08x.\n", prom_stdout);
-               prom_halt();
-       }
-
-       dp = of_find_node_by_phandle(node);
-       type = of_get_property(dp, "device_type", NULL);
-       if (!type) {
-               prom_printf("Console stdout lacks device_type property.\n");
-               prom_halt();
-       }
-
-       if (strcmp(type, "display") && strcmp(type, "serial")) {
-               prom_printf("Console device_type is neither display "
-                           "nor serial.\n");
-               prom_halt();
-       }
-
-       of_console_device = dp;
-
-       printk(msg, of_console_path);
-}
-
-void __init prom_build_devicetree(void)
-{
-       struct device_node **nextp;
-
-       allnodes = create_node(prom_root_node, NULL);
-       allnodes->path_component_name = "";
-       allnodes->full_name = "/";
-
-       nextp = &allnodes->allnext;
-       allnodes->child = build_tree(allnodes,
-                                    prom_getchild(allnodes->node),
-                                    &nextp);
-       of_console_init();
-
-       printk("PROM: Built device tree with %u bytes of memory.\n",
-              prom_early_allocated);
-
-       if (tlb_type != hypervisor)
-               of_fill_in_cpu_data();
-}
similarity index 95%
rename from arch/sparc64/kernel/rtrap.S
rename to arch/sparc/kernel/rtrap_64.S
index 97a993c1f7f31bd62e7b310b08077b084622adff..fd3cee4d117c66ddc0fce0c3ef89bd923cd4f3b2 100644 (file)
@@ -14,9 +14,9 @@
 #include <asm/visasm.h>
 #include <asm/processor.h>
 
-#define                RTRAP_PSTATE            (PSTATE_RMO|PSTATE_PEF|PSTATE_PRIV|PSTATE_IE)
-#define                RTRAP_PSTATE_IRQOFF     (PSTATE_RMO|PSTATE_PEF|PSTATE_PRIV)
-#define                RTRAP_PSTATE_AG_IRQOFF  (PSTATE_RMO|PSTATE_PEF|PSTATE_PRIV|PSTATE_AG)
+#define                RTRAP_PSTATE            (PSTATE_TSO|PSTATE_PEF|PSTATE_PRIV|PSTATE_IE)
+#define                RTRAP_PSTATE_IRQOFF     (PSTATE_TSO|PSTATE_PEF|PSTATE_PRIV)
+#define                RTRAP_PSTATE_AG_IRQOFF  (PSTATE_TSO|PSTATE_PEF|PSTATE_PRIV|PSTATE_AG)
 
                .text
                .align                  32
@@ -132,6 +132,18 @@ __handle_signal:
                ba,pt                   %xcc, __handle_signal_continue
                 andn                   %l1, %l4, %l1
 
+               /* When returning from an NMI (%pil==15) interrupt we want to
+                * avoid running softirqs, doing IRQ tracing, preempting, etc.
+                */
+               .globl                  rtrap_nmi
+rtrap_nmi:     ldx                     [%sp + PTREGS_OFF + PT_V9_TSTATE], %l1
+               sethi                   %hi(0xf << 20), %l4
+               and                     %l1, %l4, %l4
+               andn                    %l1, %l4, %l1
+               srl                     %l4, 20, %l4
+               ba,pt                   %xcc, rtrap_no_irq_enable
+                wrpr                   %l4, %pil
+
                .align                  64
                .globl                  rtrap_irq, rtrap, irqsz_patchme, rtrap_xcall
 rtrap_irq:
@@ -161,8 +173,8 @@ rtrap_xcall:
                call                    trace_hardirqs_on
                 nop
                wrpr                    %l4, %pil
-rtrap_no_irq_enable:
 #endif
+rtrap_no_irq_enable:
                andcc                   %l1, TSTATE_PRIV, %l3
                bne,pn                  %icc, to_kernel
                 nop
similarity index 98%
rename from arch/sparc/kernel/setup.c
rename to arch/sparc/kernel/setup_32.c
index 24fe3078bd4bf8d400d4b081745cd3770150e1b8..c96c65d1b58b94362b437964fda1be4e2155896f 100644 (file)
@@ -46,6 +46,8 @@
 #include <asm/cpudata.h>
 #include <asm/setup.h>
 
+#include "kernel.h"
+
 struct screen_info screen_info = {
        0, 0,                   /* orig-x, orig-y */
        0,                      /* unused */
@@ -308,9 +310,6 @@ void __init setup_arch(char **cmdline_p)
        smp_setup_cpu_possible_map();
 }
 
-extern char *sparc_cpu_type;
-extern char *sparc_fpu_type;
-
 static int ncpus_probed;
 
 static int show_cpuinfo(struct seq_file *m, void *__unused)
@@ -328,8 +327,8 @@ static int show_cpuinfo(struct seq_file *m, void *__unused)
                   "CPU0ClkTck\t: %ld\n"
 #endif
                   ,
-                  sparc_cpu_type ? sparc_cpu_type : "undetermined",
-                  sparc_fpu_type ? sparc_fpu_type : "undetermined",
+                  sparc_cpu_type,
+                  sparc_fpu_type,
                   romvec->pv_romvers,
                   prom_rev,
                   romvec->pv_printrev >> 16,
similarity index 99%
rename from arch/sparc64/kernel/setup.c
rename to arch/sparc/kernel/setup_64.c
index c8b03a4f68bf8554a5f7e31b0c873474bac730f7..555db7452ebe2321ffbdcb7f19ca7e1b80522f9d 100644 (file)
@@ -52,6 +52,7 @@
 #endif
 
 #include "entry.h"
+#include "kernel.h"
 
 /* Used to synchronize accesses to NatSemi SUPER I/O chip configure
  * operations in asm/ns87303.h
similarity index 99%
rename from arch/sparc64/kernel/smp.c
rename to arch/sparc/kernel/smp_64.c
index f500b0618bb0d3b4badcdee2c5c8cb20225c97e9..bfe99d82d458702d32bf52a863cf00345e04a8e2 100644 (file)
@@ -163,7 +163,7 @@ static inline long get_delta (long *rt, long *master)
        for (i = 0; i < NUM_ITERS; i++) {
                t0 = tick_ops->get_tick();
                go[MASTER] = 1;
-               membar_storeload();
+               membar_safe("#StoreLoad");
                while (!(tm = go[SLAVE]))
                        rmb();
                go[SLAVE] = 0;
@@ -257,7 +257,7 @@ static void smp_synchronize_one_tick(int cpu)
 
        /* now let the client proceed into his loop */
        go[MASTER] = 0;
-       membar_storeload();
+       membar_safe("#StoreLoad");
 
        spin_lock_irqsave(&itc_sync_lock, flags);
        {
@@ -267,7 +267,7 @@ static void smp_synchronize_one_tick(int cpu)
                        go[MASTER] = 0;
                        wmb();
                        go[SLAVE] = tick_ops->get_tick();
-                       membar_storeload();
+                       membar_safe("#StoreLoad");
                }
        }
        spin_unlock_irqrestore(&itc_sync_lock, flags);
@@ -773,7 +773,7 @@ static void xcall_deliver(u64 data0, u64 data1, u64 data2, const cpumask_t *mask
 
        /* Setup the initial cpu list.  */
        cnt = 0;
-       for_each_cpu_mask_nr(i, *mask) {
+       for_each_cpu(i, mask) {
                if (i == this_cpu || !cpu_online(i))
                        continue;
                cpu_list[cnt++] = i;
@@ -1122,7 +1122,6 @@ void smp_capture(void)
                       smp_processor_id());
 #endif
                penguins_are_doing_time = 1;
-               membar_storestore_loadstore();
                atomic_inc(&smp_capture_registry);
                smp_cross_call(&xcall_capture, 0, 0, 0);
                while (atomic_read(&smp_capture_registry) != ncpus)
@@ -1142,13 +1141,13 @@ void smp_release(void)
                       smp_processor_id());
 #endif
                penguins_are_doing_time = 0;
-               membar_storeload_storestore();
+               membar_safe("#StoreLoad");
                atomic_dec(&smp_capture_registry);
        }
 }
 
-/* Imprisoned penguins run with %pil == 15, but PSTATE_IE set, so they
- * can service tlb flush xcalls...
+/* Imprisoned penguins run with %pil == PIL_NORMAL_MAX, but PSTATE_IE
+ * set, so they can service tlb flush xcalls...
  */
 extern void prom_world(int);
 
@@ -1161,7 +1160,7 @@ void smp_penguin_jailcell(int irq, struct pt_regs *regs)
        __asm__ __volatile__("flushw");
        prom_world(1);
        atomic_inc(&smp_capture_registry);
-       membar_storeload_storestore();
+       membar_safe("#StoreLoad");
        while (penguins_are_doing_time)
                rmb();
        atomic_dec(&smp_capture_registry);
similarity index 98%
rename from arch/sparc/kernel/sparc_ksyms.c
rename to arch/sparc/kernel/sparc_ksyms_32.c
index b0dfff84865365a595e39283e480b1ccbd9eb850..a4d45fc29b21e4ac9c9c069bdab7b8c277c2f6f5 100644 (file)
@@ -61,7 +61,6 @@ extern void (*bzero_1page)(void *);
 extern void *__bzero(void *, size_t);
 extern void *__memscan_zero(void *, size_t);
 extern void *__memscan_generic(void *, int, size_t);
-extern int __memcmp(const void *, const void *, __kernel_size_t);
 extern int __strncmp(const char *, const char *, __kernel_size_t);
 
 extern int __ashrdi3(int, int);
@@ -122,10 +121,8 @@ EXPORT_SYMBOL(phys_cpu_present_map);
 EXPORT_SYMBOL(__udelay);
 EXPORT_SYMBOL(__ndelay);
 EXPORT_SYMBOL(rtc_lock);
-#ifdef CONFIG_SUN_AUXIO
 EXPORT_SYMBOL(set_auxio);
 EXPORT_SYMBOL(get_auxio);
-#endif
 EXPORT_SYMBOL(io_remap_pfn_range);
 
 #ifndef CONFIG_SMP
@@ -213,7 +210,6 @@ EXPORT_SYMBOL(bzero_1page);
 EXPORT_SYMBOL(__bzero);
 EXPORT_SYMBOL(__memscan_zero);
 EXPORT_SYMBOL(__memscan_generic);
-EXPORT_SYMBOL(__memcmp);
 EXPORT_SYMBOL(__strncmp);
 EXPORT_SYMBOL(__memmove);
 
similarity index 97%
rename from arch/sparc64/kernel/sparc64_ksyms.c
rename to arch/sparc/kernel/sparc_ksyms_64.c
index 30bba8b0a3b0f91dadcc2aac44b0de329e5ec430..0133211ab6344e10bf60b9defd9165509c984e81 100644 (file)
@@ -49,6 +49,7 @@
 #include <asm/timer.h>
 #include <asm/cpudata.h>
 #include <asm/ftrace.h>
+#include <asm/hypervisor.h>
 
 struct poll {
        int fd;
@@ -61,7 +62,6 @@ extern pid_t kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);
 extern void *__bzero(void *, size_t);
 extern void *__memscan_zero(void *, size_t);
 extern void *__memscan_generic(void *, int, size_t);
-extern int __memcmp(const void *, const void *, __kernel_size_t);
 extern __kernel_size_t strlen(const char *);
 extern void sys_sigsuspend(void);
 extern int compat_sys_ioctl(unsigned int fd, unsigned int cmd, u32 arg);
@@ -148,10 +148,13 @@ EXPORT_SYMBOL(flush_dcache_page);
 EXPORT_SYMBOL(__flush_dcache_range);
 #endif
 
-#ifdef CONFIG_SUN_AUXIO
+EXPORT_SYMBOL(sun4v_niagara_getperf);
+EXPORT_SYMBOL(sun4v_niagara_setperf);
+EXPORT_SYMBOL(sun4v_niagara2_getperf);
+EXPORT_SYMBOL(sun4v_niagara2_setperf);
+
 EXPORT_SYMBOL(auxio_set_led);
 EXPORT_SYMBOL(auxio_set_lte);
-#endif
 #ifdef CONFIG_SBUS
 EXPORT_SYMBOL(sbus_set_sbus64);
 #endif
@@ -177,7 +180,6 @@ EXPORT_SYMBOL(pci_dma_supported);
 EXPORT_SYMBOL(io_remap_pfn_range);
 
 EXPORT_SYMBOL(dump_fpu);
-EXPORT_SYMBOL(put_fs_struct);
 
 /* math-emu wants this */
 EXPORT_SYMBOL(die_if_kernel);
@@ -219,7 +221,6 @@ EXPORT_SYMBOL(copy_user_page);
 EXPORT_SYMBOL(__bzero);
 EXPORT_SYMBOL(__memscan_zero);
 EXPORT_SYMBOL(__memscan_generic);
-EXPORT_SYMBOL(__memcmp);
 EXPORT_SYMBOL(__memset);
 
 EXPORT_SYMBOL(csum_partial);
similarity index 99%
rename from arch/sparc64/kernel/spiterrs.S
rename to arch/sparc/kernel/spiterrs.S
index ef902c6f8e3cefa641f0267d3c388e31279a4553..c357e40ffd01526ca38a824c12ac077cdba135c6 100644 (file)
@@ -80,7 +80,7 @@ __spitfire_cee_trap_continue:
        cmp             %g2, 1
        rdpr            %pil, %g2
        bleu,pt         %xcc, 1f
-        wrpr           %g0, 15, %pil
+        wrpr           %g0, PIL_NORMAL_MAX, %pil
 
        ba,pt           %xcc, etraptl1
         rd             %pc, %g7
similarity index 60%
rename from arch/sparc64/kernel/stacktrace.c
rename to arch/sparc/kernel/stacktrace.c
index 4e21d4a57d3b3f2ea536ed5707d61fbf6bc94181..acb12f6737570ebb7e4e8a98d8c4e741e1cb50b4 100644 (file)
@@ -7,17 +7,18 @@
 
 #include "kstack.h"
 
-void save_stack_trace(struct stack_trace *trace)
+static void __save_stack_trace(struct thread_info *tp,
+                              struct stack_trace *trace,
+                              bool skip_sched)
 {
-       struct thread_info *tp = task_thread_info(current);
        unsigned long ksp, fp;
 
-       stack_trace_flush();
-
-       __asm__ __volatile__(
-               "mov    %%fp, %0"
-               : "=r" (ksp)
-       );
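+       /* For the calling thread, flush the register windows and read
+        * %fp directly; for any other task, use the kernel stack pointer
+        * saved in its thread_info.
+        */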
+       if (tp == current_thread_info()) {
+               stack_trace_flush();
+               __asm__ __volatile__("mov %%fp, %0" : "=r" (ksp));
+       } else {
+               ksp = tp->ksp;
+       }
 
        fp = ksp + STACK_BIAS;
        do {
@@ -43,8 +44,21 @@ void save_stack_trace(struct stack_trace *trace)
 
                if (trace->skip > 0)
                        trace->skip--;
-               else
+               else if (!skip_sched || !in_sched_functions(pc))
                        trace->entries[trace->nr_entries++] = pc;
        } while (trace->nr_entries < trace->max_entries);
 }
+
+void save_stack_trace(struct stack_trace *trace)
+{
+       __save_stack_trace(current_thread_info(), trace, false);
+}
 EXPORT_SYMBOL_GPL(save_stack_trace);
+
+void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
+{
+       struct thread_info *tp = task_thread_info(tsk);
+
+       __save_stack_trace(tp, trace, true);
+}
+EXPORT_SYMBOL_GPL(save_stack_trace_tsk);
index 5dc8a5769489206ba45e64b12e6d471bd7923526..bc3adbf79c6a8006ca61053f905cde8474cec9fd 100644 (file)
@@ -160,6 +160,7 @@ static void __init sun4c_init_timers(irq_handler_t counter_fn)
        sun4c_timers = (void __iomem *) (unsigned long) addr[0];
 
        irq = of_get_property(dp, "intr", NULL);
+       of_node_put(dp);
        if (!irq) {
                prom_printf("sun4c_init_timers: No intr property\n");
                prom_halt();
@@ -200,6 +201,7 @@ void __init sun4c_init_IRQ(void)
        }
 
        addr = of_get_property(dp, "address", NULL);
+       of_node_put(dp);
        if (!addr) {
                prom_printf("sun4c_init_IRQ: No address property\n");
                prom_halt();
index d3cb76ce418bde420d5d7e397679091ba5cd5cf8..3369fef5b4b3acd2749ddcbefd1befd37df2e15b 100644 (file)
@@ -40,6 +40,7 @@
 #include <asm/cacheflush.h>
 #include <asm/irq_regs.h>
 
+#include "kernel.h"
 #include "irq.h"
 
 /* If you trust current SCSI layer to handle different SCSI IRQs, enable this. I don't trust it... -jj */
@@ -58,7 +59,6 @@ static struct sun4d_timer_regs __iomem *sun4d_timers;
 #define TIMER_IRQ      10
 
 #define MAX_STATIC_ALLOC       4
-extern struct irqaction static_irqaction[MAX_STATIC_ALLOC];
 extern int static_irq_count;
 static unsigned char sbus_tid[32];
 
@@ -508,6 +508,7 @@ static void __init sun4d_init_timers(irq_handler_t counter_fn)
         * bootbus.
         */
        reg = of_get_property(dp, "reg", NULL);
+       of_node_put(dp);
        if (!reg) {
                prom_printf("sun4d_init_timers: No reg property\n");
                prom_halt();
index f10317179ee607c650ab837a5bb24ef34836e1b7..301892e2d7186fd41d32da167ff6218a5e5ce9d2 100644 (file)
@@ -374,6 +374,7 @@ static void __init sun4m_init_timers(irq_handler_t counter_fn)
        }
 
        addr = of_get_property(dp, "address", &len);
+       of_node_put(dp);
        if (!addr) {
                printk(KERN_ERR "sun4m_init_timers: No 'address' prop.\n");
                return;
@@ -437,6 +438,7 @@ void __init sun4m_init_IRQ(void)
        }
 
        addr = of_get_property(dp, "address", &len);
+       of_node_put(dp);
        if (!addr) {
                printk(KERN_ERR "sun4m_init_IRQ: No 'address' prop.\n");
                return;
similarity index 98%
rename from arch/sparc64/kernel/sun4v_ivec.S
rename to arch/sparc/kernel/sun4v_ivec.S
index e2f8e1b4882a7f8cbc0c9a77c4eef33f473b7cac..559bc5e9c199232d092ec425d705a016b639db9a 100644 (file)
@@ -186,7 +186,7 @@ sun4v_res_mondo:
         * when it's done.
         */
        rdpr    %pil, %g2
-       wrpr    %g0, 15, %pil
+       wrpr    %g0, PIL_NORMAL_MAX, %pil
        mov     %g1, %g4
        ba,pt   %xcc, etrap_irq
         rd     %pc, %g7
@@ -216,7 +216,7 @@ sun4v_res_mondo_queue_full:
        membar  #Sync
 
        rdpr    %pil, %g2
-       wrpr    %g0, 15, %pil
+       wrpr    %g0, PIL_NORMAL_MAX, %pil
        ba,pt   %xcc, etrap_irq
         rd     %pc, %g7
 #ifdef CONFIG_TRACE_IRQFLAGS
@@ -297,7 +297,7 @@ sun4v_nonres_mondo:
         * when it's done.
         */
        rdpr    %pil, %g2
-       wrpr    %g0, 15, %pil
+       wrpr    %g0, PIL_NORMAL_MAX, %pil
        mov     %g1, %g4
        ba,pt   %xcc, etrap_irq
         rd     %pc, %g7
@@ -327,7 +327,7 @@ sun4v_nonres_mondo_queue_full:
        membar  #Sync
 
        rdpr    %pil, %g2
-       wrpr    %g0, 15, %pil
+       wrpr    %g0, PIL_NORMAL_MAX, %pil
        ba,pt   %xcc, etrap_irq
         rd     %pc, %g7
 #ifdef CONFIG_TRACE_IRQFLAGS
similarity index 99%
rename from arch/sparc64/kernel/sysfs.c
rename to arch/sparc/kernel/sysfs.c
index 84e5ce146713383922f07f9e5b1c677874067344..d28f496f466913429dbd727893b824c0fefc39cf 100644 (file)
@@ -8,6 +8,7 @@
 #include <linux/percpu.h>
 #include <linux/init.h>
 
+#include <asm/cpudata.h>
 #include <asm/hypervisor.h>
 #include <asm/spitfire.h>
 
similarity index 98%
rename from arch/sparc64/kernel/trampoline.S
rename to arch/sparc/kernel/trampoline_64.S
index 83abd5ae88a49c6458de439ee2bac8846bf5d336..da1b781b5e65bd8f84d210f7bdb2e9d00022ae5c 100644 (file)
@@ -109,7 +109,6 @@ startup_continue:
         */
        sethi           %hi(prom_entry_lock), %g2
 1:     ldstub          [%g2 + %lo(prom_entry_lock)], %g1
-       membar          #StoreLoad | #StoreStore
        brnz,pn         %g1, 1b
         nop
 
@@ -214,7 +213,6 @@ startup_continue:
 
        sethi           %hi(prom_entry_lock), %g2
        stb             %g0, [%g2 + %lo(prom_entry_lock)]
-       membar          #StoreStore | #StoreLoad
 
        ba,pt           %xcc, after_lock_tlb
         nop
@@ -330,7 +328,6 @@ after_lock_tlb:
 
        sethi           %hi(prom_entry_lock), %g2
 1:     ldstub          [%g2 + %lo(prom_entry_lock)], %g1
-       membar          #StoreLoad | #StoreStore
        brnz,pn         %g1, 1b
         nop
 
@@ -394,7 +391,6 @@ after_lock_tlb:
 
 3:     sethi           %hi(prom_entry_lock), %g2
        stb             %g0, [%g2 + %lo(prom_entry_lock)]
-       membar          #StoreStore | #StoreLoad
 
        ldx             [%l0], %g6
        ldx             [%g6 + TI_TASK], %g4
similarity index 94%
rename from arch/sparc/kernel/traps.c
rename to arch/sparc/kernel/traps_32.c
index 2b7d50659036e2daa54e47e7deb23c875f7a6726..716f3946c494de35d74bf74c567c4fc5f7ee79fc 100644 (file)
 #include <asm/unistd.h>
 #include <asm/traps.h>
 
-/* #define TRAP_DEBUG */
-
-struct trap_trace_entry {
-       unsigned long pc;
-       unsigned long type;
-};
-
-void syscall_trace_entry(struct pt_regs *regs)
-{
-       printk("%s[%d]: ", current->comm, task_pid_nr(current));
-       printk("scall<%d> (could be %d)\n", (int) regs->u_regs[UREG_G1],
-              (int) regs->u_regs[UREG_I0]);
-}
-
-void syscall_trace_exit(struct pt_regs *regs)
-{
-}
+#include "entry.h"
+#include "kernel.h"
 
-void sun4d_nmi(struct pt_regs *regs)
-{
-       printk("Aieee: sun4d NMI received!\n");
-       printk("you lose buddy boy...\n");
-       show_regs(regs);
-       prom_halt();
-}
+/* #define TRAP_DEBUG */
 
 static void instruction_dump(unsigned long *pc)
 {
@@ -134,7 +113,6 @@ void do_hw_interrupt(struct pt_regs *regs, unsigned long type)
 void do_illegal_instruction(struct pt_regs *regs, unsigned long pc, unsigned long npc,
                            unsigned long psr)
 {
-       extern int do_user_muldiv (struct pt_regs *, unsigned long);
        siginfo_t info;
 
        if(psr & PSR_PS)
@@ -195,10 +173,6 @@ void do_memaccess_unaligned(struct pt_regs *regs, unsigned long pc, unsigned lon
        send_sig_info(SIGBUS, &info, current);
 }
 
-extern void fpsave(unsigned long *fpregs, unsigned long *fsr,
-                  void *fpqueue, unsigned long *fpqdepth);
-extern void fpload(unsigned long *fpregs, unsigned long *fsr);
-
 static unsigned long init_fsr = 0x0UL;
 static unsigned long init_fregs[32] __attribute__ ((aligned (8))) =
                 { ~0UL, ~0UL, ~0UL, ~0UL, ~0UL, ~0UL, ~0UL, ~0UL,
@@ -456,8 +430,6 @@ void do_BUG(const char *file, int line)
  * up here so that timer interrupts work during initialization.
  */
 
-extern void sparc_cpu_startup(void);
-
 void trap_init(void)
 {
        extern void thread_info_offsets_are_bolixed_pete(void);
similarity index 99%
rename from arch/sparc64/kernel/traps.c
rename to arch/sparc/kernel/traps_64.c
index 81ccd22e78d45d538225feae4b68a4d9257d1f77..4638af2f55a0bb5d50c9b0b8425001d7e0845279 100644 (file)
@@ -1371,7 +1371,6 @@ static int cheetah_fix_ce(unsigned long physaddr)
        __asm__ __volatile__("ldxa      [%0] %3, %%g0\n\t"
                             "ldxa      [%1] %3, %%g0\n\t"
                             "casxa     [%2] %3, %%g0, %%g0\n\t"
-                            "membar    #StoreLoad | #StoreStore\n\t"
                             "ldxa      [%0] %3, %%g0\n\t"
                             "ldxa      [%1] %3, %%g0\n\t"
                             "membar    #Sync"
@@ -1833,7 +1832,7 @@ static void sun4v_log_error(struct pt_regs *regs, struct sun4v_error_entry *ent,
        }
 }
 
-/* We run with %pil set to 15 and PSTATE_IE enabled in %pstate.
+/* We run with %pil set to PIL_NORMAL_MAX and PSTATE_IE enabled in %pstate.
  * Log the event and clear the first word of the entry.
  */
 void sun4v_resum_error(struct pt_regs *regs, unsigned long offset)
@@ -1881,7 +1880,7 @@ void sun4v_resum_overflow(struct pt_regs *regs)
        atomic_inc(&sun4v_resum_oflow_cnt);
 }
 
-/* We run with %pil set to 15 and PSTATE_IE enabled in %pstate.
+/* We run with %pil set to PIL_NORMAL_MAX and PSTATE_IE enabled in %pstate.
  * Log the event, clear the first word of the entry, and die.
  */
 void sun4v_nonresum_error(struct pt_regs *regs, unsigned long offset)
similarity index 99%
rename from arch/sparc64/kernel/tsb.S
rename to arch/sparc/kernel/tsb.S
index c499214b501d007414e84a7f63d3b505193bf52d..8c91d9b29a2f24023849498a41f440b5ec1e6938 100644 (file)
@@ -317,7 +317,7 @@ tsb_flush:
        srlx    %g1, 32, %o3
        andcc   %o3, %g2, %g0
        bne,pn  %icc, 1b
-        membar #LoadLoad
+        nop
        cmp     %g1, %o1
        mov     1, %o3
        bne,pt  %xcc, 2f
@@ -327,7 +327,7 @@ tsb_flush:
        bne,pn  %xcc, 1b
         nop
 2:     retl
-        TSB_MEMBAR
+        nop
        .size   tsb_flush, .-tsb_flush
 
        /* Reload MMU related context switch state at
@@ -478,7 +478,7 @@ copy_tsb:           /* %o0=old_tsb_base, %o1=old_tsb_size
         nop
 
        retl
-        TSB_MEMBAR
+        nop
        .size           copy_tsb, .-copy_tsb
 
        /* Set the invalid bit in all TSB entries.  */
similarity index 99%
rename from arch/sparc64/kernel/ttable.S
rename to arch/sparc/kernel/ttable.S
index 1ade3d6fb7fc7d1bd748a7c47852c027328efa76..ea925503b42e59fa051d591ebf752eb708243c7d 100644 (file)
@@ -66,7 +66,7 @@ tl0_irq6:     BTRAP(0x46)
 tl0_irq7:      BTRAP(0x47) BTRAP(0x48) BTRAP(0x49)
 tl0_irq10:     BTRAP(0x4a) BTRAP(0x4b) BTRAP(0x4c) BTRAP(0x4d)
 tl0_irq14:     TRAP_IRQ(timer_interrupt, 14)
-tl0_irq15:     TRAP_IRQ(handler_irq, 15)
+tl0_irq15:     TRAP_NMI_IRQ(perfctr_irq, 15)
 tl0_resv050:   BTRAP(0x50) BTRAP(0x51) BTRAP(0x52) BTRAP(0x53) BTRAP(0x54) BTRAP(0x55)
 tl0_resv056:   BTRAP(0x56) BTRAP(0x57) BTRAP(0x58) BTRAP(0x59) BTRAP(0x5a) BTRAP(0x5b)
 tl0_resv05c:   BTRAP(0x5c) BTRAP(0x5d) BTRAP(0x5e) BTRAP(0x5f)
index 5b7e69a8c32f7259497fafbcd1e816242e690423..76267085b13b37f2934282a0dd4b95150cb97ea7 100644 (file)
@@ -1,27 +1,56 @@
-/* ld script to make SparcLinux kernel */
+/* ld script for sparc32/sparc64 kernel */
 
 #include <asm-generic/vmlinux.lds.h>
+
 #include <asm/page.h>
+#include <asm/thread_info.h>
+
+#ifdef CONFIG_SPARC32
+#define INITIAL_ADDRESS  0x10000 + SIZEOF_HEADERS
+#define TEXTSTART      0xf0004000
+
+#define SMP_CACHE_BYTES_SHIFT 5
+
+#else
+#define SMP_CACHE_BYTES_SHIFT 6
+#define INITIAL_ADDRESS 0x4000
+#define TEXTSTART      0x0000000000404000
+
+#endif
+
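+/* Cache line size used below to align .data.cacheline_aligned and
+ * .data.read_mostly: 32 bytes on sparc32, 64 bytes on sparc64.
+ */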
+#define SMP_CACHE_BYTES (1 << SMP_CACHE_BYTES_SHIFT)
 
+#ifdef CONFIG_SPARC32
 OUTPUT_FORMAT("elf32-sparc", "elf32-sparc", "elf32-sparc")
 OUTPUT_ARCH(sparc)
 ENTRY(_start)
 jiffies = jiffies_64 + 4;
+#else
+/* sparc64 */
+OUTPUT_FORMAT("elf64-sparc", "elf64-sparc", "elf64-sparc")
+OUTPUT_ARCH(sparc:v9a)
+ENTRY(_start)
+jiffies = jiffies_64;
+#endif
+
 SECTIONS
 {
-       . = 0x10000 + SIZEOF_HEADERS;
-       .text 0xf0004000 :
+       /* swapper_low_pmd_dir is sparc64 only */
+       swapper_low_pmd_dir = 0x0000000000402000;
+       . = INITIAL_ADDRESS;
+       .text TEXTSTART :
        {
                _text = .;
                *(.text.head)
                TEXT_TEXT
                SCHED_TEXT
                LOCK_TEXT
+               KPROBES_TEXT
                *(.gnu.warning)
        } = 0
        _etext = .;
-       PROVIDE (etext = .);
-       RODATA
+
+       RO_DATA(PAGE_SIZE)
        .data : {
                DATA_DATA
                CONSTRUCTORS
@@ -29,25 +58,38 @@ SECTIONS
        .data1 : {
                *(.data1)
        }
+       . = ALIGN(SMP_CACHE_BYTES);
+       .data.cacheline_aligned : {
+               *(.data.cacheline_aligned)
+       }
+       . = ALIGN(SMP_CACHE_BYTES);
+       .data.read_mostly : {
+               *(.data.read_mostly)
+       }
+       /* End of data section */
        _edata = .;
-       PROVIDE (edata = .);
 
+       /* init_task */
+       . = ALIGN(THREAD_SIZE);
+       .data.init_task : {
+               *(.data.init_task)
+       }
        .fixup : {
                __start___fixup = .;
                *(.fixup)
                __stop___fixup = .;
        }
+       . = ALIGN(16);
        __ex_table : {
                __start___ex_table = .;
                *(__ex_table)
                __stop___ex_table = .;
        }
-
        NOTES
 
        . = ALIGN(PAGE_SIZE);
-       __init_begin = .;
        .init.text : {
+               __init_begin = .;
                _sinittext = .;
                INIT_TEXT
                _einittext = .;
@@ -65,7 +107,7 @@ SECTIONS
        .initcall.init : {
                __initcall_start = .;
                INITCALLS
-       __initcall_end = .;
+               __initcall_end = .;
        }
        .con_initcall.init : {
                __con_initcall_start = .;
@@ -74,38 +116,61 @@ SECTIONS
        }
        SECURITY_INIT
 
+       . = ALIGN(4);
+       .tsb_ldquad_phys_patch : {
+               __tsb_ldquad_phys_patch = .;
+               *(.tsb_ldquad_phys_patch)
+               __tsb_ldquad_phys_patch_end = .;
+       }
+
+       .tsb_phys_patch : {
+               __tsb_phys_patch = .;
+               *(.tsb_phys_patch)
+               __tsb_phys_patch_end = .;
+       }
+
+       .cpuid_patch : {
+               __cpuid_patch = .;
+               *(.cpuid_patch)
+               __cpuid_patch_end = .;
+       }
+
+       .sun4v_1insn_patch : {
+               __sun4v_1insn_patch = .;
+               *(.sun4v_1insn_patch)
+               __sun4v_1insn_patch_end = .;
+       }
+       .sun4v_2insn_patch : {
+               __sun4v_2insn_patch = .;
+               *(.sun4v_2insn_patch)
+               __sun4v_2insn_patch_end = .;
+       }
+
 #ifdef CONFIG_BLK_DEV_INITRD
        . = ALIGN(PAGE_SIZE);
        .init.ramfs : {
-       __initramfs_start = .;
+               __initramfs_start = .;
                *(.init.ramfs)
-       __initramfs_end = .;
+               __initramfs_end = .;
        }
 #endif
 
        PERCPU(PAGE_SIZE)
+
        . = ALIGN(PAGE_SIZE);
        __init_end = .;
-       . = ALIGN(32);
-       .data.cacheline_aligned : {
-               *(.data.cacheline_aligned)
-       }
-       . = ALIGN(32);
-       .data.read_mostly : {
-               *(.data.read_mostly)
-       }
-
        __bss_start = .;
        .sbss : {
                *(.sbss)
-               *(.scommon) }
+               *(.scommon)
+       }
        .bss : {
                *(.dynbss)
                *(.bss)
                *(COMMON)
        }
        _end = . ;
-       PROVIDE (end = .);
+
        /DISCARD/ : {
                EXIT_TEXT
                EXIT_DATA
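The unified linker script keys everything off CONFIG_SPARC32/CONFIG_SPARC64; the only arithmetic is the cache-line alignment used for .data.cacheline_aligned and .data.read_mostly. A quick sketch of what the two SMP_CACHE_BYTES_SHIFT values expand to:

	/* Sketch: cache-line sizes implied by the shifts defined above. */
	#include <stdio.h>

	int main(void)
	{
		printf("sparc32: %d bytes\n", 1 << 5);	/* SMP_CACHE_BYTES_SHIFT 5 */
		printf("sparc64: %d bytes\n", 1 << 6);	/* SMP_CACHE_BYTES_SHIFT 6 */
		return 0;
	}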
index 6e303e10c3b94b0d111f2c39676163655cf75801..375016e19144ada6f3972792880b4f7f0a846e54 100644 (file)
@@ -1,13 +1,44 @@
 # Makefile for Sparc library files..
 #
 
-EXTRA_AFLAGS := -ansi -DST_DIV0=0x02
+asflags-y := -ansi -DST_DIV0=0x02
+ccflags-y := -Werror
 
-lib-y := mul.o rem.o sdiv.o udiv.o umul.o urem.o ashrdi3.o memcpy.o memset.o \
-         strlen.o checksum.o blockops.o memscan.o memcmp.o strncmp.o \
-        strncpy_from_user.o divdi3.o udivdi3.o strlen_user.o \
-        copy_user.o locks.o atomic.o \
-        lshrdi3.o ashldi3.o rwsem.o muldi3.o bitext.o \
-        cmpdi2.o
+lib-$(CONFIG_SPARC32) += mul.o rem.o sdiv.o udiv.o umul.o urem.o ashrdi3.o
+lib-$(CONFIG_SPARC32) += memcpy.o memset.o
+lib-y                 += strlen.o
+lib-y                 += checksum_$(BITS).o
+lib-$(CONFIG_SPARC32) += blockops.o
+lib-y                 += memscan_$(BITS).o memcmp.o strncmp_$(BITS).o
+lib-y                 += strncpy_from_user_$(BITS).o strlen_user_$(BITS).o
+lib-$(CONFIG_SPARC32) += divdi3.o udivdi3.o
+lib-$(CONFIG_SPARC32) += copy_user.o locks.o
+lib-y                 += atomic_$(BITS).o
+lib-$(CONFIG_SPARC32) += lshrdi3.o ashldi3.o
+lib-y                 += rwsem_$(BITS).o
+lib-$(CONFIG_SPARC32) += muldi3.o bitext.o cmpdi2.o
 
-obj-y += iomap.o atomic32.o
+lib-$(CONFIG_SPARC64) += PeeCeeI.o copy_page.o clear_page.o bzero.o
+lib-$(CONFIG_SPARC64) += csum_copy.o csum_copy_from_user.o csum_copy_to_user.o
+lib-$(CONFIG_SPARC64) += VISsave.o
+lib-$(CONFIG_SPARC64) += bitops.o
+
+lib-$(CONFIG_SPARC64) += U1memcpy.o U1copy_from_user.o U1copy_to_user.o
+
+lib-$(CONFIG_SPARC64) += U3memcpy.o U3copy_from_user.o U3copy_to_user.o
+lib-$(CONFIG_SPARC64) += U3patch.o
+
+lib-$(CONFIG_SPARC64) += NGmemcpy.o NGcopy_from_user.o NGcopy_to_user.o
+lib-$(CONFIG_SPARC64) += NGpatch.o NGpage.o NGbzero.o
+
+lib-$(CONFIG_SPARC64) += NG2memcpy.o NG2copy_from_user.o NG2copy_to_user.o
+lib-$(CONFIG_SPARC64) += NG2patch.o NG2page.o

+
+lib-$(CONFIG_SPARC64) += GENmemcpy.o GENcopy_from_user.o GENcopy_to_user.o
+lib-$(CONFIG_SPARC64) += GENpatch.o GENpage.o GENbzero.o
+
+lib-$(CONFIG_SPARC64) += copy_in_user.o user_fixup.o memmove.o
+lib-$(CONFIG_SPARC64) += mcount.o ipcsum.o xor.o
+
+obj-y                 += iomap.o
+obj-$(CONFIG_SPARC32) += atomic32.o
similarity index 84%
rename from arch/sparc64/lib/atomic.S
rename to arch/sparc/lib/atomic_64.S
index 70ac4186f62b7afc4af36eeb5472d6db868688d8..0268210ca1683f7bd0bb018953d93a22e207c701 100644 (file)
@@ -43,29 +43,10 @@ atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
 2:     BACKOFF_SPIN(%o2, %o3, 1b)
        .size   atomic_sub, .-atomic_sub
 
-       /* On SMP we need to use memory barriers to ensure
-        * correct memory operation ordering, nop these out
-        * for uniprocessor.
-        */
-#ifdef CONFIG_SMP
-
-#define ATOMIC_PRE_BARRIER     membar #StoreLoad | #LoadLoad;
-#define ATOMIC_POST_BARRIER    \
-       ba,pt %xcc, 80b;        \
-       membar #StoreLoad | #StoreStore
-
-80:    retl
-        nop
-#else
-#define ATOMIC_PRE_BARRIER
-#define ATOMIC_POST_BARRIER
-#endif
-
        .globl  atomic_add_ret
        .type   atomic_add_ret,#function
 atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
        BACKOFF_SETUP(%o2)
-       ATOMIC_PRE_BARRIER
 1:     lduw    [%o1], %g1
        add     %g1, %o0, %g7
        cas     [%o1], %g1, %g7
@@ -73,7 +54,6 @@ atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
        bne,pn  %icc, 2f
         add    %g7, %o0, %g7
        sra     %g7, 0, %o0
-       ATOMIC_POST_BARRIER
        retl
         nop
 2:     BACKOFF_SPIN(%o2, %o3, 1b)
@@ -83,7 +63,6 @@ atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
        .type   atomic_sub_ret,#function
 atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
        BACKOFF_SETUP(%o2)
-       ATOMIC_PRE_BARRIER
 1:     lduw    [%o1], %g1
        sub     %g1, %o0, %g7
        cas     [%o1], %g1, %g7
@@ -91,7 +70,6 @@ atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
        bne,pn  %icc, 2f
         sub    %g7, %o0, %g7
        sra     %g7, 0, %o0
-       ATOMIC_POST_BARRIER
        retl
         nop
 2:     BACKOFF_SPIN(%o2, %o3, 1b)
@@ -131,7 +109,6 @@ atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
        .type   atomic64_add_ret,#function
 atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
        BACKOFF_SETUP(%o2)
-       ATOMIC_PRE_BARRIER
 1:     ldx     [%o1], %g1
        add     %g1, %o0, %g7
        casx    [%o1], %g1, %g7
@@ -139,7 +116,6 @@ atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
        bne,pn  %xcc, 2f
         add    %g7, %o0, %g7
        mov     %g7, %o0
-       ATOMIC_POST_BARRIER
        retl
         nop
 2:     BACKOFF_SPIN(%o2, %o3, 1b)
@@ -149,7 +125,6 @@ atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
        .type   atomic64_sub_ret,#function
 atomic64_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
        BACKOFF_SETUP(%o2)
-       ATOMIC_PRE_BARRIER
 1:     ldx     [%o1], %g1
        sub     %g1, %o0, %g7
        casx    [%o1], %g1, %g7
@@ -157,7 +132,6 @@ atomic64_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
        bne,pn  %xcc, 2f
         sub    %g7, %o0, %g7
        mov     %g7, %o0
-       ATOMIC_POST_BARRIER
        retl
         nop
 2:     BACKOFF_SPIN(%o2, %o3, 1b)
similarity index 84%
rename from arch/sparc64/lib/bitops.S
rename to arch/sparc/lib/bitops.S
index 6b015a6eefb50017e7d22b2d1edc0637408a1fef..2b7228cb8c2209332000d429257f16f4dbcf730f 100644 (file)
@@ -8,29 +8,10 @@
 
        .text
 
-       /* On SMP we need to use memory barriers to ensure
-        * correct memory operation ordering, nop these out
-        * for uniprocessor.
-        */
-
-#ifdef CONFIG_SMP
-#define BITOP_PRE_BARRIER      membar #StoreLoad | #LoadLoad
-#define BITOP_POST_BARRIER     \
-       ba,pt   %xcc, 80b;      \
-       membar #StoreLoad | #StoreStore
-
-80:    retl
-        nop
-#else
-#define BITOP_PRE_BARRIER
-#define BITOP_POST_BARRIER
-#endif
-
        .globl  test_and_set_bit
        .type   test_and_set_bit,#function
 test_and_set_bit:      /* %o0=nr, %o1=addr */
        BACKOFF_SETUP(%o3)
-       BITOP_PRE_BARRIER
        srlx    %o0, 6, %g1
        mov     1, %o2
        sllx    %g1, 3, %g3
@@ -45,7 +26,6 @@ test_and_set_bit:     /* %o0=nr, %o1=addr */
         and    %g7, %o2, %g2
        clr     %o0
        movrne  %g2, 1, %o0
-       BITOP_POST_BARRIER
        retl
         nop
 2:     BACKOFF_SPIN(%o3, %o4, 1b)
@@ -55,7 +35,6 @@ test_and_set_bit:     /* %o0=nr, %o1=addr */
        .type   test_and_clear_bit,#function
 test_and_clear_bit:    /* %o0=nr, %o1=addr */
        BACKOFF_SETUP(%o3)
-       BITOP_PRE_BARRIER
        srlx    %o0, 6, %g1
        mov     1, %o2
        sllx    %g1, 3, %g3
@@ -70,7 +49,6 @@ test_and_clear_bit:   /* %o0=nr, %o1=addr */
         and    %g7, %o2, %g2
        clr     %o0
        movrne  %g2, 1, %o0
-       BITOP_POST_BARRIER
        retl
         nop
 2:     BACKOFF_SPIN(%o3, %o4, 1b)
@@ -80,7 +58,6 @@ test_and_clear_bit:   /* %o0=nr, %o1=addr */
        .type   test_and_change_bit,#function
 test_and_change_bit:   /* %o0=nr, %o1=addr */
        BACKOFF_SETUP(%o3)
-       BITOP_PRE_BARRIER
        srlx    %o0, 6, %g1
        mov     1, %o2
        sllx    %g1, 3, %g3
@@ -95,7 +72,6 @@ test_and_change_bit:  /* %o0=nr, %o1=addr */
         and    %g7, %o2, %g2
        clr     %o0
        movrne  %g2, 1, %o0
-       BITOP_POST_BARRIER
        retl
         nop
 2:     BACKOFF_SPIN(%o3, %o4, 1b)
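The atomic_64.S and bitops.S hunks drop the pre/post membars but leave the compare-and-swap retry loops intact. As a C model of what, e.g., atomic_add_ret still computes (a sketch built on a GCC builtin, not the kernel's hand-written assembly or its BACKOFF spinning):

	/* Sketch: cas retry loop equivalent to atomic_add_ret above. */
	static int atomic_add_ret_model(int *ctr, int inc)
	{
		int old, new;

		do {
			old = *ctr;		/* lduw  [%o1], %g1    */
			new = old + inc;	/* add   %g1, %o0, %g7 */
			/* cas [%o1], %g1, %g7: retry if *ctr changed underneath us */
		} while (!__sync_bool_compare_and_swap(ctr, old, new));

		return new;			/* returned via sra %g7, 0, %o0 */
	}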
index cb4bdb0cc2afe3798674066209995d8cf98072c9..efa106c41ed0af7777d4a6a8b70ca9585d9b694d 100644 (file)
-       .text
-       .align 4
-       .global __memcmp, memcmp
-__memcmp:
-memcmp:
-#if 1
-       cmp     %o2, 0
-       ble     L3
-        mov    0, %g3
-L5:
-       ldub    [%o0], %g2
-       ldub    [%o1], %g3
-       sub     %g2, %g3, %g2
-       mov     %g2, %g3
-       sll     %g2, 24, %g2
-
-       cmp     %g2, 0
-       bne     L3
-        add    %o0, 1, %o0
+/* Sparc optimized memcmp code.
+ *
+ * Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
+ * Copyright (C) 2000, 2008 David S. Miller (davem@davemloft.net)
+ */
 
-       add     %o2, -1, %o2
+#include <linux/linkage.h>
+#include <asm/asm.h>
 
+       .text
+ENTRY(memcmp)
        cmp     %o2, 0
-       bg      L5
-        add    %o1, 1, %o1
-L3:
-       sll     %g3, 24, %o0
-       sra     %o0, 24, %o0
-
-       retl
+1:     BRANCH32(be, pn, 2f)
         nop
-#else
-       save    %sp, -104, %sp
-       mov     %i2, %o4
-       mov     %i0, %o0
-
-       cmp     %o4, 15
-       ble     L72
-        mov    %i1, %i2
-
-       andcc   %i2, 3, %g0
-       be      L161
-        andcc  %o0, 3, %g2
-L75:
-       ldub    [%o0], %g3
-       ldub    [%i2], %g2
-       add     %o0,1, %o0
-
-       subcc   %g3, %g2, %i0
-       bne     L156
-        add    %i2, 1, %i2
-
-       andcc   %i2, 3, %g0
-       bne     L75
-        add    %o4, -1, %o4
-
-       andcc   %o0, 3, %g2
-L161:
-       bne,a   L78
-        mov    %i2, %i1
-
-       mov     %o0, %i5
-       mov     %i2, %i3
-       srl     %o4, 2, %i4
-
-       cmp     %i4, 0
-       bge     L93
-        mov    %i4, %g2
-
-       add %i4, 3, %g2
-L93:
-       sra     %g2, 2, %g2
-       sll     %g2, 2, %g2
-       sub     %i4, %g2, %g2
-
-       cmp     %g2, 1
-       be,a    L88
-        add    %o0, 4, %i5
-
-       bg      L94
-        cmp    %g2, 2
-
-       cmp     %g2, 0
-       be,a    L86
-        ld     [%o0], %g3
-
-       b       L162
-        ld     [%i5], %g3
-L94:
-       be      L81
-        cmp    %g2, 3
-
-       be,a    L83
-        add    %o0, -4, %i5
-
-       b       L162
-        ld     [%i5], %g3
-L81:
-       add     %o0, -8, %i5
-       ld      [%o0], %g3
-       add     %i2, -8, %i3
-       ld      [%i2], %g2
-
-       b       L82
-        add    %i4, 2, %i4
-L83:
-       ld      [%o0], %g4
-       add     %i2, -4, %i3
-       ld      [%i2], %g1
-
-       b       L84
-        add    %i4, 1, %i4
-L86:
-       b       L87
-        ld     [%i2], %g2
-L88:
-       add     %i2, 4, %i3
-       ld      [%o0], %g4
-       add     %i4, -1, %i4
-       ld      [%i2], %g1
-L95:
-       ld      [%i5], %g3
-L162:
-       cmp     %g4, %g1
-       be      L87
-        ld     [%i3], %g2
-
-       cmp     %g4, %g1
-L163:
-       bleu    L114
-        mov    -1, %i0
-
-       b       L114
-        mov    1, %i0
-L87:
-       ld      [%i5 + 4], %g4
-       cmp     %g3, %g2
-       bne     L163
-        ld     [%i3 + 4], %g1
-L84:
-       ld      [%i5 + 8], %g3
-
-       cmp     %g4, %g1
-       bne     L163
-        ld     [%i3 + 8], %g2
-L82:
-       ld      [%i5 + 12], %g4
-       cmp     %g3, %g2
-       bne     L163
-        ld     [%i3 + 12], %g1
-
-       add     %i5, 16, %i5
-
-       addcc   %i4, -4, %i4
-       bne     L95
-        add    %i3, 16, %i3
-
-       cmp     %g4, %g1
-       bne     L163
-        nop
-
-       b       L114
-        mov    0, %i0
-L78:
-       srl     %o4, 2, %i0
-       and     %o0, -4, %i3
-       orcc    %i0, %g0, %g3
-       sll     %g2, 3, %o7
-       mov     32, %g2
-
-       bge     L129
-        sub    %g2, %o7, %o1
-
-       add     %i0, 3, %g3
-L129:
-       sra     %g3, 2, %g2
-       sll     %g2, 2, %g2
-       sub     %i0, %g2, %g2
-
-       cmp     %g2, 1
-       be,a    L124
-        ld     [%i3], %o3
-
-       bg      L130
-        cmp    %g2, 2
-
-       cmp     %g2, 0
-       be,a    L122
-        ld     [%i3], %o2
-
-       b       L164
-       sll     %o3, %o7, %g3
-L130:
-       be      L117
-        cmp    %g2, 3
-
-       be,a    L119
-        ld     [%i3], %g1
-
-       b       L164
-        sll    %o3, %o7, %g3
-L117:
-       ld      [%i3], %g4
-       add     %i2, -8, %i1
-       ld      [%i3 + 4], %o3
-       add     %i0, 2, %i0
-       ld      [%i2], %i4
-
-       b       L118
-        add    %i3, -4, %i3
-L119:
-       ld      [%i3 + 4], %g4
-       add     %i2, -4, %i1
-       ld      [%i2], %i5
-
-       b       L120
-        add    %i0, 1, %i0
-L122:
-       ld      [%i3 + 4], %g1
-       ld      [%i2], %i4
-
-       b       L123
-        add    %i3, 4, %i3
-L124:
-       add     %i2, 4, %i1
-       ld      [%i3 + 4], %o2
-       add     %i0, -1, %i0
-       ld      [%i2], %i5
-       add     %i3, 8, %i3
-L131:
-       sll     %o3, %o7, %g3
-L164:
-       srl     %o2, %o1, %g2
-       ld      [%i3], %g1
-       or      %g3, %g2, %g3
-
-       cmp     %g3, %i5
-       bne     L163
-        ld     [%i1], %i4
-L123:
-       sll     %o2, %o7, %g3
-       srl     %g1, %o1, %g2
-       ld      [%i3 + 4], %g4
-       or      %g3, %g2, %g3
-
-       cmp     %g3, %i4
-       bne     L163
-        ld     [%i1 + 4], %i5
-L120:
-       sll     %g1, %o7, %g3
-       srl     %g4, %o1, %g2
-       ld      [%i3 + 8], %o3
-       or      %g3, %g2, %g3
-
-       cmp     %g3, %i5
-       bne     L163
-        ld     [%i1 + 8], %i4
-L118:
-       sll     %g4, %o7, %g3
-       srl     %o3, %o1, %g2
-       ld      [%i3 + 12], %o2
-       or      %g3, %g2, %g3
-
-       cmp     %g3, %i4
-       bne     L163
-        ld     [%i1 + 12], %i5
-
-       add     %i3, 16, %i3
-       addcc   %i0, -4, %i0
-       bne     L131
-        add    %i1, 16, %i1
-
-       sll     %o3, %o7, %g3
-       srl     %o2, %o1, %g2
-       or      %g3, %g2, %g3
-
-       cmp     %g3, %i5
-       be,a    L114
-        mov    0, %i0
-
-       b,a L163
-L114:
-       cmp     %i0, 0
-       bne     L156
-        and    %o4, -4, %g2
-
-       add     %o0, %g2, %o0
-       add     %i2, %g2, %i2
-       and     %o4, 3, %o4
-L72:
-       cmp     %o4, 0
-       be      L156
-        mov    0, %i0
-
-       ldub    [%o0], %g3
-L165:
-       ldub    [%i2], %g2
+       ldub    [%o0], %g7
+       ldub    [%o1], %g3
+       sub     %o2, 1, %o2
        add     %o0, 1, %o0
-
-       subcc   %g3, %g2, %i0
-       bne     L156
-        add    %i2, 1, %i2
-
-       addcc   %o4, -1, %o4
-       bne,a   L165
-        ldub   [%o0], %g3
-
-       mov     0, %i0
-L156:
-       ret
-       restore
-#endif
+       add     %o1, 1, %o1
+       subcc   %g7, %g3, %g3
+       BRANCH32(be, pt, 1b)
+        cmp    %o2, 0
+       retl
+        mov    %g3, %o0
+2:     retl
+        mov    0, %o0
+ENDPROC(memcmp)
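The rewritten memcmp is a plain byte-at-a-time loop. Its C equivalent (a sketch, not the kernel source) is:

	/* Sketch: what the memcmp assembly above computes. */
	#include <stddef.h>

	static int memcmp_model(const void *s1, const void *s2, size_t n)
	{
		const unsigned char *p1 = s1, *p2 = s2;

		while (n--) {
			int diff = *p1++ - *p2++;	/* ldub/ldub/subcc    */
			if (diff)
				return diff;		/* retl; mov %g3, %o0 */
		}
		return 0;				/* 2: retl; mov 0, %o0 */
	}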
similarity index 92%
rename from arch/sparc64/lib/rwsem.S
rename to arch/sparc/lib/rwsem_64.S
index 1a4cc5654de4668a5d751644115031e87555a53d..91a7d29a79d55cd295651dc1115b7c12f842060b 100644 (file)
@@ -17,7 +17,6 @@ __down_read:
        bne,pn          %icc, 1b
         add            %g7, 1, %g7
        cmp             %g7, 0
-       membar          #StoreLoad | #StoreStore
        bl,pn           %icc, 3f
         nop
 2:
@@ -42,7 +41,6 @@ __down_read_trylock:
        cmp             %g1, %g7
        bne,pn          %icc, 1b
         mov            1, %o1
-       membar          #StoreLoad | #StoreStore
 2:     retl
         mov            %o1, %o0
        .size           __down_read_trylock, .-__down_read_trylock
@@ -58,7 +56,6 @@ __down_write:
        cmp             %g3, %g7
        bne,pn          %icc, 1b
         cmp            %g7, 0
-       membar          #StoreLoad | #StoreStore
        bne,pn          %icc, 3f
         nop
 2:     retl
@@ -85,7 +82,6 @@ __down_write_trylock:
        cmp             %g3, %g7
        bne,pn          %icc, 1b
         mov            1, %o1
-       membar          #StoreLoad | #StoreStore
 2:     retl
         mov            %o1, %o0
        .size           __down_write_trylock, .-__down_write_trylock
@@ -99,7 +95,6 @@ __up_read:
        cmp             %g1, %g7
        bne,pn          %icc, 1b
         cmp            %g7, 0
-       membar          #StoreLoad | #StoreStore
        bl,pn           %icc, 3f
         nop
 2:     retl
@@ -129,7 +124,6 @@ __up_write:
        bne,pn          %icc, 1b
         sub            %g7, %g1, %g7
        cmp             %g7, 0
-       membar          #StoreLoad | #StoreStore
        bl,pn           %icc, 3f
         nop
 2:
@@ -155,7 +149,6 @@ __downgrade_write:
        bne,pn          %icc, 1b
         sub            %g7, %g1, %g7
        cmp             %g7, 0
-       membar          #StoreLoad | #StoreStore
        bl,pn           %icc, 3f
         nop
 2:
index ed9a763368cd4f54632e2ad6e40472d53a3f889c..536f83507fbff1dc669f998f5e6b7336812225a5 100644 (file)
@@ -1,51 +1,40 @@
 /* strlen.S: Sparc optimized strlen code
  * Hand optimized from GNU libc's strlen
  * Copyright (C) 1991,1996 Free Software Foundation
- * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
- * Copyright (C) 1996 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
+ * Copyright (C) 1996,2008 David S. Miller (davem@davemloft.net)
+ * Copyright (C) 1996, 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
  */
 
+#include <linux/linkage.h>
+#include <asm/asm.h>
+
 #define LO_MAGIC 0x01010101
 #define HI_MAGIC 0x80808080
 
-0:
+       .text
+ENTRY(strlen)
+       mov     %o0, %o1
+       andcc   %o0, 3, %g0
+       BRANCH32(be, pt, 9f)
+        sethi  %hi(HI_MAGIC), %o4
        ldub    [%o0], %o5
-       cmp     %o5, 0
-       be      1f
+       BRANCH_REG_ZERO(pn, %o5, 11f)
         add    %o0, 1, %o0
        andcc   %o0, 3, %g0
-       be      4f
+       BRANCH32(be, pn, 4f)
         or     %o4, %lo(HI_MAGIC), %o3
        ldub    [%o0], %o5
-       cmp     %o5, 0
-       be      2f
+       BRANCH_REG_ZERO(pn, %o5, 12f)
         add    %o0, 1, %o0
        andcc   %o0, 3, %g0
-       be      5f
+       BRANCH32(be, pt, 5f)
         sethi  %hi(LO_MAGIC), %o4
        ldub    [%o0], %o5
-       cmp     %o5, 0
-       be      3f
+       BRANCH_REG_ZERO(pn, %o5, 13f)
         add    %o0, 1, %o0
-       b       8f
+       BRANCH32(ba, pt, 8f)
         or     %o4, %lo(LO_MAGIC), %o2
-1:
-       retl
-        mov    0, %o0
-2:
-       retl
-        mov    1, %o0
-3:
-       retl
-        mov    2, %o0
-
-       .align 4
-       .global strlen
-strlen:
-       mov     %o0, %o1
-       andcc   %o0, 3, %g0
-       bne     0b
-        sethi  %hi(HI_MAGIC), %o4
+9:
        or      %o4, %lo(HI_MAGIC), %o3
 4:
        sethi   %hi(LO_MAGIC), %o4
@@ -56,26 +45,36 @@ strlen:
 2:
        sub     %o5, %o2, %o4
        andcc   %o4, %o3, %g0
-       be      8b
+       BRANCH32(be, pt, 8b)
         add    %o0, 4, %o0
 
        /* Check every byte. */
-       srl     %o5, 24, %g5
-       andcc   %g5, 0xff, %g0
-       be      1f
+       srl     %o5, 24, %g7
+       andcc   %g7, 0xff, %g0
+       BRANCH32(be, pn, 1f)
         add    %o0, -4, %o4
-       srl     %o5, 16, %g5
-       andcc   %g5, 0xff, %g0
-       be      1f
+       srl     %o5, 16, %g7
+       andcc   %g7, 0xff, %g0
+       BRANCH32(be, pn, 1f)
         add    %o4, 1, %o4
-       srl     %o5, 8, %g5
-       andcc   %g5, 0xff, %g0
-       be      1f
+       srl     %o5, 8, %g7
+       andcc   %g7, 0xff, %g0
+       BRANCH32(be, pn, 1f)
         add    %o4, 1, %o4
        andcc   %o5, 0xff, %g0
-       bne,a   2b
+       BRANCH32_ANNUL(bne, pt, 2b)
         ld     [%o0], %o5
        add     %o4, 1, %o4
 1:
        retl
         sub    %o4, %o1, %o0
+11:
+       retl
+        mov    0, %o0
+12:
+       retl
+        mov    1, %o0
+13:
+       retl
+        mov    2, %o0
+ENDPROC(strlen)
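The reworked strlen keeps the word-at-a-time scan; LO_MAGIC and HI_MAGIC implement the classic "may this word contain a zero byte?" screen, and the per-byte checks that follow catch the false positives that screen can produce. A C sketch of the test itself:

	/* Sketch: zero-byte screen behind the 'sub'/'andcc' pair above. */
	#define LO_MAGIC 0x01010101u
	#define HI_MAGIC 0x80808080u

	static int word_may_contain_zero_byte(unsigned int w)
	{
		/* Subtracting LO_MAGIC borrows through any 0x00 byte and sets
		 * its top bit; HI_MAGIC masks those top bits.  Zero means the
		 * word is definitely NUL-free, non-zero means "check each byte". */
		return ((w - LO_MAGIC) & HI_MAGIC) != 0;
	}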
similarity index 100%
rename from arch/sparc64/lib/xor.S
rename to arch/sparc/lib/xor.S
index 8136987977f45ce598f02f41c65a168254b59270..b9085ecbb27b7f1f8a373af9cd0504f5bde1b0a0 100644 (file)
@@ -2,7 +2,7 @@
 # Makefile for the FPU instruction emulation.
 #
 
-obj-y    := math.o
+# suppress all warnings - as math.c produces a lot!

+ccflags-y := -w
 
-EXTRA_AFLAGS := -ansi
-EXTRA_CFLAGS = -I. -Iinclude/math-emu -w
+obj-y    := math_$(BITS).o
diff --git a/arch/sparc/math-emu/ashldi3.S b/arch/sparc/math-emu/ashldi3.S
deleted file mode 100644 (file)
index 7230ff5..0000000
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * ashldi3.S:  Math-emu code creates all kinds of references to
- *              this little routine on the sparc with gcc.
- *
- * Copyright (C) 1998 Jakub Jelinek(jj@ultra.linux.cz)
- */
-
-#include <asm/cprefix.h>
-
-       .globl C_LABEL(__ashldi3)
-C_LABEL(__ashldi3):
-       tst     %o2
-       be      3f
-        mov    32, %g2
-
-       sub     %g2, %o2, %g2
-
-       tst     %g2
-       bg      1f
-        srl    %o1, %g2, %g3
-
-       clr     %o5
-       neg     %g2
-       ba      2f
-        sll    %o1, %g2, %o4
-
-1:
-       sll     %o1, %o2, %o5
-       srl     %o0, %o2, %g2
-       or      %g2, %g3, %o4
-2:
-       mov     %o4, %o0
-       mov     %o5, %o1
-3:
-       jmpl    %o7 + 8, %g0
-        nop
similarity index 99%
rename from arch/sparc/math-emu/math.c
rename to arch/sparc/math-emu/math_32.c
index 8613b3eb877c738ed71adcec2281865bcef924c2..e13f65da17dfd90875e2da6e9e29c609e21ee3a3 100644 (file)
@@ -69,7 +69,7 @@
 #include <linux/mm.h>
 #include <asm/uaccess.h>
 
-#include "sfp-util.h"
+#include "sfp-util_32.h"
 #include <math-emu/soft-fp.h>
 #include <math-emu/single.h>
 #include <math-emu/double.h>
similarity index 99%
rename from arch/sparc64/math-emu/math.c
rename to arch/sparc/math-emu/math_64.c
index add053e0f3b33fb7b87d29daa35eab815ec18115..6863c9bde25c918926d38dea78497f15f0ba2dbc 100644 (file)
@@ -16,7 +16,7 @@
 #include <asm/ptrace.h>
 #include <asm/uaccess.h>
 
-#include "sfp-util.h"
+#include "sfp-util_64.h"
 #include <math-emu/soft-fp.h>
 #include <math-emu/single.h>
 #include <math-emu/double.h>
index ea88955d97ffe6f10ee6dd085e57ae593f0f54b1..681abe0a45941a2c4f7f73f9cdc7393be24fc584 100644 (file)
@@ -1,17 +1,25 @@
 # Makefile for the linux Sparc-specific parts of the memory manager.
 #
 
-EXTRA_AFLAGS := -ansi
+asflags-y := -ansi
+ccflags-y := -Werror
 
-obj-y  := fault.o init.o loadmmu.o generic.o extable.o btfixup.o \
-           srmmu.o iommu.o io-unit.o hypersparc.o viking.o tsunami.o swift.o
+obj-$(CONFIG_SPARC64)   += ultra.o tlb.o tsb.o
+obj-y                   += fault_$(BITS).o
+obj-y                   += init_$(BITS).o
+obj-$(CONFIG_SPARC32)   += loadmmu.o
+obj-y                   += generic_$(BITS).o
+obj-$(CONFIG_SPARC32)   += extable.o btfixup.o srmmu.o iommu.o io-unit.o
+obj-$(CONFIG_SPARC32)   += hypersparc.o viking.o tsunami.o swift.o
 
-ifdef CONFIG_HIGHMEM
-obj-y  += highmem.o
-endif
+# Only used by sparc64
+obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
+
+# Only used by sparc32
+obj-$(CONFIG_HIGHMEM)   += highmem.o
 
 ifdef CONFIG_SMP
-obj-y   += nosun4c.o
+obj-$(CONFIG_SPARC32) += nosun4c.o
 else
-obj-y   += sun4c.o
+obj-$(CONFIG_SPARC32) += sun4c.o
 endif
similarity index 97%
rename from arch/sparc/mm/init.c
rename to arch/sparc/mm/init_32.c
index 677c1e187a23e57b2fd2cf370b4aa1ae8a52ad4a..fec926021f4988e7d6799152eb42606c24600035 100644 (file)
@@ -25,6 +25,7 @@
 #include <linux/pagemap.h>
 #include <linux/poison.h>
 
+#include <asm/sections.h>
 #include <asm/system.h>
 #include <asm/vac-ops.h>
 #include <asm/page.h>
@@ -48,9 +49,6 @@ unsigned long sparc_unmapped_base;
 
 struct pgtable_cache_struct pgt_quicklists;
 
-/* References to section boundaries */
-extern char __init_begin, __init_end, _start, _end, etext , edata;
-
 /* Initial ramdisk setup */
 extern unsigned int sparc_ramdisk_image;
 extern unsigned int sparc_ramdisk_size;
@@ -450,9 +448,9 @@ void __init mem_init(void)
        
        totalram_pages += totalhigh_pages;
 
-       codepages = (((unsigned long) &etext) - ((unsigned long)&_start));
+       codepages = (((unsigned long) &_etext) - ((unsigned long)&_start));
        codepages = PAGE_ALIGN(codepages) >> PAGE_SHIFT;
-       datapages = (((unsigned long) &edata) - ((unsigned long)&etext));
+       datapages = (((unsigned long) &_edata) - ((unsigned long)&_etext));
        datapages = PAGE_ALIGN(datapages) >> PAGE_SHIFT;
        initpages = (((unsigned long) &__init_end) - ((unsigned long) &__init_begin));
        initpages = PAGE_ALIGN(initpages) >> PAGE_SHIFT;
@@ -476,8 +474,10 @@ void __init mem_init(void)
 void free_initmem (void)
 {
        unsigned long addr;
+       unsigned long freed;
 
        addr = (unsigned long)(&__init_begin);
+       freed = (unsigned long)(&__init_end) - addr;
        for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
                struct page *p;
 
@@ -490,8 +490,8 @@ void free_initmem (void)
                totalram_pages++;
                num_physpages++;
        }
-       printk(KERN_INFO "Freeing unused kernel memory: %dk freed\n",
-               (&__init_end - &__init_begin) >> 10);
+       printk(KERN_INFO "Freeing unused kernel memory: %ldk freed\n",
+               freed >> 10);
 }
 
 #ifdef CONFIG_BLK_DEV_INITRD
similarity index 99%
rename from arch/sparc64/mm/init.c
rename to arch/sparc/mm/init_64.c
index 185f34679110c3e373c99c8eaa3e569e5170c338..6ea73da29312e7397363a1c05237453e7e725c49 100644 (file)
@@ -50,7 +50,7 @@
 #include <asm/cpudata.h>
 #include <asm/irq.h>
 
-#include "init.h"
+#include "init_64.h"
 
 unsigned long kern_linear_pte_xor[2] __read_mostly;
 
@@ -214,7 +214,6 @@ static inline void set_dcache_dirty(struct page *page, int this_cpu)
                             "or        %%g1, %0, %%g1\n\t"
                             "casx      [%2], %%g7, %%g1\n\t"
                             "cmp       %%g7, %%g1\n\t"
-                            "membar    #StoreLoad | #StoreStore\n\t"
                             "bne,pn    %%xcc, 1b\n\t"
                             " nop"
                             : /* no outputs */
@@ -236,7 +235,6 @@ static inline void clear_dcache_dirty_cpu(struct page *page, unsigned long cpu)
                             " andn     %%g7, %1, %%g1\n\t"
                             "casx      [%2], %%g7, %%g1\n\t"
                             "cmp       %%g7, %%g1\n\t"
-                            "membar    #StoreLoad | #StoreStore\n\t"
                             "bne,pn    %%xcc, 1b\n\t"
                             " nop\n"
                             "2:"
@@ -956,7 +954,7 @@ int of_node_to_nid(struct device_node *dp)
        return nid;
 }
 
-static void __init add_node_ranges(void)
+static void add_node_ranges(void)
 {
        int i;
 
index daadf5f88050bb8a11ab728d8a2460233e2914ce..005e758a4db7fe9cb381c3ddd3955137298e4fad 100644 (file)
@@ -156,8 +156,8 @@ static void iounit_get_scsi_sgl(struct device *dev, struct scatterlist *sg, int
        spin_lock_irqsave(&iounit->lock, flags);
        while (sz != 0) {
                --sz;
-               sg->dvma_address = iounit_get_area(iounit, (unsigned long) sg_virt(sg), sg->length);
-               sg->dvma_length = sg->length;
+               sg->dma_address = iounit_get_area(iounit, (unsigned long) sg_virt(sg), sg->length);
+               sg->dma_length = sg->length;
                sg = sg_next(sg);
        }
        spin_unlock_irqrestore(&iounit->lock, flags);
@@ -186,8 +186,8 @@ static void iounit_release_scsi_sgl(struct device *dev, struct scatterlist *sg,
        spin_lock_irqsave(&iounit->lock, flags);
        while (sz != 0) {
                --sz;
-               len = ((sg->dvma_address & ~PAGE_MASK) + sg->length + (PAGE_SIZE-1)) >> PAGE_SHIFT;
-               vaddr = (sg->dvma_address - IOUNIT_DMA_BASE) >> PAGE_SHIFT;
+               len = ((sg->dma_address & ~PAGE_MASK) + sg->length + (PAGE_SIZE-1)) >> PAGE_SHIFT;
+               vaddr = (sg->dma_address - IOUNIT_DMA_BASE) >> PAGE_SHIFT;
                IOD(("iounit_release %08lx-%08lx\n", (long)vaddr, (long)len+vaddr));
                for (len += vaddr; vaddr < len; vaddr++)
                        clear_bit(vaddr, iounit->bmap);
index e7a499e3aa3caee28e1622024c885369d876a149..b2e6e73888b52e594e0f7de258c4de76a346d8bd 100644 (file)
@@ -245,8 +245,8 @@ static void iommu_get_scsi_sgl_noflush(struct device *dev, struct scatterlist *s
        while (sz != 0) {
                --sz;
                n = (sg->length + sg->offset + PAGE_SIZE-1) >> PAGE_SHIFT;
-               sg->dvma_address = iommu_get_one(dev, sg_page(sg), n) + sg->offset;
-               sg->dvma_length = (__u32) sg->length;
+               sg->dma_address = iommu_get_one(dev, sg_page(sg), n) + sg->offset;
+               sg->dma_length = sg->length;
                sg = sg_next(sg);
        }
 }
@@ -259,8 +259,8 @@ static void iommu_get_scsi_sgl_gflush(struct device *dev, struct scatterlist *sg
        while (sz != 0) {
                --sz;
                n = (sg->length + sg->offset + PAGE_SIZE-1) >> PAGE_SHIFT;
-               sg->dvma_address = iommu_get_one(dev, sg_page(sg), n) + sg->offset;
-               sg->dvma_length = (__u32) sg->length;
+               sg->dma_address = iommu_get_one(dev, sg_page(sg), n) + sg->offset;
+               sg->dma_length = sg->length;
                sg = sg_next(sg);
        }
 }
@@ -290,8 +290,8 @@ static void iommu_get_scsi_sgl_pflush(struct device *dev, struct scatterlist *sg
                        }
                }
 
-               sg->dvma_address = iommu_get_one(dev, sg_page(sg), n) + sg->offset;
-               sg->dvma_length = (__u32) sg->length;
+               sg->dma_address = iommu_get_one(dev, sg_page(sg), n) + sg->offset;
+               sg->dma_length = sg->length;
                sg = sg_next(sg);
        }
 }
@@ -330,8 +330,8 @@ static void iommu_release_scsi_sgl(struct device *dev, struct scatterlist *sg, i
                --sz;
 
                n = (sg->length + sg->offset + PAGE_SIZE-1) >> PAGE_SHIFT;
-               iommu_release_one(dev, sg->dvma_address & PAGE_MASK, n);
-               sg->dvma_address = 0x21212121;
+               iommu_release_one(dev, sg->dma_address & PAGE_MASK, n);
+               sg->dma_address = 0x21212121;
                sg = sg_next(sg);
        }
 }
index dd8aa36f366c909fe4ca13e3eab297f7700cb127..fe7ed08390bb720e1c4be141aa5a072f58b843ec 100644 (file)
@@ -1312,10 +1312,8 @@ void __init srmmu_paging_init(void)
 #endif
        poke_srmmu();
 
-#ifdef CONFIG_SUN_IO
        srmmu_allocate_ptable_skeleton(sparc_iomap.start, IOBASE_END);
        srmmu_allocate_ptable_skeleton(DVMA_VADDR, DVMA_END);
-#endif
 
        srmmu_allocate_ptable_skeleton(
                __fix_to_virt(__end_of_fixed_addresses - 1), FIXADDR_TOP);
@@ -1916,18 +1914,6 @@ static void __cpuinit poke_viking(void)
        mreg |= VIKING_SBENABLE;
        mreg &= ~(VIKING_ACENABLE);
        srmmu_set_mmureg(mreg);
-
-#ifdef CONFIG_SMP
-       /* Avoid unnecessary cross calls. */
-       BTFIXUPCOPY_CALL(flush_cache_all, local_flush_cache_all);
-       BTFIXUPCOPY_CALL(flush_cache_mm, local_flush_cache_mm);
-       BTFIXUPCOPY_CALL(flush_cache_range, local_flush_cache_range);
-       BTFIXUPCOPY_CALL(flush_cache_page, local_flush_cache_page);
-       BTFIXUPCOPY_CALL(__flush_page_to_ram, local_flush_page_to_ram);
-       BTFIXUPCOPY_CALL(flush_sig_insns, local_flush_sig_insns);
-       BTFIXUPCOPY_CALL(flush_page_for_dma, local_flush_page_for_dma);
-       btfixup();
-#endif
 }
 
 static void __init init_viking(void)
@@ -2272,6 +2258,17 @@ void __init ld_mmu_srmmu(void)
        BTFIXUPSET_CALL(__flush_page_to_ram, smp_flush_page_to_ram, BTFIXUPCALL_NORM);
        BTFIXUPSET_CALL(flush_sig_insns, smp_flush_sig_insns, BTFIXUPCALL_NORM);
        BTFIXUPSET_CALL(flush_page_for_dma, smp_flush_page_for_dma, BTFIXUPCALL_NORM);
+
+       if (poke_srmmu == poke_viking) {
+               /* Avoid unnecessary cross calls. */
+               BTFIXUPCOPY_CALL(flush_cache_all, local_flush_cache_all);
+               BTFIXUPCOPY_CALL(flush_cache_mm, local_flush_cache_mm);
+               BTFIXUPCOPY_CALL(flush_cache_range, local_flush_cache_range);
+               BTFIXUPCOPY_CALL(flush_cache_page, local_flush_cache_page);
+               BTFIXUPCOPY_CALL(__flush_page_to_ram, local_flush_page_to_ram);
+               BTFIXUPCOPY_CALL(flush_sig_insns, local_flush_sig_insns);
+               BTFIXUPCOPY_CALL(flush_page_for_dma, local_flush_page_for_dma);
+       }
 #endif
 
        if (sparc_cpu_model == sun4d)
index fe65aeeb3947a70fafb900fb94014311f43a59df..2ffacd67c42403393013bd73cd191eddbdc3b22f 100644 (file)
@@ -18,6 +18,7 @@
 #include <linux/seq_file.h>
 #include <linux/scatterlist.h>
 
+#include <asm/sections.h>
 #include <asm/page.h>
 #include <asm/pgalloc.h>
 #include <asm/pgtable.h>
@@ -240,9 +241,7 @@ void sun4c_complete_all_stores(void)
 
        _unused = sun4c_get_context();
        sun4c_set_context(_unused);
-#ifdef CONFIG_SUN_AUXIO
        _unused = get_auxio();
-#endif
 }
 
 /* Bootup utility functions. */
@@ -1124,8 +1123,8 @@ static void sun4c_get_scsi_sgl(struct device *dev, struct scatterlist *sg, int s
 {
        while (sz != 0) {
                --sz;
-               sg->dvma_address = (__u32)sun4c_lockarea(sg_virt(sg), sg->length);
-               sg->dvma_length = sg->length;
+               sg->dma_address = (__u32)sun4c_lockarea(sg_virt(sg), sg->length);
+               sg->dma_length = sg->length;
                sg = sg_next(sg);
        }
 }
@@ -1141,7 +1140,7 @@ static void sun4c_release_scsi_sgl(struct device *dev, struct scatterlist *sg, i
 {
        while (sz != 0) {
                --sz;
-               sun4c_unlockarea((char *)sg->dvma_address, sg->length);
+               sun4c_unlockarea((char *)sg->dma_address, sg->length);
                sg = sg_next(sg);
        }
 }
@@ -1953,7 +1952,6 @@ void sun4c_update_mmu_cache(struct vm_area_struct *vma, unsigned long address, p
 }
 
 extern void sparc_context_init(int);
-extern unsigned long end;
 extern unsigned long bootmem_init(unsigned long *pages_avail);
 extern unsigned long last_valid_pfn;
 
@@ -1964,7 +1962,7 @@ void __init sun4c_paging_init(void)
        extern struct resource sparc_iomap;
        unsigned long end_pfn, pages_avail;
 
-       kernel_end = (unsigned long) &end;
+       kernel_end = (unsigned long) &_end;
        kernel_end = SUN4C_REAL_PGDIR_ALIGN(kernel_end);
 
        pages_avail = 0;
similarity index 100%
rename from arch/sparc64/mm/tlb.c
rename to arch/sparc/mm/tlb.c
similarity index 97%
rename from arch/sparc64/mm/tsb.c
rename to arch/sparc/mm/tsb.c
index 587f8efb2e05efb0681fec3a0c0ed0d59b87357b..36a0813f9517b39ec9b7b86c17a03233d442483e 100644 (file)
@@ -41,10 +41,8 @@ void flush_tsb_kernel_range(unsigned long start, unsigned long end)
                                              KERNEL_TSB_NENTRIES);
                struct tsb *ent = &swapper_tsb[hash];
 
-               if (tag_compare(ent->tag, v)) {
+               if (tag_compare(ent->tag, v))
                        ent->tag = (1UL << TSB_TAG_INVALID_BIT);
-                       membar_storeload_storestore();
-               }
        }
 }
 
@@ -267,6 +265,18 @@ void __init pgtable_cache_init(void)
        }
 }
 
+int sysctl_tsb_ratio = -2;
+
+static unsigned long tsb_size_to_rss_limit(unsigned long new_size)
+{
+       unsigned long num_ents = (new_size / sizeof(struct tsb));
+
+       if (sysctl_tsb_ratio < 0)
+               return num_ents - (num_ents >> -sysctl_tsb_ratio);
+       else
+               return num_ents + (num_ents >> sysctl_tsb_ratio);
+}
+
 /* When the RSS of an address space exceeds tsb_rss_limit for a TSB,
  * do_sparc64_fault() invokes this routine to try and grow it.
  *
@@ -297,19 +307,14 @@ void tsb_grow(struct mm_struct *mm, unsigned long tsb_index, unsigned long rss)
 
        new_cache_index = 0;
        for (new_size = 8192; new_size < max_tsb_size; new_size <<= 1UL) {
-               unsigned long n_entries = new_size / sizeof(struct tsb);
-
-               n_entries = (n_entries * 3) / 4;
-               if (n_entries > rss)
+               new_rss_limit = tsb_size_to_rss_limit(new_size);
+               if (new_rss_limit > rss)
                        break;
-
                new_cache_index++;
        }
 
        if (new_size == max_tsb_size)
                new_rss_limit = ~0UL;
-       else
-               new_rss_limit = ((new_size / sizeof(struct tsb)) * 3) / 4;
 
 retry_tsb_alloc:
        gfp_flags = GFP_KERNEL;
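tsb_grow() now derives its RSS limit from the new tsb_size_to_rss_limit() helper instead of hard-coding the 3/4 factor; with the default sysctl_tsb_ratio of -2 the result is the same 75% of the entry count. A small worked example (it assumes the usual 16-byte sparc64 TSB entry, i.e. a tag word plus a PTE word):

	/* Sketch: RSS limits produced by the new helper for a couple of ratios. */
	#include <stdio.h>

	struct tsb_entry { unsigned long tag, pte; };	/* assumed 16 bytes */

	static unsigned long rss_limit(unsigned long bytes, int ratio)
	{
		unsigned long ents = bytes / sizeof(struct tsb_entry);

		return ratio < 0 ? ents - (ents >> -ratio)
				 : ents + (ents >> ratio);
	}

	int main(void)
	{
		printf("%lu\n", rss_limit(8192, -2));	/* 512 entries -> 384 */
		printf("%lu\n", rss_limit(8192, 2));	/* 512 entries -> 640 */
		return 0;
	}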
similarity index 99%
rename from arch/sparc64/mm/ultra.S
rename to arch/sparc/mm/ultra.S
index 86773e89dc1bbf29cf71831948e6d79b5d43188b..80c788ec7c321ef7738b005eb57f00e2f89e13fd 100644 (file)
@@ -125,7 +125,6 @@ __spitfire_flush_tlb_mm_slow:
        .align          32
        .globl          __flush_icache_page
 __flush_icache_page:   /* %o0 = phys_page */
-       membar          #StoreStore
        srlx            %o0, PAGE_SHIFT, %o0
        sethi           %uhi(PAGE_OFFSET), %g1
        sllx            %o0, PAGE_SHIFT, %o0
@@ -467,7 +466,7 @@ xcall_sync_tick:
        .previous
 
        rdpr            %pil, %g2
-       wrpr            %g0, 15, %pil
+       wrpr            %g0, PIL_NORMAL_MAX, %pil
        sethi           %hi(109f), %g7
        b,pt            %xcc, etrap_irq
 109:    or             %g7, %lo(109b), %g7
@@ -507,7 +506,6 @@ xcall_fetch_glob_regs:
        sllx            %g2, TRAP_BLOCK_SZ_SHIFT, %g2
        add             %g7, %g2, %g7
        ldx             [%g7 + TRAP_PER_CPU_THREAD], %g3
-       membar          #StoreStore
        stx             %g3, [%g1 + GR_SNAP_THREAD]
        retry
 
@@ -690,7 +688,7 @@ xcall_kgdb_capture:
        .previous
 
        rdpr            %pil, %g2
-       wrpr            %g0, 15, %pil
+       wrpr            %g0, PIL_NORMAL_MAX, %pil
        sethi           %hi(109f), %g7
        ba,pt           %xcc, etrap_irq
 109:    or             %g7, %lo(109b), %g7
index 17bb6035069b7158286ebd50462aac61d07507f4..d6e170c074fc5023b1befcbd7e9525e44f7ad96a 100644 (file)
 #include <linux/errno.h>
 #include <linux/init.h>
  
+#ifdef CONFIG_SPARC64
+#include <asm/hypervisor.h>
+#include <asm/spitfire.h>
+#include <asm/cpudata.h>
+#include <asm/irq.h>
+
+static int nmi_enabled;
+
+struct pcr_ops {
+       u64 (*read)(void);
+       void (*write)(u64);
+};
+static const struct pcr_ops *pcr_ops;
+
+static u64 direct_pcr_read(void)
+{
+       u64 val;
+
+       read_pcr(val);
+       return val;
+}
+
+static void direct_pcr_write(u64 val)
+{
+       write_pcr(val);
+}
+
+static const struct pcr_ops direct_pcr_ops = {
+       .read   = direct_pcr_read,
+       .write  = direct_pcr_write,
+};
+
+static void n2_pcr_write(u64 val)
+{
+       unsigned long ret;
+
+       ret = sun4v_niagara2_setperf(HV_N2_PERF_SPARC_CTL, val);
+       if (ret != HV_EOK)
+               write_pcr(val);
+}
+
+static const struct pcr_ops n2_pcr_ops = {
+       .read   = direct_pcr_read,
+       .write  = n2_pcr_write,
+};
+
+/* In order to commonize as much of the implementation as
+ * possible, we use PICH as our counter.  Mostly this is
+ * to accomodate Niagara-1 which can only count insn cycles
+ * in PICH.
+ */
+static u64 picl_value(void)
+{
+       u32 delta = local_cpu_data().clock_tick / HZ;
+
+       return ((u64)((0 - delta) & 0xffffffff)) << 32;
+}
+
+#define PCR_PIC_PRIV           0x00000001 /* PIC access is privileged */
+#define PCR_STRACE             0x00000002 /* Trace supervisor events  */
+#define PCR_UTRACE             0x00000004 /* Trace user events        */
+#define PCR_N2_HTRACE          0x00000008 /* Trace hypervisor events  */
+#define PCR_N2_TOE_OV0         0x00000010 /* Trap if PIC 0 overflows  */
+#define PCR_N2_TOE_OV1         0x00000020 /* Trap if PIC 1 overflows  */
+#define PCR_N2_MASK0           0x00003fc0
+#define PCR_N2_MASK0_SHIFT     6
+#define PCR_N2_SL0             0x0003c000
+#define PCR_N2_SL0_SHIFT       14
+#define PCR_N2_OV0             0x00040000
+#define PCR_N2_MASK1           0x07f80000
+#define PCR_N2_MASK1_SHIFT     19
+#define PCR_N2_SL1             0x78000000
+#define PCR_N2_SL1_SHIFT       27
+#define PCR_N2_OV1             0x80000000
+
+#define PCR_SUN4U_ENABLE       (PCR_PIC_PRIV | PCR_STRACE | PCR_UTRACE)
+#define PCR_N2_ENABLE          (PCR_PIC_PRIV | PCR_STRACE | PCR_UTRACE | \
+                                PCR_N2_TOE_OV1 | \
+                                (2 << PCR_N2_SL1_SHIFT) | \
+                                (0xff << PCR_N2_MASK1_SHIFT))
+
+static u64 pcr_enable = PCR_SUN4U_ENABLE;
+
+static void nmi_handler(struct pt_regs *regs)
+{
+       pcr_ops->write(PCR_PIC_PRIV);
+
+       if (nmi_enabled) {
+               oprofile_add_sample(regs, 0);
+
+               write_pic(picl_value());
+               pcr_ops->write(pcr_enable);
+       }
+}
+
+/* We count "clock cycle" events in the lower 32-bit PIC.
+ * Then configure it such that it overflows every HZ, and thus
+ * generates a level 15 interrupt at that frequency.
+ */
+static void cpu_nmi_start(void *_unused)
+{
+       pcr_ops->write(PCR_PIC_PRIV);
+       write_pic(picl_value());
+
+       pcr_ops->write(pcr_enable);
+}
+
+static void cpu_nmi_stop(void *_unused)
+{
+       pcr_ops->write(PCR_PIC_PRIV);
+}
+
+static int nmi_start(void)
+{
+       int err = register_perfctr_intr(nmi_handler);
+
+       if (!err) {
+               nmi_enabled = 1;
+               wmb();
+               err = on_each_cpu(cpu_nmi_start, NULL, 1);
+               if (err) {
+                       nmi_enabled = 0;
+                       wmb();
+                       on_each_cpu(cpu_nmi_stop, NULL, 1);
+                       release_perfctr_intr(nmi_handler);
+               }
+       }
+
+       return err;
+}
+
+static void nmi_stop(void)
+{
+       nmi_enabled = 0;
+       wmb();
+
+       on_each_cpu(cpu_nmi_stop, NULL, 1);
+       release_perfctr_intr(nmi_handler);
+       synchronize_sched();
+}
+
+static unsigned long perf_hsvc_group;
+static unsigned long perf_hsvc_major;
+static unsigned long perf_hsvc_minor;
+
+static int __init register_perf_hsvc(void)
+{
+       if (tlb_type == hypervisor) {
+               switch (sun4v_chip_type) {
+               case SUN4V_CHIP_NIAGARA1:
+                       perf_hsvc_group = HV_GRP_NIAG_PERF;
+                       break;
+
+               case SUN4V_CHIP_NIAGARA2:
+                       perf_hsvc_group = HV_GRP_N2_CPU;
+                       break;
+
+               default:
+                       return -ENODEV;
+               }
+
+               perf_hsvc_major = 1;
+               perf_hsvc_minor = 0;
+               if (sun4v_hvapi_register(perf_hsvc_group,
+                                        perf_hsvc_major,
+                                        &perf_hsvc_minor)) {
+                       printk(KERN_ERR "perfmon: Could not register N2 hvapi.\n");
+                       return -ENODEV;
+               }
+       }
+       return 0;
+}
+
+static void unregister_perf_hsvc(void)
+{
+       if (tlb_type != hypervisor)
+               return;
+       sun4v_hvapi_unregister(perf_hsvc_group);
+}
+
+static int oprofile_nmi_init(struct oprofile_operations *ops)
+{
+       int err = register_perf_hsvc();
+
+       if (err)
+               return err;
+
+       switch (tlb_type) {
+       case hypervisor:
+               pcr_ops = &n2_pcr_ops;
+               pcr_enable = PCR_N2_ENABLE;
+               break;
+
+       case cheetah:
+       case cheetah_plus:
+               pcr_ops = &direct_pcr_ops;
+               break;
+
+       default:
+               return -ENODEV;
+       }
+
+       ops->create_files = NULL;
+       ops->setup = NULL;
+       ops->shutdown = NULL;
+       ops->start = nmi_start;
+       ops->stop = nmi_stop;
+       ops->cpu_type = "timer";
+
+       printk(KERN_INFO "oprofile: Using perfctr based NMI timer interrupt.\n");
+
+       return 0;
+}
+#endif
+
 int __init oprofile_arch_init(struct oprofile_operations *ops)
 {
-       return -ENODEV;
+       int ret = -ENODEV;
+
+#ifdef CONFIG_SPARC64
+       ret = oprofile_nmi_init(ops);
+       if (!ret)
+               return ret;
+#endif
+
+       return ret;
 }
 
 
 void oprofile_arch_exit(void)
 {
+#ifdef CONFIG_SPARC64
+       unregister_perf_hsvc();
+#endif
 }
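The new sparc64 oprofile path programs the counter so that it overflows once per timer tick: picl_value() loads the upper 32-bit counter with (2^32 - clock_tick/HZ), so it wraps -- and raises the level 15 interrupt -- after exactly clock_tick/HZ cycles. A standalone sketch with made-up inputs (the 1.2 GHz clock and hz=100 are illustrative, not taken from the patch):

	/* Sketch: overflow arithmetic behind picl_value(). */
	#include <stdio.h>

	int main(void)
	{
		unsigned long clock_tick = 1200000000UL;	/* assumed CPU clock */
		unsigned int hz = 100;				/* assumed HZ       */
		unsigned int delta = clock_tick / hz;		/* cycles per tick  */
		unsigned long long pic =
			((unsigned long long)((0u - delta) & 0xffffffff)) << 32;

		printf("PIC start %#llx, overflows after %u cycles\n", pic, delta);
		return 0;
	}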
index 8f7e18546c974f6ba8fec1c6ec8befcc21d5d30d..1b8c073adb44fe341b883f87865b28531d61b95f 100644 (file)
@@ -1,6 +1,21 @@
 # Makefile for the Sun Boot PROM interface library under
 # Linux.
 #
+asflags-y := -ansi
+ccflags-y := -Werror
 
-lib-y := bootstr.o devmap.o devops.o init.o memory.o misc.o mp.o \
-        palloc.o ranges.o segment.o console.o printf.o tree.o
+lib-y                 := bootstr_$(BITS).o
+lib-$(CONFIG_SPARC32) += devmap.o
+lib-y                 += devops_$(BITS).o
+lib-y                 += init_$(BITS).o
+lib-$(CONFIG_SPARC32) += memory.o
+lib-y                 += misc_$(BITS).o
+lib-$(CONFIG_SPARC32) += mp.o
+lib-$(CONFIG_SPARC32) += palloc.o
+lib-$(CONFIG_SPARC32) += ranges.o
+lib-$(CONFIG_SPARC32) += segment.o
+lib-y                 += console_$(BITS).o
+lib-y                 += printf.o
+lib-y                 += tree_$(BITS).o
+lib-$(CONFIG_SPARC64) += p1275.o
+lib-$(CONFIG_SPARC64) += cif.o
similarity index 98%
rename from arch/sparc/prom/misc.c
rename to arch/sparc/prom/misc_32.c
index 49b5057b9601000660e42220eb5e49d565f83604..cf6c3f6d36c3d6bdecc41edc37aa3436e1a1726e 100644 (file)
@@ -61,9 +61,7 @@ prom_cmdline(void)
        restore_current();
        install_linux_ticker();
        spin_unlock_irqrestore(&prom_lock, flags);
-#ifdef CONFIG_SUN_AUXIO
        set_auxio(AUXIO_LED, 0);
-#endif
 }
 
 /* Drop into the prom, but completely terminate the program.
index a36ab9c5ee08758f0dbf6c667e039e6a1d403ee4..660943ee4c2ac7e431822dc1f99cf9e8bac294e7 100644 (file)
@@ -2,6 +2,7 @@
  * printf.c:  Internal prom library printf facility.
  *
  * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
+ * Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
  * Copyright (c) 2002 Pete Zaitcev (zaitcev@yahoo.com)
  *
  * We used to warn all over the code: DO NOT USE prom_printf(),
@@ -13,7 +14,6 @@
  */
 
 #include <linux/kernel.h>
-#include <linux/module.h>
 
 #include <asm/openprom.h>
 #include <asm/oplib.h>
@@ -34,7 +34,7 @@ prom_write(const char *buf, unsigned int n)
 }
 
 void
-prom_printf(char *fmt, ...)
+prom_printf(const char *fmt, ...)
 {
        va_list args;
        int i;
@@ -45,4 +45,3 @@ prom_printf(char *fmt, ...)
 
        prom_write(ppbuf, i);
 }
-EXPORT_SYMBOL(prom_printf);
similarity index 97%
rename from arch/sparc/prom/tree.c
rename to arch/sparc/prom/tree_32.c
index f228fe057b243f80dacf0948038a2311cc0e9ac5..6d81873573311a1d7b4093ae185fd062e546ea32 100644 (file)
@@ -85,7 +85,7 @@ int prom_getsibling(int node)
 /* Return the length in bytes of property 'prop' at node 'node'.
  * Return -1 on error.
  */
-int prom_getproplen(int node, char *prop)
+int prom_getproplen(int node, const char *prop)
 {
        int ret;
        unsigned long flags;
@@ -104,7 +104,7 @@ int prom_getproplen(int node, char *prop)
  * 'buffer' which has a size of 'bufsize'.  If the acquisition
  * was successful the length will be returned, else -1 is returned.
  */
-int prom_getproperty(int node, char *prop, char *buffer, int bufsize)
+int prom_getproperty(int node, const char *prop, char *buffer, int bufsize)
 {
        int plen, ret;
        unsigned long flags;
@@ -303,7 +303,7 @@ int prom_node_has_property(int node, char *prop)
 /* Set property 'pname' at node 'node' to value 'value' which has a length
  * of 'size' bytes.  Return the number of bytes the prom accepted.
  */
-int prom_setprop(int node, char *pname, char *value, int size)
+int prom_setprop(int node, const char *pname, char *value, int size)
 {
        unsigned long flags;
        int ret;
diff --git a/arch/sparc64/Kconfig b/arch/sparc64/Kconfig
deleted file mode 100644 (file)
index 3b96e70..0000000
+++ /dev/null
@@ -1,433 +0,0 @@
-# sparc64 configuration
-mainmenu "Linux Kernel Configuration for 64-bit SPARC"
-
-config SPARC
-       bool
-       default y
-       select HAVE_OPROFILE
-       select HAVE_KPROBES
-       select HAVE_KRETPROBES
-
-config SPARC64
-       bool
-       default y
-       select HAVE_FUNCTION_TRACER
-       select HAVE_IDE
-       select HAVE_LMB
-       select HAVE_ARCH_KGDB
-       select USE_GENERIC_SMP_HELPERS if SMP
-       select HAVE_ARCH_TRACEHOOK
-       select ARCH_WANT_OPTIONAL_GPIOLIB
-       select RTC_CLASS
-       select RTC_DRV_M48T59
-       select RTC_DRV_CMOS
-       select RTC_DRV_BQ4802
-       select RTC_DRV_SUN4V
-       select RTC_DRV_STARFIRE
-
-config GENERIC_TIME
-       bool
-       default y
-
-config GENERIC_CMOS_UPDATE
-       bool
-       default y
-
-config GENERIC_CLOCKEVENTS
-       bool
-       default y
-
-config GENERIC_GPIO
-       bool
-       help
-         Generic GPIO API support
-
-config 64BIT
-       def_bool y
-
-config MMU
-       bool
-       default y
-
-config IOMMU_HELPER
-       bool
-       default y
-
-config QUICKLIST
-       bool
-       default y
-
-config STACKTRACE_SUPPORT
-       bool
-       default y
-
-config LOCKDEP_SUPPORT
-       bool
-       default y
-
-config ARCH_MAY_HAVE_PC_FDC
-       bool
-       default y
-
-config ARCH_HAS_ILOG2_U32
-       bool
-       default n
-
-config ARCH_HAS_ILOG2_U64
-       bool
-       default n
-
-config AUDIT_ARCH
-       bool
-       default y
-
-config HAVE_SETUP_PER_CPU_AREA
-       def_bool y
-
-config ARCH_NO_VIRT_TO_BUS
-       def_bool y
-
-config OF
-       def_bool y
-
-config GENERIC_HARDIRQS_NO__DO_IRQ
-       bool
-       def_bool y
-
-source "init/Kconfig"
-source "kernel/Kconfig.freezer"
-
-menu "Processor type and features"
-
-choice
-       prompt "Kernel page size"
-       default SPARC64_PAGE_SIZE_8KB
-
-config SPARC64_PAGE_SIZE_8KB
-       bool "8KB"
-       help
-         This lets you select the page size of the kernel.
-
-         8KB and 64KB work quite well, since SPARC ELF sections
-         provide for up to 64KB alignment.
-
-         If you don't know what to do, choose 8KB.
-
-config SPARC64_PAGE_SIZE_64KB
-       bool "64KB"
-
-endchoice
-
-config SECCOMP
-       bool "Enable seccomp to safely compute untrusted bytecode"
-       depends on PROC_FS
-       default y
-       help
-         This kernel feature is useful for number crunching applications
-         that may need to compute untrusted bytecode during their
-         execution. By using pipes or other transports made available to
-         the process as file descriptors supporting the read/write
-         syscalls, it's possible to isolate those applications in
-         their own address space using seccomp. Once seccomp is
-         enabled via /proc/<pid>/seccomp, it cannot be disabled
-         and the task is only allowed to execute a few safe syscalls
-         defined by each seccomp mode.
-
-         If unsure, say Y. Only embedded should say N here.
-
-source kernel/Kconfig.hz
-
-config HOTPLUG_CPU
-       bool "Support for hot-pluggable CPUs"
-       depends on SMP
-       select HOTPLUG
-       help
-         Say Y here to experiment with turning CPUs off and on.  CPUs
-         can be controlled through /sys/devices/system/cpu/cpu#.
-         Say N if you want to disable CPU hotplug.
-
-config GENERIC_HARDIRQS
-       bool
-       default y
-
-source "kernel/time/Kconfig"
-
-config SMP
-       bool "Symmetric multi-processing support"
-       help
-         This enables support for systems with more than one CPU. If you have
-         a system with only one CPU, say N. If you have a system with more than
-         one CPU, say Y.
-
-         If you say N here, the kernel will run on single and multiprocessor
-         machines, but will use only one CPU of a multiprocessor machine. If
-         you say Y here, the kernel will run on single-processor machines.
-         On a single-processor machine, the kernel will run faster if you say
-         N here.
-
-         If you don't know what to do here, say N.
-
-config NR_CPUS
-       int "Maximum number of CPUs (2-1024)"
-       range 2 1024
-       depends on SMP
-       default "64"
-
-source "drivers/cpufreq/Kconfig"
-
-config US3_FREQ
-       tristate "UltraSPARC-III CPU Frequency driver"
-       depends on CPU_FREQ
-       select CPU_FREQ_TABLE
-       help
-         This adds the CPUFreq driver for UltraSPARC-III processors.
-
-         For details, take a look at <file:Documentation/cpu-freq>.
-
-         If in doubt, say N.
-
-config US2E_FREQ
-       tristate "UltraSPARC-IIe CPU Frequency driver"
-       depends on CPU_FREQ
-       select CPU_FREQ_TABLE
-       help
-         This adds the CPUFreq driver for UltraSPARC-IIe processors.
-
-         For details, take a look at <file:Documentation/cpu-freq>.
-
-         If in doubt, say N.
-
-config US3_MC
-       tristate "UltraSPARC-III Memory Controller driver"
-       default y
-       help
-         This adds a driver for the UltraSPARC-III memory controller.
-         Loading this driver allows exact mnemonic strings to be
-         printed in the event of a memory error, so that the faulty DIMM
-         on the motherboard can be matched to the error.
-
-         If in doubt, say Y, as this information can be very useful.
-
-# Global things across all Sun machines.
-config GENERIC_LOCKBREAK
-       bool
-       default y
-       depends on SMP && PREEMPT
-
-config RWSEM_GENERIC_SPINLOCK
-       bool
-
-config RWSEM_XCHGADD_ALGORITHM
-       bool
-       default y
-
-config GENERIC_FIND_NEXT_BIT
-       bool
-       default y
-
-config GENERIC_HWEIGHT
-       bool
-       default y if !ULTRA_HAS_POPULATION_COUNT
-
-config GENERIC_CALIBRATE_DELAY
-       bool
-       default y
-
-choice
-       prompt "SPARC64 Huge TLB Page Size"
-       depends on HUGETLB_PAGE
-       default HUGETLB_PAGE_SIZE_4MB
-
-config HUGETLB_PAGE_SIZE_4MB
-       bool "4MB"
-
-config HUGETLB_PAGE_SIZE_512K
-       bool "512K"
-
-config HUGETLB_PAGE_SIZE_64K
-       depends on !SPARC64_PAGE_SIZE_64KB
-       bool "64K"
-
-endchoice
-
-endmenu
-
-config NUMA
-       bool "NUMA support"
-       depends on SMP
-
-config NODES_SHIFT
-       int
-       default "4"
-       depends on NEED_MULTIPLE_NODES
-
-# Some NUMA nodes have memory ranges that span
-# other nodes.  Even though a pfn is valid and
-# between a node's start and end pfns, it may not
-# reside on that node.  See memmap_init_zone()
-# for details.
-config NODES_SPAN_OTHER_NODES
-       def_bool y
-       depends on NEED_MULTIPLE_NODES
-
-config ARCH_POPULATES_NODE_MAP
-       def_bool y
-
-config ARCH_SELECT_MEMORY_MODEL
-       def_bool y
-
-config ARCH_SPARSEMEM_ENABLE
-       def_bool y
-       select SPARSEMEM_VMEMMAP_ENABLE
-
-config ARCH_SPARSEMEM_DEFAULT
-       def_bool y
-
-source "mm/Kconfig"
-
-config ISA
-       bool
-
-config ISAPNP
-       bool
-
-config EISA
-       bool
-
-config MCA
-       bool
-
-config PCMCIA
-       tristate
-       help
-         Say Y here if you want to attach PCMCIA- or PC-cards to your Linux
-         computer.  These are credit-card size devices such as network cards,
-         modems or hard drives often used with laptop computers.  There are
-         actually two varieties of these cards: the older 16 bit PCMCIA cards
-         and the newer 32 bit CardBus cards.  If you want to use CardBus
-         cards, you need to say Y here and also to "CardBus support" below.
-
-         To use your PC-cards, you will need supporting software from David
-         Hinds' pcmcia-cs package (see the file <file:Documentation/Changes>
-         for location).  Please also read the PCMCIA-HOWTO, available from
-         <http://www.tldp.org/docs.html#howto>.
-
-         To compile this driver as modules, choose M here: the
-         modules will be called pcmcia_core and ds.
-
-config SBUS
-       bool
-       default y
-
-config SBUSCHAR
-       bool
-       default y
-
-config SUN_AUXIO
-       bool
-       default y
-
-config SUN_IO
-       bool
-       default y
-
-config SUN_LDOMS
-       bool "Sun Logical Domains support"
-       help
-         Say Y here if you want to support virtual devices via
-         Logical Domains.
-
-config PCI
-       bool "PCI support"
-       select ARCH_SUPPORTS_MSI
-       help
-         Find out whether your system includes a PCI bus. PCI is the name of
-         a bus system, i.e. the way the CPU talks to the other stuff inside
-         your box.  If you say Y here, the kernel will include drivers and
-         infrastructure code to support PCI bus devices.
-
-config PCI_DOMAINS
-       def_bool PCI
-
-config PCI_SYSCALL
-       def_bool PCI
-
-source "drivers/pci/Kconfig"
-
-config SUN_OPENPROMFS
-       tristate "Openprom tree appears in /proc/openprom"
-       help
-         If you say Y, the OpenPROM device tree will be available as a
-         virtual file system, which you can mount to /proc/openprom by "mount
-         -t openpromfs none /proc/openprom".
-
-         To compile the /proc/openprom support as a module, choose M here: the
-         module will be called openpromfs.  If unsure, choose M.
-
-menu "Executable file formats"
-
-source "fs/Kconfig.binfmt"
-
-config COMPAT
-       bool
-       default y
-       select COMPAT_BINFMT_ELF
-
-config SYSVIPC_COMPAT
-       bool
-       depends on COMPAT && SYSVIPC
-       default y
-
-endmenu
-
-config SCHED_SMT
-       bool "SMT (Hyperthreading) scheduler support"
-       depends on SMP
-       default y
-       help
-         SMT scheduler support improves the CPU scheduler's decision making
-         when dealing with SPARC cpus at a cost of slightly increased overhead
-         in some places. If unsure say N here.
-
-config SCHED_MC
-       bool "Multi-core scheduler support"
-       depends on SMP
-       default y
-       help
-         Multi-core scheduler support improves the CPU scheduler's decision
-         making when dealing with multi-core CPU chips at a cost of slightly
-         increased overhead in some places. If unsure say N here.
-
-source "kernel/Kconfig.preempt"
-
-config CMDLINE_BOOL
-       bool "Default bootloader kernel arguments"
-
-config CMDLINE
-       string "Initial kernel command string"
-       depends on CMDLINE_BOOL
-       default "console=ttyS0,9600 root=/dev/sda1"
-       help
-         Say Y here if you want to be able to pass default arguments to
-         the kernel. This will be overridden by the bootloader, if you
-         use one (such as SILO). This is most useful if you want to boot
-         a kernel from TFTP, and want default options to be available
-         without having to pass them on the command line.
-
-         NOTE: This option WILL override the PROM bootargs setting!
-
-source "net/Kconfig"
-
-source "drivers/Kconfig"
-
-source "drivers/sbus/char/Kconfig"
-
-source "fs/Kconfig"
-
-source "arch/sparc64/Kconfig.debug"
-
-source "security/Kconfig"
-
-source "crypto/Kconfig"
-
-source "lib/Kconfig"
diff --git a/arch/sparc64/Kconfig.debug b/arch/sparc64/Kconfig.debug
deleted file mode 100644 (file)
index c40515c..0000000
+++ /dev/null
@@ -1,44 +0,0 @@
-menu "Kernel hacking"
-
-config TRACE_IRQFLAGS_SUPPORT
-       bool
-       default y
-
-source "lib/Kconfig.debug"
-
-config DEBUG_STACK_USAGE
-       bool "Enable stack utilization instrumentation"
-       depends on DEBUG_KERNEL
-       help
-         Enables the display of the minimum amount of free stack which each
-         task has ever had available in the sysrq-T and sysrq-P debug output.
-
-         This option will slow down process creation somewhat.
-
-config DEBUG_DCFLUSH
-       bool "D-cache flush debugging"
-       depends on DEBUG_KERNEL
-
-config STACK_DEBUG
-       depends on DEBUG_KERNEL
-       bool "Stack Overflow Detection Support"
-
-config DEBUG_PAGEALLOC
-       bool "Debug page memory allocations"
-       depends on DEBUG_KERNEL && !HIBERNATION
-       help
-         Unmap pages from the kernel linear mapping after free_pages().
-         This results in a large slowdown, but helps to find certain types
-         of memory corruptions.
-
-config MCOUNT
-       bool
-       depends on STACK_DEBUG || FUNCTION_TRACER
-       default y
-
-config FRAME_POINTER
-       bool
-       depends on MCOUNT
-       default y
-
-endmenu
diff --git a/arch/sparc64/Makefile b/arch/sparc64/Makefile
deleted file mode 100644 (file)
index c7214ab..0000000
+++ /dev/null
@@ -1,48 +0,0 @@
-# sparc64/Makefile
-#
-# Makefile for the architecture dependent flags and dependencies on the
-# 64-bit Sparc.
-#
-# Copyright (C) 1996,1998 David S. Miller (davem@caip.rutgers.edu)
-# Copyright (C) 1998 Jakub Jelinek (jj@ultra.linux.cz)
-#
-
-CHECKFLAGS     += -D__sparc__ -D__sparc_v9__ -D__arch64__ -m64
-
-# Undefine sparc when processing vmlinux.lds - it is used
-# And teach CPP we are doing 64 bit builds (for this case)
-CPPFLAGS_vmlinux.lds += -m64 -Usparc
-
-LDFLAGS                := -m elf64_sparc
-
-KBUILD_CFLAGS += -m64 -pipe -mno-fpu -mcpu=ultrasparc -mcmodel=medlow \
-       -ffixed-g4 -ffixed-g5 -fcall-used-g7 -Wno-sign-compare \
-       -Wa,--undeclared-regs
-KBUILD_CFLAGS += $(call cc-option,-mtune=ultrasparc3)
-KBUILD_AFLAGS += -m64 -mcpu=ultrasparc -Wa,--undeclared-regs
-
-ifeq ($(CONFIG_MCOUNT),y)
-  KBUILD_CFLAGS += -pg
-endif
-
-head-y := arch/sparc64/kernel/head.o arch/sparc64/kernel/init_task.o
-
-core-y                         += arch/sparc64/kernel/ arch/sparc64/mm/
-core-y                         += arch/sparc64/math-emu/
-libs-y                         += arch/sparc64/prom/ arch/sparc64/lib/
-drivers-$(CONFIG_OPROFILE)     += arch/sparc64/oprofile/
-
-boot := arch/sparc64/boot
-
-image tftpboot.img vmlinux.aout: vmlinux
-       $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
-
-archclean:
-       $(Q)$(MAKE) $(clean)=$(boot)
-
-define archhelp
-  echo  '* vmlinux       - Standard sparc64 kernel'
-  echo  '  vmlinux.aout  - a.out kernel for sparc64'
-  echo  '  tftpboot.img  - Image prepared for tftp'
-endef
-
diff --git a/arch/sparc64/boot/Makefile b/arch/sparc64/boot/Makefile
deleted file mode 100644 (file)
index 0458b52..0000000
+++ /dev/null
@@ -1,33 +0,0 @@
-# Makefile for the Sparc64 boot stuff.
-#
-# Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
-# Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
-
-ROOT_IMG       := /usr/src/root.img
-ELFTOAOUT      := elftoaout
-
-hostprogs-y    := piggyback
-targets                := image tftpboot.img vmlinux.aout
-
-quiet_cmd_elftoaout = ELF2AOUT $@
-      cmd_elftoaout = $(ELFTOAOUT) vmlinux -o $@
-quiet_cmd_piggy     = PIGGY   $@
-      cmd_piggy     = $(obj)/piggyback $@ System.map $(ROOT_IMG)
-quiet_cmd_strip     = STRIP   $@
-      cmd_strip     = $(STRIP) -R .comment -R .note -K sun4u_init -K _end -K _start vmlinux -o $@
-
-
-# Actual linking
-$(obj)/image: vmlinux FORCE
-       $(call if_changed,strip)
-       @echo '  kernel: $@ is ready'
-
-$(obj)/tftpboot.img: vmlinux $(obj)/piggyback System.map $(ROOT_IMG) FORCE
-       $(call if_changed,elftoaout)
-       $(call if_changed,piggy)
-       @echo '  kernel: $@ is ready'
-
-$(obj)/vmlinux.aout: vmlinux FORCE
-       $(call if_changed,elftoaout)
-       @echo '  kernel: $@ is ready'
-
diff --git a/arch/sparc64/kernel/Makefile b/arch/sparc64/kernel/Makefile
deleted file mode 100644 (file)
index b3e0b98..0000000
+++ /dev/null
@@ -1,36 +0,0 @@
-#
-# Makefile for the linux kernel.
-#
-
-EXTRA_AFLAGS := -ansi
-EXTRA_CFLAGS := -Werror
-
-CFLAGS_REMOVE_ftrace.o = -pg
-
-extra-y                := head.o init_task.o vmlinux.lds
-
-obj-y          := process.o setup.o cpu.o idprom.o reboot.o \
-                  traps.o auxio.o una_asm.o sysfs.o iommu.o \
-                  irq.o ptrace.o time.o sys_sparc.o signal.o \
-                  unaligned.o central.o starfire.o \
-                  power.o sbus.o sparc64_ksyms.o ebus.o \
-                  visemul.o prom.o of_device.o hvapi.o sstate.o mdesc.o
-
-obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o
-obj-$(CONFIG_STACKTRACE) += stacktrace.o
-obj-$(CONFIG_PCI)       += pci.o pci_common.o psycho_common.o \
-                           pci_psycho.o pci_sabre.o pci_schizo.o \
-                           pci_sun4v.o pci_sun4v_asm.o pci_fire.o
-obj-$(CONFIG_PCI_MSI)  += pci_msi.o
-obj-$(CONFIG_SMP)       += smp.o trampoline.o hvtramp.o
-obj-$(CONFIG_COMPAT) += sys32.o sys_sparc32.o signal32.o
-obj-$(CONFIG_MODULES) += module.o
-obj-$(CONFIG_US3_FREQ) += us3_cpufreq.o
-obj-$(CONFIG_US2E_FREQ) += us2e_cpufreq.o
-obj-$(CONFIG_US3_MC) += chmc.o
-obj-$(CONFIG_KPROBES) += kprobes.o
-obj-$(CONFIG_SUN_LDOMS) += ldc.o vio.o viohs.o ds.o
-obj-$(CONFIG_AUDIT) += audit.o
-obj-$(CONFIG_AUDIT)$(CONFIG_COMPAT) += compat_audit.o
-obj-y += $(obj-yy)
-obj-$(CONFIG_KGDB) += kgdb.o
diff --git a/arch/sparc64/kernel/asm-offsets.c b/arch/sparc64/kernel/asm-offsets.c
deleted file mode 100644 (file)
index 9e26311..0000000
+++ /dev/null
@@ -1 +0,0 @@
-/* Dummy asm-offsets.c file. Required by kbuild and ready to be used - hint! */
diff --git a/arch/sparc64/kernel/cpu.c b/arch/sparc64/kernel/cpu.c
deleted file mode 100644 (file)
index 0c9ac83..0000000
+++ /dev/null
@@ -1,166 +0,0 @@
-/* cpu.c: Dinky routines to look for the kind of Sparc cpu
- *        we are on.
- *
- * Copyright (C) 1996, 2007, 2008 David S. Miller (davem@davemloft.net)
- */
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/sched.h>
-#include <linux/smp.h>
-#include <asm/asi.h>
-#include <asm/system.h>
-#include <asm/fpumacro.h>
-#include <asm/cpudata.h>
-#include <asm/spitfire.h>
-#include <asm/oplib.h>
-
-#include "entry.h"
-
-DEFINE_PER_CPU(cpuinfo_sparc, __cpu_data) = { 0 };
-
-struct cpu_chip_info {
-       unsigned short  manuf;
-       unsigned short  impl;
-       const char      *cpu_name;
-       const char      *fp_name;
-};
-
-static const struct cpu_chip_info cpu_chips[] = {
-       {
-               .manuf          = 0x17,
-               .impl           = 0x10,
-               .cpu_name       = "TI UltraSparc I   (SpitFire)",
-               .fp_name        = "UltraSparc I integrated FPU",
-       },
-       {
-               .manuf          = 0x22,
-               .impl           = 0x10,
-               .cpu_name       = "TI UltraSparc I   (SpitFire)",
-               .fp_name        = "UltraSparc I integrated FPU",
-       },
-       {
-               .manuf          = 0x17,
-               .impl           = 0x11,
-               .cpu_name       = "TI UltraSparc II  (BlackBird)",
-               .fp_name        = "UltraSparc II integrated FPU",
-       },
-       {
-               .manuf          = 0x17,
-               .impl           = 0x12,
-               .cpu_name       = "TI UltraSparc IIi (Sabre)",
-               .fp_name        = "UltraSparc IIi integrated FPU",
-       },
-       {
-               .manuf          = 0x17,
-               .impl           = 0x13,
-               .cpu_name       = "TI UltraSparc IIe (Hummingbird)",
-               .fp_name        = "UltraSparc IIe integrated FPU",
-       },
-       {
-               .manuf          = 0x3e,
-               .impl           = 0x14,
-               .cpu_name       = "TI UltraSparc III (Cheetah)",
-               .fp_name        = "UltraSparc III integrated FPU",
-       },
-       {
-               .manuf          = 0x3e,
-               .impl           = 0x15,
-               .cpu_name       = "TI UltraSparc III+ (Cheetah+)",
-               .fp_name        = "UltraSparc III+ integrated FPU",
-       },
-       {
-               .manuf          = 0x3e,
-               .impl           = 0x16,
-               .cpu_name       = "TI UltraSparc IIIi (Jalapeno)",
-               .fp_name        = "UltraSparc IIIi integrated FPU",
-       },
-       {
-               .manuf          = 0x3e,
-               .impl           = 0x18,
-               .cpu_name       = "TI UltraSparc IV (Jaguar)",
-               .fp_name        = "UltraSparc IV integrated FPU",
-       },
-       {
-               .manuf          = 0x3e,
-               .impl           = 0x19,
-               .cpu_name       = "TI UltraSparc IV+ (Panther)",
-               .fp_name        = "UltraSparc IV+ integrated FPU",
-       },
-       {
-               .manuf          = 0x3e,
-               .impl           = 0x22,
-               .cpu_name       = "TI UltraSparc IIIi+ (Serrano)",
-               .fp_name        = "UltraSparc IIIi+ integrated FPU",
-       },
-};
-
-#define NSPARCCHIPS ARRAY_SIZE(cpu_chips)
-
-const char *sparc_cpu_type;
-const char *sparc_fpu_type;
-
-static void __init sun4v_cpu_probe(void)
-{
-       switch (sun4v_chip_type) {
-       case SUN4V_CHIP_NIAGARA1:
-               sparc_cpu_type = "UltraSparc T1 (Niagara)";
-               sparc_fpu_type = "UltraSparc T1 integrated FPU";
-               break;
-
-       case SUN4V_CHIP_NIAGARA2:
-               sparc_cpu_type = "UltraSparc T2 (Niagara2)";
-               sparc_fpu_type = "UltraSparc T2 integrated FPU";
-               break;
-
-       default:
-               printk(KERN_WARNING "CPU: Unknown sun4v cpu type [%s]\n",
-                      prom_cpu_compatible);
-               sparc_cpu_type = "Unknown SUN4V CPU";
-               sparc_fpu_type = "Unknown SUN4V FPU";
-               break;
-       }
-}
-
-static const struct cpu_chip_info * __init find_cpu_chip(unsigned short manuf,
-                                                        unsigned short impl)
-{
-       int i;
-
-       for (i = 0; i < ARRAY_SIZE(cpu_chips); i++) {
-               const struct cpu_chip_info *p = &cpu_chips[i];
-
-               if (p->manuf == manuf && p->impl == impl)
-                       return p;
-       }
-       return NULL;
-}
-
-static int __init cpu_type_probe(void)
-{
-       if (tlb_type == hypervisor) {
-               sun4v_cpu_probe();
-       } else {
-               unsigned long ver, manuf, impl;
-               const struct cpu_chip_info *p;
-       
-               __asm__ __volatile__("rdpr %%ver, %0" : "=r" (ver));
-       
-               manuf = ((ver >> 48) & 0xffff);
-               impl = ((ver >> 32) & 0xffff);
-
-               p = find_cpu_chip(manuf, impl);
-               if (p) {
-                       sparc_cpu_type = p->cpu_name;
-                       sparc_fpu_type = p->fp_name;
-               } else {
-                       printk(KERN_ERR "CPU: Unknown chip, manuf[%lx] impl[%lx]\n",
-                              manuf, impl);
-                       sparc_cpu_type = "Unknown CPU";
-                       sparc_fpu_type = "Unknown FPU";
-               }
-       }
-       return 0;
-}
-
-arch_initcall(cpu_type_probe);
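As a side note, the manuf/impl pair that cpu_type_probe() extracts lives in the top 32 bits of the %ver register; a tiny standalone sketch of the same decode (the sample register value is invented):

    /* Sketch of the %ver field decode done in cpu_type_probe() above.
     * The sample value is invented for illustration. */
    #include <stdio.h>

    int main(void)
    {
            unsigned long long ver = 0x003e001600000000ULL; /* hypothetical %ver */
            unsigned short manuf = (ver >> 48) & 0xffff;    /* -> 0x3e */
            unsigned short impl  = (ver >> 32) & 0xffff;    /* -> 0x16 */

            /* Per the cpu_chips[] table above, (0x3e, 0x16) maps to
             * "TI UltraSparc IIIi (Jalapeno)". */
            printf("manuf=%#x impl=%#x\n", manuf, impl);
            return 0;
    }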
diff --git a/arch/sparc64/kernel/idprom.c b/arch/sparc64/kernel/idprom.c
deleted file mode 100644 (file)
index a62ff83..0000000
+++ /dev/null
@@ -1,46 +0,0 @@
-/*
- * idprom.c: Routines to load the idprom into kernel addresses and
- *           interpret the data contained within.
- *
- * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
- */
-
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/init.h>
-
-#include <asm/oplib.h>
-#include <asm/idprom.h>
-
-struct idprom *idprom;
-static struct idprom idprom_buffer;
-
-/* Calculate the IDPROM checksum (xor of the data bytes). */
-static unsigned char __init calc_idprom_cksum(struct idprom *idprom)
-{
-       unsigned char cksum, i, *ptr = (unsigned char *)idprom;
-
-       for (i = cksum = 0; i <= 0x0E; i++)
-               cksum ^= *ptr++;
-
-       return cksum;
-}
-
-/* Create a local IDPROM copy and verify integrity. */
-void __init idprom_init(void)
-{
-       prom_get_idprom((char *) &idprom_buffer, sizeof(idprom_buffer));
-
-       idprom = &idprom_buffer;
-
-       if (idprom->id_format != 0x01)  {
-               prom_printf("IDPROM: Warning, unknown format type!\n");
-       }
-
-       if (idprom->id_cksum != calc_idprom_cksum(idprom)) {
-               prom_printf("IDPROM: Warning, checksum failure (nvram=%x, calc=%x)!\n",
-                           idprom->id_cksum, calc_idprom_cksum(idprom));
-       }
-
-       printk("Ethernet address: %pM\n", idprom->id_ethaddr);
-}
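For reference, the checksum being verified is a plain XOR over the first 15 bytes (0x00-0x0E) of the IDPROM, with the stored checksum in the byte that follows (offset 0x0F in the idprom layout); a minimal sketch on an invented buffer:

    /* XOR checksum as in calc_idprom_cksum() above; the buffer contents
     * are invented for illustration. */
    #include <stdio.h>

    int main(void)
    {
            unsigned char buf[16] = { 0x01, 0x80,           /* format, machtype */
                                      0x08, 0x00, 0x20,     /* MAC ...          */
                                      0x12, 0x34, 0x56 };   /* ... rest zeroed  */
            unsigned char cksum = 0;
            int i;

            for (i = 0; i <= 0x0e; i++)
                    cksum ^= buf[i];
            buf[0x0f] = cksum;      /* where the PROM stores it */
            printf("checksum=%#x\n", cksum);
            return 0;
    }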
diff --git a/arch/sparc64/kernel/init_task.c b/arch/sparc64/kernel/init_task.c
deleted file mode 100644 (file)
index d2b3123..0000000
+++ /dev/null
@@ -1,35 +0,0 @@
-#include <linux/mm.h>
-#include <linux/fs.h>
-#include <linux/module.h>
-#include <linux/sched.h>
-#include <linux/init_task.h>
-#include <linux/mqueue.h>
-
-#include <asm/pgtable.h>
-#include <asm/uaccess.h>
-#include <asm/processor.h>
-
-static struct fs_struct init_fs = INIT_FS;
-static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
-static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
-struct mm_struct init_mm = INIT_MM(init_mm);
-
-EXPORT_SYMBOL(init_mm);
-
-/* .text section in head.S is aligned at 2 page boundary and this gets linked
- * right after that so that the init_thread_union is aligned properly as well.
- * We really don't need this special alignment like Intel does, but
- * I do it anyway for completeness.
- */
-__asm__ (".text");
-union thread_union init_thread_union = { INIT_THREAD_INFO(init_task) };
-
-/*
- * Initial task structure.
- *
- * All other task structs will be allocated on slabs in fork.c
- */
-EXPORT_SYMBOL(init_task);
-
-__asm__(".data");
-struct task_struct init_task = INIT_TASK(init_task);
diff --git a/arch/sparc64/kernel/module.c b/arch/sparc64/kernel/module.c
deleted file mode 100644 (file)
index 158484b..0000000
+++ /dev/null
@@ -1,213 +0,0 @@
-/* Kernel module help for sparc64.
- *
- * Copyright (C) 2001 Rusty Russell.
- * Copyright (C) 2002 David S. Miller.
- */
-
-#include <linux/moduleloader.h>
-#include <linux/kernel.h>
-#include <linux/elf.h>
-#include <linux/vmalloc.h>
-#include <linux/fs.h>
-#include <linux/string.h>
-#include <linux/slab.h>
-#include <linux/mm.h>
-
-#include <asm/processor.h>
-#include <asm/spitfire.h>
-
-static void *module_map(unsigned long size)
-{
-       struct vm_struct *area;
-
-       size = PAGE_ALIGN(size);
-       if (!size || size > MODULES_LEN)
-               return NULL;
-
-       area = __get_vm_area(size, VM_ALLOC, MODULES_VADDR, MODULES_END);
-       if (!area)
-               return NULL;
-
-       return __vmalloc_area(area, GFP_KERNEL, PAGE_KERNEL);
-}
-
-void *module_alloc(unsigned long size)
-{
-       void *ret;
-
-       /* We handle the zero case fine, unlike vmalloc */
-       if (size == 0)
-               return NULL;
-
-       ret = module_map(size);
-       if (!ret)
-               ret = ERR_PTR(-ENOMEM);
-       else
-               memset(ret, 0, size);
-
-       return ret;
-}
-
-/* Free memory returned from module_core_alloc/module_init_alloc */
-void module_free(struct module *mod, void *module_region)
-{
-       vfree(module_region);
-       /* FIXME: If module_region == mod->init_region, trim exception
-           table entries. */
-}
-
-/* Make generic code ignore STT_REGISTER dummy undefined symbols.  */
-int module_frob_arch_sections(Elf_Ehdr *hdr,
-                             Elf_Shdr *sechdrs,
-                             char *secstrings,
-                             struct module *mod)
-{
-       unsigned int symidx;
-       Elf64_Sym *sym;
-       const char *strtab;
-       int i;
-
-       for (symidx = 0; sechdrs[symidx].sh_type != SHT_SYMTAB; symidx++) {
-               if (symidx == hdr->e_shnum-1) {
-                       printk("%s: no symtab found.\n", mod->name);
-                       return -ENOEXEC;
-               }
-       }
-       sym = (Elf64_Sym *)sechdrs[symidx].sh_addr;
-       strtab = (char *)sechdrs[sechdrs[symidx].sh_link].sh_addr;
-
-       for (i = 1; i < sechdrs[symidx].sh_size / sizeof(Elf_Sym); i++) {
-               if (sym[i].st_shndx == SHN_UNDEF &&
-                   ELF64_ST_TYPE(sym[i].st_info) == STT_REGISTER)
-                       sym[i].st_shndx = SHN_ABS;
-       }
-       return 0;
-}
-
-int apply_relocate(Elf64_Shdr *sechdrs,
-                  const char *strtab,
-                  unsigned int symindex,
-                  unsigned int relsec,
-                  struct module *me)
-{
-       printk(KERN_ERR "module %s: non-ADD RELOCATION unsupported\n",
-              me->name);
-       return -ENOEXEC;
-}
-
-int apply_relocate_add(Elf64_Shdr *sechdrs,
-                      const char *strtab,
-                      unsigned int symindex,
-                      unsigned int relsec,
-                      struct module *me)
-{
-       unsigned int i;
-       Elf64_Rela *rel = (void *)sechdrs[relsec].sh_addr;
-       Elf64_Sym *sym;
-       u8 *location;
-       u32 *loc32;
-
-       for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
-               Elf64_Addr v;
-
-               /* This is where to make the change */
-               location = (u8 *)sechdrs[sechdrs[relsec].sh_info].sh_addr
-                       + rel[i].r_offset;
-               loc32 = (u32 *) location;
-
-               BUG_ON(((u64)location >> (u64)32) != (u64)0);
-
-               /* This is the symbol it is referring to.  Note that all
-                  undefined symbols have been resolved.  */
-               sym = (Elf64_Sym *)sechdrs[symindex].sh_addr
-                       + ELF64_R_SYM(rel[i].r_info);
-               v = sym->st_value + rel[i].r_addend;
-
-               switch (ELF64_R_TYPE(rel[i].r_info) & 0xff) {
-               case R_SPARC_64:
-                       location[0] = v >> 56;
-                       location[1] = v >> 48;
-                       location[2] = v >> 40;
-                       location[3] = v >> 32;
-                       location[4] = v >> 24;
-                       location[5] = v >> 16;
-                       location[6] = v >>  8;
-                       location[7] = v >>  0;
-                       break;
-
-               case R_SPARC_32:
-                       location[0] = v >> 24;
-                       location[1] = v >> 16;
-                       location[2] = v >>  8;
-                       location[3] = v >>  0;
-                       break;
-
-               case R_SPARC_DISP32:
-                       v -= (Elf64_Addr) location;
-                       *loc32 = v;
-                       break;
-
-               case R_SPARC_WDISP30:
-                       v -= (Elf64_Addr) location;
-                       *loc32 = (*loc32 & ~0x3fffffff) |
-                               ((v >> 2) & 0x3fffffff);
-                       break;
-
-               case R_SPARC_WDISP22:
-                       v -= (Elf64_Addr) location;
-                       *loc32 = (*loc32 & ~0x3fffff) |
-                               ((v >> 2) & 0x3fffff);
-                       break;
-
-               case R_SPARC_WDISP19:
-                       v -= (Elf64_Addr) location;
-                       *loc32 = (*loc32 & ~0x7ffff) |
-                               ((v >> 2) & 0x7ffff);
-                       break;
-
-               case R_SPARC_LO10:
-                       *loc32 = (*loc32 & ~0x3ff) | (v & 0x3ff);
-                       break;
-
-               case R_SPARC_HI22:
-                       *loc32 = (*loc32 & ~0x3fffff) |
-                               ((v >> 10) & 0x3fffff);
-                       break;
-
-               case R_SPARC_OLO10:
-                       *loc32 = (*loc32 & ~0x1fff) |
-                               (((v & 0x3ff) +
-                                 (ELF64_R_TYPE(rel[i].r_info) >> 8))
-                                & 0x1fff);
-                       break;
-
-               default:
-                       printk(KERN_ERR "module %s: Unknown relocation: %x\n",
-                              me->name,
-                              (int) (ELF64_R_TYPE(rel[i].r_info) & 0xff));
-                       return -ENOEXEC;
-               };
-       }
-       return 0;
-}
-
-int module_finalize(const Elf_Ehdr *hdr,
-                   const Elf_Shdr *sechdrs,
-                   struct module *me)
-{
-       /* Cheetah's I-cache is fully coherent.  */
-       if (tlb_type == spitfire) {
-               unsigned long va;
-
-               flushw_all();
-               for (va =  0; va < (PAGE_SIZE << 1); va += 32)
-                       spitfire_put_icache_tag(va, 0x0);
-               __asm__ __volatile__("flush %g6");
-       }
-
-       return 0;
-}
-
-void module_arch_cleanup(struct module *mod)
-{
-}
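The HI22/LO10 cases above correspond to the usual SPARC sethi/or pair for building a 32-bit constant: 22 high bits in one instruction, the low 10 in the other. A small standalone sketch of how the two relocation values recombine:

    /* How R_SPARC_HI22 and R_SPARC_LO10 split and rebuild a value,
     * mirroring the two switch cases in apply_relocate_add() above. */
    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
            uint32_t v = 0x12345678;                /* example symbol value + addend */
            uint32_t hi22 = (v >> 10) & 0x3fffff;   /* immediate for the sethi */
            uint32_t lo10 = v & 0x3ff;              /* immediate for the or/add */

            assert(((hi22 << 10) | lo10) == v);
            return 0;
    }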
diff --git a/arch/sparc64/kernel/vmlinux.lds.S b/arch/sparc64/kernel/vmlinux.lds.S
deleted file mode 100644 (file)
index 01f8096..0000000
+++ /dev/null
@@ -1,147 +0,0 @@
-/* ld script to make UltraLinux kernel */
-
-#include <asm/page.h>
-#include <asm-generic/vmlinux.lds.h>
-
-OUTPUT_FORMAT("elf64-sparc", "elf64-sparc", "elf64-sparc")
-OUTPUT_ARCH(sparc:v9a)
-ENTRY(_start)
-
-jiffies = jiffies_64;
-SECTIONS
-{
-       swapper_low_pmd_dir = 0x0000000000402000;
-       . = 0x4000;
-       .text 0x0000000000404000 : {
-               _text = .;
-               TEXT_TEXT
-               SCHED_TEXT
-               LOCK_TEXT
-               KPROBES_TEXT
-               *(.gnu.warning)
-       } = 0
-       _etext = .;
-       PROVIDE (etext = .);
-
-       RO_DATA(PAGE_SIZE)
-       .data : {
-               DATA_DATA
-               CONSTRUCTORS
-       }
-       .data1 : {
-               *(.data1)
-       }
-       . = ALIGN(64);
-       .data.cacheline_aligned : {
-               *(.data.cacheline_aligned)
-       }
-       . = ALIGN(64);
-       .data.read_mostly : {
-               *(.data.read_mostly)
-       }
-       _edata = .;
-       PROVIDE (edata = .);
-       .fixup : {
-               *(.fixup)
-       }
-       . = ALIGN(16);
-       __ex_table : {
-               __start___ex_table = .;
-               *(__ex_table)
-               __stop___ex_table = .;
-       }
-       NOTES
-
-       . = ALIGN(PAGE_SIZE);
-       .init.text : {
-               __init_begin = .;
-               _sinittext = .;
-               INIT_TEXT
-               _einittext = .;
-       }
-       .init.data : {
-               INIT_DATA
-       }
-       . = ALIGN(16);
-       .init.setup : {
-               __setup_start = .;
-               *(.init.setup)
-               __setup_end = .;
-       }
-       .initcall.init : {
-               __initcall_start = .;
-               INITCALLS
-               __initcall_end = .;
-       }
-       .con_initcall.init : {
-               __con_initcall_start = .;
-               *(.con_initcall.init)
-               __con_initcall_end = .;
-       }
-       SECURITY_INIT
-
-       . = ALIGN(4);
-       .tsb_ldquad_phys_patch : {
-               __tsb_ldquad_phys_patch = .;
-               *(.tsb_ldquad_phys_patch)
-               __tsb_ldquad_phys_patch_end = .;
-       }
-
-       .tsb_phys_patch : {
-               __tsb_phys_patch = .;
-               *(.tsb_phys_patch)
-               __tsb_phys_patch_end = .;
-       }
-
-       .cpuid_patch : {
-               __cpuid_patch = .;
-               *(.cpuid_patch)
-               __cpuid_patch_end = .;
-       }
-
-       .sun4v_1insn_patch : {
-               __sun4v_1insn_patch = .;
-               *(.sun4v_1insn_patch)
-               __sun4v_1insn_patch_end = .;
-       }
-       .sun4v_2insn_patch : {
-               __sun4v_2insn_patch = .;
-               *(.sun4v_2insn_patch)
-               __sun4v_2insn_patch_end = .;
-       }
-
-#ifdef CONFIG_BLK_DEV_INITRD
-       . = ALIGN(PAGE_SIZE);
-       .init.ramfs : {
-               __initramfs_start = .;
-               *(.init.ramfs)
-               __initramfs_end = .;
-       }
-#endif
-
-       PERCPU(PAGE_SIZE)
-
-       . = ALIGN(PAGE_SIZE);
-       __init_end = .;
-       __bss_start = .;
-       .sbss : {
-               *(.sbss)
-               *(.scommon)
-       }
-       .bss : {
-               *(.dynbss)
-               *(.bss)
-               *(COMMON)
-       }
-       _end = . ;
-       PROVIDE (end = .);
-
-       /DISCARD/ : {
-               EXIT_TEXT
-               EXIT_DATA
-               *(.exitcall.exit)
-       }
-
-       STABS_DEBUG
-       DWARF_DEBUG
-}
diff --git a/arch/sparc64/lib/Makefile b/arch/sparc64/lib/Makefile
deleted file mode 100644 (file)
index f095e13..0000000
+++ /dev/null
@@ -1,23 +0,0 @@
-#
-# Makefile for Sparc64 library files..
-#
-
-EXTRA_AFLAGS := -ansi
-EXTRA_CFLAGS := -Werror
-
-lib-y := PeeCeeI.o copy_page.o clear_page.o strlen.o strncmp.o \
-        memscan.o strncpy_from_user.o strlen_user.o memcmp.o checksum.o \
-        bzero.o csum_copy.o csum_copy_from_user.o csum_copy_to_user.o \
-        VISsave.o atomic.o bitops.o \
-        U1memcpy.o U1copy_from_user.o U1copy_to_user.o \
-        U3memcpy.o U3copy_from_user.o U3copy_to_user.o U3patch.o \
-        NGmemcpy.o NGcopy_from_user.o NGcopy_to_user.o NGpatch.o \
-        NGpage.o NGbzero.o \
-        NG2memcpy.o NG2copy_from_user.o NG2copy_to_user.o NG2patch.o \
-        NG2page.o \
-        GENmemcpy.o GENcopy_from_user.o GENcopy_to_user.o GENpatch.o \
-        GENpage.o GENbzero.o \
-        copy_in_user.o user_fixup.o memmove.o \
-        mcount.o ipcsum.o rwsem.o xor.o
-
-obj-y += iomap.o
diff --git a/arch/sparc64/lib/iomap.c b/arch/sparc64/lib/iomap.c
deleted file mode 100644 (file)
index 7120ebb..0000000
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
- * Implement the sparc64 iomap interfaces
- */
-#include <linux/pci.h>
-#include <linux/module.h>
-#include <asm/io.h>
-
-/* Create a virtual mapping cookie for an IO port range */
-void __iomem *ioport_map(unsigned long port, unsigned int nr)
-{
-       return (void __iomem *) (unsigned long) port;
-}
-
-void ioport_unmap(void __iomem *addr)
-{
-       /* Nothing to do */
-}
-EXPORT_SYMBOL(ioport_map);
-EXPORT_SYMBOL(ioport_unmap);
-
-/* Create a virtual mapping cookie for a PCI BAR (memory or IO) */
-void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
-{
-       resource_size_t start = pci_resource_start(dev, bar);
-       resource_size_t len = pci_resource_len(dev, bar);
-       unsigned long flags = pci_resource_flags(dev, bar);
-
-       if (!len || !start)
-               return NULL;
-       if (maxlen && len > maxlen)
-               len = maxlen;
-       if (flags & IORESOURCE_IO)
-               return ioport_map(start, len);
-       if (flags & IORESOURCE_MEM) {
-               if (flags & IORESOURCE_CACHEABLE)
-                       return ioremap(start, len);
-               return ioremap_nocache(start, len);
-       }
-       /* What? */
-       return NULL;
-}
-
-void pci_iounmap(struct pci_dev *dev, void __iomem * addr)
-{
-       /* nothing to do */
-}
-EXPORT_SYMBOL(pci_iomap);
-EXPORT_SYMBOL(pci_iounmap);
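For context, the cookie returned by pci_iomap() above is meant to be fed to the ioread*/iowrite* accessors whether the BAR is I/O- or memory-mapped; a hypothetical driver fragment (BAR number and register offset are invented):

    /* Hypothetical probe fragment using the pci_iomap() cookie; the BAR
     * number and register offset are illustrative only. */
    #include <linux/pci.h>
    #include <linux/io.h>
    #include <linux/errno.h>

    static int example_probe(struct pci_dev *pdev)
    {
            void __iomem *regs;
            u32 id;

            regs = pci_iomap(pdev, 0, 0);   /* BAR 0, map the whole resource */
            if (!regs)
                    return -ENOMEM;

            id = ioread32(regs + 0x00);     /* made-up device ID register */
            pci_iounmap(pdev, regs);

            return id ? 0 : -ENODEV;
    }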
diff --git a/arch/sparc64/lib/memcmp.S b/arch/sparc64/lib/memcmp.S
deleted file mode 100644 (file)
index d3fdaa8..0000000
+++ /dev/null
@@ -1,28 +0,0 @@
-/*
- * Sparc64 optimized memcmp code.
- *
- * Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
- * Copyright (C) 2000 David S. Miller (davem@redhat.com)
- */
-
-       .text
-       .align  32
-       .globl  __memcmp, memcmp
-__memcmp:
-memcmp:
-       cmp     %o2, 0          ! IEU1  Group
-loop:  be,pn   %icc, ret_0     ! CTI
-        nop                    ! IEU0
-       ldub    [%o0], %g7      ! LSU   Group
-       ldub    [%o1], %g3      ! LSU   Group
-       sub     %o2, 1, %o2     ! IEU0
-       add     %o0, 1, %o0     ! IEU1
-       add     %o1, 1, %o1     ! IEU0  Group
-       subcc   %g7, %g3, %g3   ! IEU1  Group
-       be,pt   %icc, loop      ! CTI
-        cmp    %o2, 0          ! IEU1  Group
-
-ret_n0:        retl
-        mov    %g3, %o0
-ret_0: retl
-        mov    0, %o0
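Grouping annotations aside, the loop above is a plain byte-at-a-time compare that returns the difference of the first mismatching pair; the C reference version is simply:

    /* C reference for the assembly loop above. */
    #include <stddef.h>

    static int memcmp_ref(const void *s1, const void *s2, size_t n)
    {
            const unsigned char *a = s1, *b = s2;

            while (n--) {
                    int diff = *a++ - *b++;

                    if (diff)
                            return diff;
            }
            return 0;
    }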
diff --git a/arch/sparc64/lib/strlen.S b/arch/sparc64/lib/strlen.S
deleted file mode 100644 (file)
index e9ba192..0000000
+++ /dev/null
@@ -1,80 +0,0 @@
-/* strlen.S: Sparc64 optimized strlen code
- * Hand optimized from GNU libc's strlen
- * Copyright (C) 1991,1996 Free Software Foundation
- * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
- * Copyright (C) 1996, 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
- */
-
-#define LO_MAGIC 0x01010101
-#define HI_MAGIC 0x80808080
-
-       .align  32
-       .globl  strlen
-       .type   strlen,#function
-strlen:
-       mov     %o0, %o1
-       andcc   %o0, 3, %g0
-       be,pt   %icc, 9f
-        sethi  %hi(HI_MAGIC), %o4
-       ldub    [%o0], %o5
-       brz,pn  %o5, 11f
-        add    %o0, 1, %o0
-       andcc   %o0, 3, %g0
-       be,pn   %icc, 4f
-        or     %o4, %lo(HI_MAGIC), %o3
-       ldub    [%o0], %o5
-       brz,pn  %o5, 12f
-        add    %o0, 1, %o0
-       andcc   %o0, 3, %g0
-       be,pt   %icc, 5f
-        sethi  %hi(LO_MAGIC), %o4
-       ldub    [%o0], %o5
-       brz,pn  %o5, 13f
-        add    %o0, 1, %o0
-       ba,pt   %icc, 8f
-        or     %o4, %lo(LO_MAGIC), %o2
-9:
-       or      %o4, %lo(HI_MAGIC), %o3
-4:
-       sethi   %hi(LO_MAGIC), %o4
-5:
-       or      %o4, %lo(LO_MAGIC), %o2
-8:
-       ld      [%o0], %o5
-2:
-       sub     %o5, %o2, %o4
-       andcc   %o4, %o3, %g0
-       be,pt   %icc, 8b
-        add    %o0, 4, %o0
-
-       /* Check every byte. */
-       srl     %o5, 24, %g7
-       andcc   %g7, 0xff, %g0
-       be,pn   %icc, 1f
-        add    %o0, -4, %o4
-       srl     %o5, 16, %g7
-       andcc   %g7, 0xff, %g0
-       be,pn   %icc, 1f
-        add    %o4, 1, %o4
-       srl     %o5, 8, %g7
-       andcc   %g7, 0xff, %g0
-       be,pn   %icc, 1f
-        add    %o4, 1, %o4
-       andcc   %o5, 0xff, %g0
-       bne,a,pt %icc, 2b
-        ld     [%o0], %o5
-       add     %o4, 1, %o4
-1:
-       retl
-        sub    %o4, %o1, %o0
-11:
-       retl
-        mov    0, %o0
-12:
-       retl
-        mov    1, %o0
-13:
-       retl
-        mov    2, %o0
-
-       .size   strlen, .-strlen
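The LO_MAGIC/HI_MAGIC pair implements the word-at-a-time zero-byte probe: if ((word - 0x01010101) & 0x80808080) is zero, no byte of the word can be zero; a nonzero result may be a false positive (bytes >= 0x81 trip it too), which is why the code falls into the "Check every byte" block. A C sketch of the same scheme, assuming for brevity that the string starts 4-byte aligned (the assembly handles the unaligned prefix first):

    /* Word-at-a-time strlen sketch; assumes a 4-byte aligned start and,
     * like the assembly, may read a few bytes past the terminator. */
    #include <stddef.h>
    #include <stdint.h>

    #define LO_MAGIC 0x01010101u
    #define HI_MAGIC 0x80808080u

    static size_t strlen_wordwise(const char *s)
    {
            const uint32_t *w = (const uint32_t *)s;
            size_t len = 0;

            for (;;) {
                    uint32_t v = *w++;

                    if (((v - LO_MAGIC) & HI_MAGIC) == 0) {
                            len += 4;       /* no byte here can be zero */
                            continue;
                    }
                    /* Possible zero byte: confirm one byte at a time. */
                    for (int i = 0; i < 4; i++, len++)
                            if (((const char *)(w - 1))[i] == '\0')
                                    return len;
                    /* False positive (e.g. a 0xff byte): keep scanning. */
            }
    }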
diff --git a/arch/sparc64/math-emu/Makefile b/arch/sparc64/math-emu/Makefile
deleted file mode 100644 (file)
index cc5cb9b..0000000
+++ /dev/null
@@ -1,7 +0,0 @@
-#
-# Makefile for the FPU instruction emulation.
-#
-
-obj-y    := math.o
-
-EXTRA_CFLAGS = -Iinclude/math-emu -w
diff --git a/arch/sparc64/mm/Makefile b/arch/sparc64/mm/Makefile
deleted file mode 100644 (file)
index 68d04c0..0000000
+++ /dev/null
@@ -1,9 +0,0 @@
-# Makefile for the linux Sparc64-specific parts of the memory manager.
-#
-
-EXTRA_AFLAGS := -ansi
-EXTRA_CFLAGS := -Werror
-
-obj-y    := ultra.o tlb.o tsb.o fault.o init.o generic.o
-
-obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
diff --git a/arch/sparc64/oprofile/Makefile b/arch/sparc64/oprofile/Makefile
deleted file mode 100644 (file)
index e9feca1..0000000
+++ /dev/null
@@ -1,9 +0,0 @@
-obj-$(CONFIG_OPROFILE) += oprofile.o
-
-DRIVER_OBJS = $(addprefix ../../../drivers/oprofile/, \
-               oprof.o cpu_buffer.o buffer_sync.o \
-               event_buffer.o oprofile_files.o \
-               oprofilefs.o oprofile_stats.o \
-               timer_int.o )
-
-oprofile-y                             := $(DRIVER_OBJS) init.o
diff --git a/arch/sparc64/oprofile/init.c b/arch/sparc64/oprofile/init.c
deleted file mode 100644 (file)
index 17bb603..0000000
+++ /dev/null
@@ -1,23 +0,0 @@
-/**
- * @file init.c
- *
- * @remark Copyright 2002 OProfile authors
- * @remark Read the file COPYING
- *
- * @author John Levon <levon@movementarian.org>
- */
-
-#include <linux/kernel.h>
-#include <linux/oprofile.h>
-#include <linux/errno.h>
-#include <linux/init.h>
-int __init oprofile_arch_init(struct oprofile_operations *ops)
-{
-       return -ENODEV;
-}
-
-
-void oprofile_arch_exit(void)
-{
-}
diff --git a/arch/sparc64/prom/Makefile b/arch/sparc64/prom/Makefile
deleted file mode 100644 (file)
index 8c94483..0000000
+++ /dev/null
@@ -1,9 +0,0 @@
-# Makefile for the Sun Boot PROM interface library under
-# Linux.
-#
-
-EXTRA_AFLAGS := -ansi
-EXTRA_CFLAGS := -Werror
-
-lib-y   := bootstr.o devops.o init.o misc.o \
-          tree.o console.o printf.o p1275.o cif.o
diff --git a/arch/sparc64/prom/printf.c b/arch/sparc64/prom/printf.c
deleted file mode 100644 (file)
index 660943e..0000000
+++ /dev/null
@@ -1,47 +0,0 @@
-/*
- * printf.c:  Internal prom library printf facility.
- *
- * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
- * Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
- * Copyright (c) 2002 Pete Zaitcev (zaitcev@yahoo.com)
- *
- * We used to warn all over the code: DO NOT USE prom_printf(),
- * and yet people do. Anton's banking code was outputting banks
- * with prom_printf for most of the 2.4 lifetime. Since an effective
- * stick is not available, we deployed a carrot: an early printk
- * through PROM by means of -p boot option. This ought to fix it.
- * USE printk; if you need, deploy -p.
- */
-
-#include <linux/kernel.h>
-
-#include <asm/openprom.h>
-#include <asm/oplib.h>
-
-static char ppbuf[1024];
-
-void
-prom_write(const char *buf, unsigned int n)
-{
-       char ch;
-
-       while (n != 0) {
-               --n;
-               if ((ch = *buf++) == '\n')
-                       prom_putchar('\r');
-               prom_putchar(ch);
-       }
-}
-
-void
-prom_printf(const char *fmt, ...)
-{
-       va_list args;
-       int i;
-
-       va_start(args, fmt);
-       i = vscnprintf(ppbuf, sizeof(ppbuf), fmt, args);
-       va_end(args);
-
-       prom_write(ppbuf, i);
-}
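The only transformation prom_write() applies is the usual console newline expansion; a trivial standalone illustration with stdio standing in for prom_putchar():

    /* '\n' -> '\r\n' expansion as done by prom_write() above,
     * with putchar() standing in for prom_putchar(). */
    #include <stdio.h>

    static void example_write(const char *buf, unsigned int n)
    {
            while (n--) {
                    char ch = *buf++;

                    if (ch == '\n')
                            putchar('\r');
                    putchar(ch);
            }
    }

    int main(void)
    {
            example_write("hello\n", 6);
            return 0;
    }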
index 753346e2cdfd64d5d472f3388014952f5d0ca4ff..ae5f94d6317d584c051fbcea83533431e4b635d7 100644 (file)
@@ -11,21 +11,21 @@ extern int get_signals(void);
 extern void block_signals(void);
 extern void unblock_signals(void);
 
-#define local_save_flags(flags) do { typecheck(unsigned long, flags); \
+#define raw_local_save_flags(flags) do { typecheck(unsigned long, flags); \
                                     (flags) = get_signals(); } while(0)
-#define local_irq_restore(flags) do { typecheck(unsigned long, flags); \
+#define raw_local_irq_restore(flags) do { typecheck(unsigned long, flags); \
                                      set_signals(flags); } while(0)
 
-#define local_irq_save(flags) do { local_save_flags(flags); \
-                                   local_irq_disable(); } while(0)
+#define raw_local_irq_save(flags) do { raw_local_save_flags(flags); \
+                                   raw_local_irq_disable(); } while(0)
 
-#define local_irq_enable() unblock_signals()
-#define local_irq_disable() block_signals()
+#define raw_local_irq_enable() unblock_signals()
+#define raw_local_irq_disable() block_signals()
 
 #define irqs_disabled()                 \
 ({                                      \
         unsigned long flags;            \
-        local_save_flags(flags);        \
+        raw_local_save_flags(flags);        \
         (flags == 0);                   \
 })
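This hunk only renames the arch-level primitives; drivers and core code keep using the non-raw wrappers, which the generic irqflags layer builds on top of the raw_* variants (adding irq-state tracing when that is configured). A minimal, hypothetical caller for orientation:

    /* Hypothetical caller: nothing changes at this level, local_irq_save()
     * simply expands to the raw_local_irq_save()/restore() pair renamed
     * above (plus tracing hooks when CONFIG_TRACE_IRQFLAGS is enabled). */
    #include <linux/irqflags.h>

    static unsigned long example_counter;

    static void example_update(void)
    {
            unsigned long flags;

            local_irq_save(flags);
            example_counter++;
            local_irq_restore(flags);
    }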
 
index 98a0ed52b5c39ec9d728d89c557353f97f986a32..0f44add3e0b7850e676cf8cc45d8514ebc94a5ea 100644 (file)
@@ -247,6 +247,28 @@ config X86_HAS_BOOT_CPU_ID
        def_bool y
        depends on X86_VOYAGER
 
+config SPARSE_IRQ
+       bool "Support sparse irq numbering"
+       depends on PCI_MSI || HT_IRQ
+       help
+         This enables support for sparse irqs. This is useful for distro
+         kernels that want to define a high CONFIG_NR_CPUS value but still
+         want to have a low kernel memory footprint on smaller machines.
+
+         ( Sparse IRQs can also be beneficial on NUMA boxes, as they spread
+           out the irq_desc[] array in a more NUMA-friendly way. )
+
+         If you don't know what to do here, say N.
+
+config NUMA_MIGRATE_IRQ_DESC
+       bool "Move irq desc when changing irq smp_affinity"
+       depends on SPARSE_IRQ && NUMA
+       default n
+       help
+         This enables moving irq_desc to the cpu/node on which the irq
+         will be handled.
+
+         If you don't know what to do here, say N.
+
 config X86_FIND_SMP_CONFIG
        def_bool y
        depends on X86_MPPARSE || X86_VOYAGER
@@ -479,7 +501,7 @@ config HPET_TIMER
          The HPET provides a stable time base on SMP
          systems, unlike the TSC, but it is more expensive to access,
          as it is off-chip.  You can find the HPET spec at
-         <http://www.intel.com/hardwaredesign/hpetspec.htm>.
+         <http://www.intel.com/hardwaredesign/hpetspec_1.pdf>.
 
          You can safely choose Y here.  However, HPET will only be
          activated if the platform and the BIOS support this feature.
index dc22c0733282b9ce631250d69cbcc00e0cd46617..4035357f5b9d6b830a16b09f39d30e90810aae9e 100644 (file)
@@ -65,7 +65,7 @@ static inline struct dma_mapping_ops *get_dma_ops(struct device *dev)
                return dma_ops;
        else
                return dev->archdata.dma_ops;
-#endif /* _ASM_X86_DMA_MAPPING_H */
+#endif
 }
 
 /* Make sure we keep the same behaviour */
index e475e009ae5d4e60dc0e828ad96373ef91f2799a..7a1f44ac1f17e127e054852d2ccc8baefb27b528 100644 (file)
@@ -198,17 +198,14 @@ extern void restore_IO_APIC_setup(void);
 extern void reinit_intr_remapped_IO_APIC(int);
 #endif
 
-extern int probe_nr_irqs(void);
+extern void probe_nr_irqs_gsi(void);
 
 #else  /* !CONFIG_X86_IO_APIC */
 #define io_apic_assign_pci_irqs 0
 static const int timer_through_8259 = 0;
-static inline void ioapic_init_mappings(void) { }
+static inline void ioapic_init_mappings(void)  { }
 
-static inline int probe_nr_irqs(void)
-{
-       return NR_IRQS;
-}
+static inline void probe_nr_irqs_gsi(void)     { }
 #endif
 
 #endif /* _ASM_X86_IO_APIC_H */
index 295b13193f4df09b05aad53ec09c81c601a17a9f..a6ee9e6f530f89cc2e86a5607ceff6597c9d1757 100644 (file)
@@ -7,8 +7,6 @@ extern struct dma_mapping_ops nommu_dma_ops;
 extern int force_iommu, no_iommu;
 extern int iommu_detected;
 
-extern unsigned long iommu_nr_pages(unsigned long addr, unsigned long len);
-
 /* 10 seconds */
 #define DMAR_OPERATION_TIMEOUT ((cycles_t) tsc_khz*10*1000)
 
index 0005adb0f941f5f564cbad634538db8b149f0f46..f7ff65032b9d66aee077fcc3ddbcddd40bd4f75d 100644 (file)
 #define LAST_VM86_IRQ          15
 #define invalid_vm86_irq(irq)  ((irq) < 3 || (irq) > 15)
 
+#define NR_IRQS_LEGACY         16
+
 #if defined(CONFIG_X86_IO_APIC) && !defined(CONFIG_X86_VOYAGER)
+
+#ifndef CONFIG_SPARSE_IRQ
 # if NR_CPUS < MAX_IO_APICS
 #  define NR_IRQS (NR_VECTORS + (32 * NR_CPUS))
 # else
 #  define NR_IRQS (NR_VECTORS + (32 * MAX_IO_APICS))
 # endif
+#else
+# if (8 * NR_CPUS) > (32 * MAX_IO_APICS)
+#  define NR_IRQS (NR_VECTORS + (8 * NR_CPUS))
+# else
+#  define NR_IRQS (NR_VECTORS + (32 * MAX_IO_APICS))
+# endif
+#endif
 
 #elif defined(CONFIG_X86_VOYAGER)
 
index 647781298e7ef7cbc98b14ad75f2552b0092507f..66834c41c0493eccf1b117b1565443c10ec706b6 100644 (file)
@@ -84,6 +84,8 @@ static inline void pci_dma_burst_advice(struct pci_dev *pdev,
 static inline void early_quirks(void) { }
 #endif
 
+extern void pci_iommu_alloc(void);
+
 #endif  /* __KERNEL__ */
 
 #ifdef CONFIG_X86_32
index d02d936840a3ed8e3b7f94fade13929c9a3d6413..4da207982777e75a970fca5fee80cb2883381e4e 100644 (file)
@@ -23,7 +23,6 @@ extern int (*pci_config_write)(int seg, int bus, int dev, int fn,
                               int reg, int len, u32 value);
 
 extern void dma32_reserve_bootmem(void);
-extern void pci_iommu_alloc(void);
 
 /* The PCI address space does equal the physical memory
  * address space.  The networking and block device layers use
index 580c3ee6c58c4d0479dbce9ea491c78049255e04..4340055b755918fb8fd777df62f811ba5e26820c 100644 (file)
@@ -157,6 +157,7 @@ extern int __get_user_bad(void);
        int __ret_gu;                                                   \
        unsigned long __val_gu;                                         \
        __chk_user_ptr(ptr);                                            \
+       might_fault();                                                  \
        switch (sizeof(*(ptr))) {                                       \
        case 1:                                                         \
                __get_user_x(1, __ret_gu, __val_gu, ptr);               \
@@ -241,6 +242,7 @@ extern void __put_user_8(void);
        int __ret_pu;                                           \
        __typeof__(*(ptr)) __pu_val;                            \
        __chk_user_ptr(ptr);                                    \
+       might_fault();                                          \
        __pu_val = x;                                           \
        switch (sizeof(*(ptr))) {                               \
        case 1:                                                 \
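The new might_fault() call documents (and, with debugging enabled, helps enforce) that these helpers may sleep while faulting the user page in; a hypothetical caller showing the context they expect:

    /* Illustrative only: get_user() may sleep to fault the page in, so it
     * must run in process context without spinlocks held -- the situation
     * the added might_fault() annotation is there to catch. */
    #include <linux/uaccess.h>
    #include <linux/errno.h>

    static int example_read_flag(int __user *uptr, int *out)
    {
            int val;

            if (get_user(val, uptr))
                    return -EFAULT;

            *out = val;
            return 0;
    }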
index d095a3aeea1b44d3063f0165c956d0a156c6ca38..5e06259e90e5a736539948ae22e1de25444ba485 100644 (file)
@@ -82,8 +82,8 @@ __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
 static __always_inline unsigned long __must_check
 __copy_to_user(void __user *to, const void *from, unsigned long n)
 {
-       might_sleep();
-       return __copy_to_user_inatomic(to, from, n);
+       might_fault();
+       return __copy_to_user_inatomic(to, from, n);
 }
 
 static __always_inline unsigned long
@@ -137,7 +137,7 @@ __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
 static __always_inline unsigned long
 __copy_from_user(void *to, const void __user *from, unsigned long n)
 {
-       might_sleep();
+       might_fault();
        if (__builtin_constant_p(n)) {
                unsigned long ret;
 
@@ -159,7 +159,7 @@ __copy_from_user(void *to, const void __user *from, unsigned long n)
 static __always_inline unsigned long __copy_from_user_nocache(void *to,
                                const void __user *from, unsigned long n)
 {
-       might_sleep();
+       might_fault();
        if (__builtin_constant_p(n)) {
                unsigned long ret;
 
index f8cfd00db450f2e0f948ce7128a2d44867aebf59..84210c479fca83524c6cef4c6bc069bcff76e272 100644 (file)
@@ -29,6 +29,8 @@ static __always_inline __must_check
 int __copy_from_user(void *dst, const void __user *src, unsigned size)
 {
        int ret = 0;
+
+       might_fault();
        if (!__builtin_constant_p(size))
                return copy_user_generic(dst, (__force void *)src, size);
        switch (size) {
@@ -71,6 +73,8 @@ static __always_inline __must_check
 int __copy_to_user(void __user *dst, const void *src, unsigned size)
 {
        int ret = 0;
+
+       might_fault();
        if (!__builtin_constant_p(size))
                return copy_user_generic((__force void *)dst, src, size);
        switch (size) {
@@ -113,6 +117,8 @@ static __always_inline __must_check
 int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
 {
        int ret = 0;
+
+       might_fault();
        if (!__builtin_constant_p(size))
                return copy_user_generic((__force void *)dst,
                                         (__force void *)src, size);
index 88dd768eab6d34980d14aff41178115ed881045e..d364df03c1d6419ce4473e23b420d86167343775 100644 (file)
@@ -109,6 +109,8 @@ obj-$(CONFIG_MICROCODE)                     += microcode.o
 
 obj-$(CONFIG_X86_CHECK_BIOS_CORRUPTION) += check.o
 
+obj-$(CONFIG_SWIOTLB)                  += pci-swiotlb_64.o # NB rename without _64
+
 ###
 # 64 bit specific files
 ifeq ($(CONFIG_X86_64),y)
@@ -122,7 +124,6 @@ ifeq ($(CONFIG_X86_64),y)
         obj-$(CONFIG_GART_IOMMU)       += pci-gart_64.o aperture_64.o
         obj-$(CONFIG_CALGARY_IOMMU)    += pci-calgary_64.o tce_64.o
         obj-$(CONFIG_AMD_IOMMU)                += amd_iommu_init.o amd_iommu.o
-        obj-$(CONFIG_SWIOTLB)          += pci-swiotlb_64.o
 
         obj-$(CONFIG_PCI_MMCONFIG)     += mmconf-fam10h_64.o
 endif
index 3f0a3edf0a573a2f5f2d1051b32285ebffcdbb36..845ea097383ee4051a24b54bca8da4c29bd9f6d1 100644 (file)
@@ -813,7 +813,7 @@ int __init hpet_enable(void)
 
 out_nohpet:
        hpet_clear_mapping();
-       boot_hpet_disable = 1;
+       hpet_address = 0;
        return 0;
 }
 
@@ -836,10 +836,11 @@ static __init int hpet_late_init(void)
 
                hpet_address = force_hpet_address;
                hpet_enable();
-               if (!hpet_virt_address)
-                       return -ENODEV;
        }
 
+       if (!hpet_virt_address)
+               return -ENODEV;
+
        hpet_reserve_platform_timers(hpet_readl(HPET_ID));
 
        for_each_online_cpu(cpu) {
index 679e7bbbbcd6b07230a2e7fdbd2ebe38e04f0563..f6ea94b74da146072cca138aa824ed5a3d5eeec7 100644 (file)
@@ -108,93 +108,252 @@ static int __init parse_noapic(char *str)
 early_param("noapic", parse_noapic);
 
 struct irq_pin_list;
+
+/*
+ * This is performance-critical, we want to do it O(1)
+ *
+ * the indexing order of this array favors 1:1 mappings
+ * between pins and IRQs.
+ */
+
+struct irq_pin_list {
+       int apic, pin;
+       struct irq_pin_list *next;
+};
+
+static struct irq_pin_list *get_one_free_irq_2_pin(int cpu)
+{
+       struct irq_pin_list *pin;
+       int node;
+
+       node = cpu_to_node(cpu);
+
+       pin = kzalloc_node(sizeof(*pin), GFP_ATOMIC, node);
+       printk(KERN_DEBUG "  alloc irq_2_pin on cpu %d node %d\n", cpu, node);
+
+       return pin;
+}
+
 struct irq_cfg {
-       unsigned int irq;
        struct irq_pin_list *irq_2_pin;
        cpumask_t domain;
        cpumask_t old_domain;
        unsigned move_cleanup_count;
        u8 vector;
        u8 move_in_progress : 1;
+#ifdef CONFIG_NUMA_MIGRATE_IRQ_DESC
+       u8 move_desc_pending : 1;
+#endif
 };
 
 /* irq_cfg is indexed by the sum of all RTEs in all I/O APICs. */
+#ifdef CONFIG_SPARSE_IRQ
+static struct irq_cfg irq_cfgx[] = {
+#else
 static struct irq_cfg irq_cfgx[NR_IRQS] = {
-       [0]  = { .irq =  0, .domain = CPU_MASK_ALL, .vector = IRQ0_VECTOR,  },
-       [1]  = { .irq =  1, .domain = CPU_MASK_ALL, .vector = IRQ1_VECTOR,  },
-       [2]  = { .irq =  2, .domain = CPU_MASK_ALL, .vector = IRQ2_VECTOR,  },
-       [3]  = { .irq =  3, .domain = CPU_MASK_ALL, .vector = IRQ3_VECTOR,  },
-       [4]  = { .irq =  4, .domain = CPU_MASK_ALL, .vector = IRQ4_VECTOR,  },
-       [5]  = { .irq =  5, .domain = CPU_MASK_ALL, .vector = IRQ5_VECTOR,  },
-       [6]  = { .irq =  6, .domain = CPU_MASK_ALL, .vector = IRQ6_VECTOR,  },
-       [7]  = { .irq =  7, .domain = CPU_MASK_ALL, .vector = IRQ7_VECTOR,  },
-       [8]  = { .irq =  8, .domain = CPU_MASK_ALL, .vector = IRQ8_VECTOR,  },
-       [9]  = { .irq =  9, .domain = CPU_MASK_ALL, .vector = IRQ9_VECTOR,  },
-       [10] = { .irq = 10, .domain = CPU_MASK_ALL, .vector = IRQ10_VECTOR, },
-       [11] = { .irq = 11, .domain = CPU_MASK_ALL, .vector = IRQ11_VECTOR, },
-       [12] = { .irq = 12, .domain = CPU_MASK_ALL, .vector = IRQ12_VECTOR, },
-       [13] = { .irq = 13, .domain = CPU_MASK_ALL, .vector = IRQ13_VECTOR, },
-       [14] = { .irq = 14, .domain = CPU_MASK_ALL, .vector = IRQ14_VECTOR, },
-       [15] = { .irq = 15, .domain = CPU_MASK_ALL, .vector = IRQ15_VECTOR, },
+#endif
+       [0]  = { .domain = CPU_MASK_ALL, .vector = IRQ0_VECTOR,  },
+       [1]  = { .domain = CPU_MASK_ALL, .vector = IRQ1_VECTOR,  },
+       [2]  = { .domain = CPU_MASK_ALL, .vector = IRQ2_VECTOR,  },
+       [3]  = { .domain = CPU_MASK_ALL, .vector = IRQ3_VECTOR,  },
+       [4]  = { .domain = CPU_MASK_ALL, .vector = IRQ4_VECTOR,  },
+       [5]  = { .domain = CPU_MASK_ALL, .vector = IRQ5_VECTOR,  },
+       [6]  = { .domain = CPU_MASK_ALL, .vector = IRQ6_VECTOR,  },
+       [7]  = { .domain = CPU_MASK_ALL, .vector = IRQ7_VECTOR,  },
+       [8]  = { .domain = CPU_MASK_ALL, .vector = IRQ8_VECTOR,  },
+       [9]  = { .domain = CPU_MASK_ALL, .vector = IRQ9_VECTOR,  },
+       [10] = { .domain = CPU_MASK_ALL, .vector = IRQ10_VECTOR, },
+       [11] = { .domain = CPU_MASK_ALL, .vector = IRQ11_VECTOR, },
+       [12] = { .domain = CPU_MASK_ALL, .vector = IRQ12_VECTOR, },
+       [13] = { .domain = CPU_MASK_ALL, .vector = IRQ13_VECTOR, },
+       [14] = { .domain = CPU_MASK_ALL, .vector = IRQ14_VECTOR, },
+       [15] = { .domain = CPU_MASK_ALL, .vector = IRQ15_VECTOR, },
 };
 
-#define for_each_irq_cfg(irq, cfg)             \
-       for (irq = 0, cfg = irq_cfgx; irq < nr_irqs; irq++, cfg++)
+void __init arch_early_irq_init(void)
+{
+       struct irq_cfg *cfg;
+       struct irq_desc *desc;
+       int count;
+       int i;
+
+       cfg = irq_cfgx;
+       count = ARRAY_SIZE(irq_cfgx);
+
+       for (i = 0; i < count; i++) {
+               desc = irq_to_desc(i);
+               desc->chip_data = &cfg[i];
+       }
+}
 
+#ifdef CONFIG_SPARSE_IRQ
 static struct irq_cfg *irq_cfg(unsigned int irq)
 {
-       return irq < nr_irqs ? irq_cfgx + irq : NULL;
+       struct irq_cfg *cfg = NULL;
+       struct irq_desc *desc;
+
+       desc = irq_to_desc(irq);
+       if (desc)
+               cfg = desc->chip_data;
+
+       return cfg;
 }
 
-static struct irq_cfg *irq_cfg_alloc(unsigned int irq)
+static struct irq_cfg *get_one_free_irq_cfg(int cpu)
 {
-       return irq_cfg(irq);
+       struct irq_cfg *cfg;
+       int node;
+
+       node = cpu_to_node(cpu);
+
+       cfg = kzalloc_node(sizeof(*cfg), GFP_ATOMIC, node);
+       printk(KERN_DEBUG "  alloc irq_cfg on cpu %d node %d\n", cpu, node);
+
+       return cfg;
 }
 
-/*
- * Rough estimation of how many shared IRQs there are, can be changed
- * anytime.
- */
-#define MAX_PLUS_SHARED_IRQS NR_IRQS
-#define PIN_MAP_SIZE (MAX_PLUS_SHARED_IRQS + NR_IRQS)
+void arch_init_chip_data(struct irq_desc *desc, int cpu)
+{
+       struct irq_cfg *cfg;
 
-/*
- * This is performance-critical, we want to do it O(1)
- *
- * the indexing order of this array favors 1:1 mappings
- * between pins and IRQs.
- */
+       cfg = desc->chip_data;
+       if (!cfg) {
+               desc->chip_data = get_one_free_irq_cfg(cpu);
+               if (!desc->chip_data) {
+                       printk(KERN_ERR "can not alloc irq_cfg\n");
+                       BUG_ON(1);
+               }
+       }
+}
 
-struct irq_pin_list {
-       int apic, pin;
-       struct irq_pin_list *next;
-};
+#ifdef CONFIG_NUMA_MIGRATE_IRQ_DESC
+
+static void
+init_copy_irq_2_pin(struct irq_cfg *old_cfg, struct irq_cfg *cfg, int cpu)
+{
+       struct irq_pin_list *old_entry, *head, *tail, *entry;
+
+       cfg->irq_2_pin = NULL;
+       old_entry = old_cfg->irq_2_pin;
+       if (!old_entry)
+               return;
 
-static struct irq_pin_list irq_2_pin_head[PIN_MAP_SIZE];
-static struct irq_pin_list *irq_2_pin_ptr;
+       entry = get_one_free_irq_2_pin(cpu);
+       if (!entry)
+               return;
 
-static void __init irq_2_pin_init(void)
+       entry->apic     = old_entry->apic;
+       entry->pin      = old_entry->pin;
+       head            = entry;
+       tail            = entry;
+       old_entry       = old_entry->next;
+       while (old_entry) {
+               entry = get_one_free_irq_2_pin(cpu);
+               if (!entry) {
+                       entry = head;
+                       while (entry) {
+                               head = entry->next;
+                               kfree(entry);
+                               entry = head;
+                       }
+                       /* still use the old one */
+                       return;
+               }
+               entry->apic     = old_entry->apic;
+               entry->pin      = old_entry->pin;
+               tail->next      = entry;
+               tail            = entry;
+               old_entry       = old_entry->next;
+       }
+
+       tail->next = NULL;
+       cfg->irq_2_pin = head;
+}
+
+static void free_irq_2_pin(struct irq_cfg *old_cfg, struct irq_cfg *cfg)
 {
-       struct irq_pin_list *pin = irq_2_pin_head;
-       int i;
+       struct irq_pin_list *entry, *next;
 
-       for (i = 1; i < PIN_MAP_SIZE; i++)
-               pin[i-1].next = &pin[i];
+       if (old_cfg->irq_2_pin == cfg->irq_2_pin)
+               return;
+
+       entry = old_cfg->irq_2_pin;
 
-       irq_2_pin_ptr = &pin[0];
+       while (entry) {
+               next = entry->next;
+               kfree(entry);
+               entry = next;
+       }
+       old_cfg->irq_2_pin = NULL;
 }
 
-static struct irq_pin_list *get_one_free_irq_2_pin(void)
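+/* Set up desc's own irq_cfg as a copy of old_desc's, pin list included. */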
+void arch_init_copy_chip_data(struct irq_desc *old_desc,
+                                struct irq_desc *desc, int cpu)
 {
-       struct irq_pin_list *pin = irq_2_pin_ptr;
+       struct irq_cfg *cfg;
+       struct irq_cfg *old_cfg;
 
-       if (!pin)
-               panic("can not get more irq_2_pin\n");
+       cfg = get_one_free_irq_cfg(cpu);
 
-       irq_2_pin_ptr = pin->next;
-       pin->next = NULL;
-       return pin;
+       if (!cfg)
+               return;
+
+       desc->chip_data = cfg;
+
+       old_cfg = old_desc->chip_data;
+
+       memcpy(cfg, old_cfg, sizeof(struct irq_cfg));
+
+       init_copy_irq_2_pin(old_cfg, cfg, cpu);
+}
+
+static void free_irq_cfg(struct irq_cfg *old_cfg)
+{
+       kfree(old_cfg);
+}
+
+void arch_free_chip_data(struct irq_desc *old_desc, struct irq_desc *desc)
+{
+       struct irq_cfg *old_cfg, *cfg;
+
+       old_cfg = old_desc->chip_data;
+       cfg = desc->chip_data;
+
+       if (old_cfg == cfg)
+               return;
+
+       if (old_cfg) {
+               free_irq_2_pin(old_cfg, cfg);
+               free_irq_cfg(old_cfg);
+               old_desc->chip_data = NULL;
+       }
+}
+
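+/*
+ * Flag a pending irq_desc migration when no vector move is in flight and
+ * the new affinity mask no longer intersects the current one.
+ */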
+static void set_extra_move_desc(struct irq_desc *desc, cpumask_t mask)
+{
+       struct irq_cfg *cfg = desc->chip_data;
+
+       if (!cfg->move_in_progress) {
+               /* it means the domain has not changed */
+               if (!cpus_intersects(desc->affinity, mask))
+                       cfg->move_desc_pending = 1;
+       }
 }
+#endif
+
+#else
+static struct irq_cfg *irq_cfg(unsigned int irq)
+{
+       return irq < nr_irqs ? irq_cfgx + irq : NULL;
+}
+
+#endif
+
+#ifndef CONFIG_NUMA_MIGRATE_IRQ_DESC
+static inline void set_extra_move_desc(struct irq_desc *desc, cpumask_t mask)
+{
+}
+#endif
 
 struct io_apic {
        unsigned int index;
@@ -237,11 +396,10 @@ static inline void io_apic_modify(unsigned int apic, unsigned int reg, unsigned
        writel(value, &io_apic->data);
 }
 
-static bool io_apic_level_ack_pending(unsigned int irq)
+static bool io_apic_level_ack_pending(struct irq_cfg *cfg)
 {
        struct irq_pin_list *entry;
        unsigned long flags;
-       struct irq_cfg *cfg = irq_cfg(irq);
 
        spin_lock_irqsave(&ioapic_lock, flags);
        entry = cfg->irq_2_pin;
@@ -323,13 +481,12 @@ static void ioapic_mask_entry(int apic, int pin)
 }
 
 #ifdef CONFIG_SMP
-static void __target_IO_APIC_irq(unsigned int irq, unsigned int dest, u8 vector)
+static void __target_IO_APIC_irq(unsigned int irq, unsigned int dest, struct irq_cfg *cfg)
 {
        int apic, pin;
-       struct irq_cfg *cfg;
        struct irq_pin_list *entry;
+       u8 vector = cfg->vector;
 
-       cfg = irq_cfg(irq);
        entry = cfg->irq_2_pin;
        for (;;) {
                unsigned int reg;
@@ -359,24 +516,27 @@ static void __target_IO_APIC_irq(unsigned int irq, unsigned int dest, u8 vector)
        }
 }
 
-static int assign_irq_vector(int irq, cpumask_t mask);
+static int assign_irq_vector(int irq, struct irq_cfg *cfg, cpumask_t mask);
 
-static void set_ioapic_affinity_irq(unsigned int irq, cpumask_t mask)
+static void set_ioapic_affinity_irq_desc(struct irq_desc *desc, cpumask_t mask)
 {
        struct irq_cfg *cfg;
        unsigned long flags;
        unsigned int dest;
        cpumask_t tmp;
-       struct irq_desc *desc;
+       unsigned int irq;
 
        cpus_and(tmp, mask, cpu_online_map);
        if (cpus_empty(tmp))
                return;
 
-       cfg = irq_cfg(irq);
-       if (assign_irq_vector(irq, mask))
+       irq = desc->irq;
+       cfg = desc->chip_data;
+       if (assign_irq_vector(irq, cfg, mask))
                return;
 
+       set_extra_move_desc(desc, mask);
+
        cpus_and(tmp, cfg->domain, mask);
        dest = cpu_mask_to_apicid(tmp);
        /*
@@ -384,12 +544,20 @@ static void set_ioapic_affinity_irq(unsigned int irq, cpumask_t mask)
         */
        dest = SET_APIC_LOGICAL_ID(dest);
 
-       desc = irq_to_desc(irq);
        spin_lock_irqsave(&ioapic_lock, flags);
-       __target_IO_APIC_irq(irq, dest, cfg->vector);
+       __target_IO_APIC_irq(irq, dest, cfg);
        desc->affinity = mask;
        spin_unlock_irqrestore(&ioapic_lock, flags);
 }
+
+static void set_ioapic_affinity_irq(unsigned int irq, cpumask_t mask)
+{
+       struct irq_desc *desc;
+
+       desc = irq_to_desc(irq);
+
+       set_ioapic_affinity_irq_desc(desc, mask);
+}
 #endif /* CONFIG_SMP */
 
 /*
@@ -397,16 +565,18 @@ static void set_ioapic_affinity_irq(unsigned int irq, cpumask_t mask)
  * shared ISA-space IRQs, so we have to support them. We are super
  * fast in the common case, and fast for shared ISA-space IRQs.
  */
-static void add_pin_to_irq(unsigned int irq, int apic, int pin)
+static void add_pin_to_irq_cpu(struct irq_cfg *cfg, int cpu, int apic, int pin)
 {
-       struct irq_cfg *cfg;
        struct irq_pin_list *entry;
 
-       /* first time to refer irq_cfg, so with new */
-       cfg = irq_cfg_alloc(irq);
        entry = cfg->irq_2_pin;
        if (!entry) {
-               entry = get_one_free_irq_2_pin();
+               entry = get_one_free_irq_2_pin(cpu);
+               if (!entry) {
+                       printk(KERN_ERR "cannot allocate irq_2_pin to add %d - %d\n",
+                                       apic, pin);
+                       return;
+               }
                cfg->irq_2_pin = entry;
                entry->apic = apic;
                entry->pin = pin;
@@ -421,7 +591,7 @@ static void add_pin_to_irq(unsigned int irq, int apic, int pin)
                entry = entry->next;
        }
 
-       entry->next = get_one_free_irq_2_pin();
+       entry->next = get_one_free_irq_2_pin(cpu);
        entry = entry->next;
        entry->apic = apic;
        entry->pin = pin;
@@ -430,11 +600,10 @@ static void add_pin_to_irq(unsigned int irq, int apic, int pin)
 /*
  * Reroute an IRQ to a different pin.
  */
-static void __init replace_pin_at_irq(unsigned int irq,
+static void __init replace_pin_at_irq_cpu(struct irq_cfg *cfg, int cpu,
                                      int oldapic, int oldpin,
                                      int newapic, int newpin)
 {
-       struct irq_cfg *cfg = irq_cfg(irq);
        struct irq_pin_list *entry = cfg->irq_2_pin;
        int replaced = 0;
 
@@ -451,18 +620,16 @@ static void __init replace_pin_at_irq(unsigned int irq,
 
        /* why? call replace before add? */
        if (!replaced)
-               add_pin_to_irq(irq, newapic, newpin);
+               add_pin_to_irq_cpu(cfg, cpu, newapic, newpin);
 }
 
-static inline void io_apic_modify_irq(unsigned int irq,
+static inline void io_apic_modify_irq(struct irq_cfg *cfg,
                                int mask_and, int mask_or,
                                void (*final)(struct irq_pin_list *entry))
 {
        int pin;
-       struct irq_cfg *cfg;
        struct irq_pin_list *entry;
 
-       cfg = irq_cfg(irq);
        for (entry = cfg->irq_2_pin; entry != NULL; entry = entry->next) {
                unsigned int reg;
                pin = entry->pin;
@@ -475,9 +642,9 @@ static inline void io_apic_modify_irq(unsigned int irq,
        }
 }
 
-static void __unmask_IO_APIC_irq(unsigned int irq)
+static void __unmask_IO_APIC_irq(struct irq_cfg *cfg)
 {
-       io_apic_modify_irq(irq, ~IO_APIC_REDIR_MASKED, 0, NULL);
+       io_apic_modify_irq(cfg, ~IO_APIC_REDIR_MASKED, 0, NULL);
 }
 
 #ifdef CONFIG_X86_64
@@ -492,47 +659,64 @@ void io_apic_sync(struct irq_pin_list *entry)
        readl(&io_apic->data);
 }
 
-static void __mask_IO_APIC_irq(unsigned int irq)
+static void __mask_IO_APIC_irq(struct irq_cfg *cfg)
 {
-       io_apic_modify_irq(irq, ~0, IO_APIC_REDIR_MASKED, &io_apic_sync);
+       io_apic_modify_irq(cfg, ~0, IO_APIC_REDIR_MASKED, &io_apic_sync);
 }
 #else /* CONFIG_X86_32 */
-static void __mask_IO_APIC_irq(unsigned int irq)
+static void __mask_IO_APIC_irq(struct irq_cfg *cfg)
 {
-       io_apic_modify_irq(irq, ~0, IO_APIC_REDIR_MASKED, NULL);
+       io_apic_modify_irq(cfg, ~0, IO_APIC_REDIR_MASKED, NULL);
 }
 
-static void __mask_and_edge_IO_APIC_irq(unsigned int irq)
+static void __mask_and_edge_IO_APIC_irq(struct irq_cfg *cfg)
 {
-       io_apic_modify_irq(irq, ~IO_APIC_REDIR_LEVEL_TRIGGER,
+       io_apic_modify_irq(cfg, ~IO_APIC_REDIR_LEVEL_TRIGGER,
                        IO_APIC_REDIR_MASKED, NULL);
 }
 
-static void __unmask_and_level_IO_APIC_irq(unsigned int irq)
+static void __unmask_and_level_IO_APIC_irq(struct irq_cfg *cfg)
 {
-       io_apic_modify_irq(irq, ~IO_APIC_REDIR_MASKED,
+       io_apic_modify_irq(cfg, ~IO_APIC_REDIR_MASKED,
                        IO_APIC_REDIR_LEVEL_TRIGGER, NULL);
 }
 #endif /* CONFIG_X86_32 */
 
-static void mask_IO_APIC_irq (unsigned int irq)
+static void mask_IO_APIC_irq_desc(struct irq_desc *desc)
 {
+       struct irq_cfg *cfg = desc->chip_data;
        unsigned long flags;
 
+       BUG_ON(!cfg);
+
        spin_lock_irqsave(&ioapic_lock, flags);
-       __mask_IO_APIC_irq(irq);
+       __mask_IO_APIC_irq(cfg);
        spin_unlock_irqrestore(&ioapic_lock, flags);
 }
 
-static void unmask_IO_APIC_irq (unsigned int irq)
+static void unmask_IO_APIC_irq_desc(struct irq_desc *desc)
 {
+       struct irq_cfg *cfg = desc->chip_data;
        unsigned long flags;
 
        spin_lock_irqsave(&ioapic_lock, flags);
-       __unmask_IO_APIC_irq(irq);
+       __unmask_IO_APIC_irq(cfg);
        spin_unlock_irqrestore(&ioapic_lock, flags);
 }
 
+static void mask_IO_APIC_irq(unsigned int irq)
+{
+       struct irq_desc *desc = irq_to_desc(irq);
+
+       mask_IO_APIC_irq_desc(desc);
+}
+static void unmask_IO_APIC_irq(unsigned int irq)
+{
+       struct irq_desc *desc = irq_to_desc(irq);
+
+       unmask_IO_APIC_irq_desc(desc);
+}
+
 static void clear_IO_APIC_pin(unsigned int apic, unsigned int pin)
 {
        struct IO_APIC_route_entry entry;
@@ -809,7 +993,7 @@ EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
  */
 static int EISA_ELCR(unsigned int irq)
 {
-       if (irq < 16) {
+       if (irq < NR_IRQS_LEGACY) {
                unsigned int port = 0x4d0 + (irq >> 3);
                return (inb(port) >> (irq & 7)) & 1;
        }
@@ -1034,7 +1218,7 @@ void unlock_vector_lock(void)
        spin_unlock(&vector_lock);
 }
 
-static int __assign_irq_vector(int irq, cpumask_t mask)
+static int __assign_irq_vector(int irq, struct irq_cfg *cfg, cpumask_t mask)
 {
        /*
         * NOTE! The local APIC isn't very good at handling
@@ -1050,16 +1234,13 @@ static int __assign_irq_vector(int irq, cpumask_t mask)
        static int current_vector = FIRST_DEVICE_VECTOR, current_offset = 0;
        unsigned int old_vector;
        int cpu;
-       struct irq_cfg *cfg;
 
-       cfg = irq_cfg(irq);
+       if ((cfg->move_in_progress) || cfg->move_cleanup_count)
+               return -EBUSY;
 
        /* Only try and allocate irqs on cpus that are present */
        cpus_and(mask, mask, cpu_online_map);
 
-       if ((cfg->move_in_progress) || cfg->move_cleanup_count)
-               return -EBUSY;
-
        old_vector = cfg->vector;
        if (old_vector) {
                cpumask_t tmp;
@@ -1113,24 +1294,22 @@ next:
        return -ENOSPC;
 }
 
-static int assign_irq_vector(int irq, cpumask_t mask)
+static int assign_irq_vector(int irq, struct irq_cfg *cfg, cpumask_t mask)
 {
        int err;
        unsigned long flags;
 
        spin_lock_irqsave(&vector_lock, flags);
-       err = __assign_irq_vector(irq, mask);
+       err = __assign_irq_vector(irq, cfg, mask);
        spin_unlock_irqrestore(&vector_lock, flags);
        return err;
 }
 
-static void __clear_irq_vector(int irq)
+static void __clear_irq_vector(int irq, struct irq_cfg *cfg)
 {
-       struct irq_cfg *cfg;
        cpumask_t mask;
        int cpu, vector;
 
-       cfg = irq_cfg(irq);
        BUG_ON(!cfg->vector);
 
        vector = cfg->vector;
@@ -1162,9 +1341,13 @@ void __setup_vector_irq(int cpu)
        /* This function must be called with vector_lock held */
        int irq, vector;
        struct irq_cfg *cfg;
+       struct irq_desc *desc;
 
        /* Mark the inuse vectors */
-       for_each_irq_cfg(irq, cfg) {
+       for_each_irq_desc(irq, desc) {
+               if (!desc)
+                       continue;
+               cfg = desc->chip_data;
                if (!cpu_isset(cpu, cfg->domain))
                        continue;
                vector = cfg->vector;
@@ -1215,11 +1398,8 @@ static inline int IO_APIC_irq_trigger(int irq)
 }
 #endif
 
-static void ioapic_register_intr(int irq, unsigned long trigger)
+static void ioapic_register_intr(int irq, struct irq_desc *desc, unsigned long trigger)
 {
-       struct irq_desc *desc;
-
-       desc = irq_to_desc(irq);
 
        if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) ||
            trigger == IOAPIC_LEVEL)
@@ -1311,7 +1491,7 @@ static int setup_ioapic_entry(int apic, int irq,
        return 0;
 }
 
-static void setup_IO_APIC_irq(int apic, int pin, unsigned int irq,
+static void setup_IO_APIC_irq(int apic, int pin, unsigned int irq, struct irq_desc *desc,
                              int trigger, int polarity)
 {
        struct irq_cfg *cfg;
@@ -1321,10 +1501,10 @@ static void setup_IO_APIC_irq(int apic, int pin, unsigned int irq,
        if (!IO_APIC_IRQ(irq))
                return;
 
-       cfg = irq_cfg(irq);
+       cfg = desc->chip_data;
 
        mask = TARGET_CPUS;
-       if (assign_irq_vector(irq, mask))
+       if (assign_irq_vector(irq, cfg, mask))
                return;
 
        cpus_and(mask, cfg->domain, mask);
@@ -1341,12 +1521,12 @@ static void setup_IO_APIC_irq(int apic, int pin, unsigned int irq,
                               cfg->vector)) {
                printk("Failed to setup ioapic entry for ioapic  %d, pin %d\n",
                       mp_ioapics[apic].mp_apicid, pin);
-               __clear_irq_vector(irq);
+               __clear_irq_vector(irq, cfg);
                return;
        }
 
-       ioapic_register_intr(irq, trigger);
-       if (irq < 16)
+       ioapic_register_intr(irq, desc, trigger);
+       if (irq < NR_IRQS_LEGACY)
                disable_8259A_irq(irq);
 
        ioapic_write_entry(apic, pin, entry);
@@ -1356,6 +1536,9 @@ static void __init setup_IO_APIC_irqs(void)
 {
        int apic, pin, idx, irq;
        int notcon = 0;
+       struct irq_desc *desc;
+       struct irq_cfg *cfg;
+       int cpu = boot_cpu_id;
 
        apic_printk(APIC_VERBOSE, KERN_DEBUG "init IO_APIC IRQs\n");
 
@@ -1387,9 +1570,15 @@ static void __init setup_IO_APIC_irqs(void)
                        if (multi_timer_check(apic, irq))
                                continue;
 #endif
-                       add_pin_to_irq(irq, apic, pin);
+                       desc = irq_to_desc_alloc_cpu(irq, cpu);
+                       if (!desc) {
+                               printk(KERN_INFO "cannot get irq_desc for %d\n", irq);
+                               continue;
+                       }
+                       cfg = desc->chip_data;
+                       add_pin_to_irq_cpu(cfg, cpu, apic, pin);
 
-                       setup_IO_APIC_irq(apic, pin, irq,
+                       setup_IO_APIC_irq(apic, pin, irq, desc,
                                        irq_trigger(idx), irq_polarity(idx));
                }
        }
@@ -1448,6 +1637,7 @@ __apicdebuginit(void) print_IO_APIC(void)
        union IO_APIC_reg_03 reg_03;
        unsigned long flags;
        struct irq_cfg *cfg;
+       struct irq_desc *desc;
        unsigned int irq;
 
        if (apic_verbosity == APIC_QUIET)
@@ -1537,8 +1727,13 @@ __apicdebuginit(void) print_IO_APIC(void)
        }
        }
        printk(KERN_DEBUG "IRQ to pin mappings:\n");
-       for_each_irq_cfg(irq, cfg) {
-               struct irq_pin_list *entry = cfg->irq_2_pin;
+       for_each_irq_desc(irq, desc) {
+               struct irq_pin_list *entry;
+
+               if (!desc)
+                       continue;
+               cfg = desc->chip_data;
+               entry = cfg->irq_2_pin;
                if (!entry)
                        continue;
                printk(KERN_DEBUG "IRQ%d ", irq);
@@ -2022,14 +2217,16 @@ static unsigned int startup_ioapic_irq(unsigned int irq)
 {
        int was_pending = 0;
        unsigned long flags;
+       struct irq_cfg *cfg;
 
        spin_lock_irqsave(&ioapic_lock, flags);
-       if (irq < 16) {
+       if (irq < NR_IRQS_LEGACY) {
                disable_8259A_irq(irq);
                if (i8259A_irq_pending(irq))
                        was_pending = 1;
        }
-       __unmask_IO_APIC_irq(irq);
+       cfg = irq_cfg(irq);
+       __unmask_IO_APIC_irq(cfg);
        spin_unlock_irqrestore(&ioapic_lock, flags);
 
        return was_pending;
@@ -2092,35 +2289,37 @@ static DECLARE_DELAYED_WORK(ir_migration_work, ir_irq_migration);
  * as simple as edge triggered migration and we can do the irq migration
  * with a simple atomic update to IO-APIC RTE.
  */
-static void migrate_ioapic_irq(int irq, cpumask_t mask)
+static void migrate_ioapic_irq_desc(struct irq_desc *desc, cpumask_t mask)
 {
        struct irq_cfg *cfg;
-       struct irq_desc *desc;
        cpumask_t tmp, cleanup_mask;
        struct irte irte;
        int modify_ioapic_rte;
        unsigned int dest;
        unsigned long flags;
+       unsigned int irq;
 
        cpus_and(tmp, mask, cpu_online_map);
        if (cpus_empty(tmp))
                return;
 
+       irq = desc->irq;
        if (get_irte(irq, &irte))
                return;
 
-       if (assign_irq_vector(irq, mask))
+       cfg = desc->chip_data;
+       if (assign_irq_vector(irq, cfg, mask))
                return;
 
-       cfg = irq_cfg(irq);
+       set_extra_move_desc(desc, mask);
+
        cpus_and(tmp, cfg->domain, mask);
        dest = cpu_mask_to_apicid(tmp);
 
-       desc = irq_to_desc(irq);
        modify_ioapic_rte = desc->status & IRQ_LEVEL;
        if (modify_ioapic_rte) {
                spin_lock_irqsave(&ioapic_lock, flags);
-               __target_IO_APIC_irq(irq, dest, cfg->vector);
+               __target_IO_APIC_irq(irq, dest, cfg);
                spin_unlock_irqrestore(&ioapic_lock, flags);
        }
 
@@ -2142,14 +2341,14 @@ static void migrate_ioapic_irq(int irq, cpumask_t mask)
        desc->affinity = mask;
 }
 
-static int migrate_irq_remapped_level(int irq)
+static int migrate_irq_remapped_level_desc(struct irq_desc *desc)
 {
        int ret = -1;
-       struct irq_desc *desc = irq_to_desc(irq);
+       struct irq_cfg *cfg = desc->chip_data;
 
-       mask_IO_APIC_irq(irq);
+       mask_IO_APIC_irq_desc(desc);
 
-       if (io_apic_level_ack_pending(irq)) {
+       if (io_apic_level_ack_pending(cfg)) {
                /*
                 * Interrupt in progress. Migrating irq now will change the
                 * vector information in the IO-APIC RTE and that will confuse
@@ -2161,14 +2360,15 @@ static int migrate_irq_remapped_level(int irq)
        }
 
        /* everything is clear. we have right of way */
-       migrate_ioapic_irq(irq, desc->pending_mask);
+       migrate_ioapic_irq_desc(desc, desc->pending_mask);
 
        ret = 0;
        desc->status &= ~IRQ_MOVE_PENDING;
        cpus_clear(desc->pending_mask);
 
 unmask:
-       unmask_IO_APIC_irq(irq);
+       unmask_IO_APIC_irq_desc(desc);
+
        return ret;
 }
 
@@ -2178,6 +2378,9 @@ static void ir_irq_migration(struct work_struct *work)
        struct irq_desc *desc;
 
        for_each_irq_desc(irq, desc) {
+               if (!desc)
+                       continue;
+
                if (desc->status & IRQ_MOVE_PENDING) {
                        unsigned long flags;
 
@@ -2198,18 +2401,22 @@ static void ir_irq_migration(struct work_struct *work)
 /*
  * Migrates the IRQ destination in the process context.
  */
-static void set_ir_ioapic_affinity_irq(unsigned int irq, cpumask_t mask)
+static void set_ir_ioapic_affinity_irq_desc(struct irq_desc *desc, cpumask_t mask)
 {
-       struct irq_desc *desc = irq_to_desc(irq);
-
        if (desc->status & IRQ_LEVEL) {
                desc->status |= IRQ_MOVE_PENDING;
                desc->pending_mask = mask;
-               migrate_irq_remapped_level(irq);
+               migrate_irq_remapped_level_desc(desc);
                return;
        }
 
-       migrate_ioapic_irq(irq, mask);
+       migrate_ioapic_irq_desc(desc, mask);
+}
+static void set_ir_ioapic_affinity_irq(unsigned int irq, cpumask_t mask)
+{
+       struct irq_desc *desc = irq_to_desc(irq);
+
+       set_ir_ioapic_affinity_irq_desc(desc, mask);
 }
 #endif
 
@@ -2228,6 +2435,9 @@ asmlinkage void smp_irq_move_cleanup_interrupt(void)
                struct irq_cfg *cfg;
                irq = __get_cpu_var(vector_irq)[vector];
 
+               if (irq == -1)
+                       continue;
+
                desc = irq_to_desc(irq);
                if (!desc)
                        continue;
@@ -2249,19 +2459,40 @@ unlock:
        irq_exit();
 }
 
-static void irq_complete_move(unsigned int irq)
+static void irq_complete_move(struct irq_desc **descp)
 {
-       struct irq_cfg *cfg = irq_cfg(irq);
+       struct irq_desc *desc = *descp;
+       struct irq_cfg *cfg = desc->chip_data;
        unsigned vector, me;
 
-       if (likely(!cfg->move_in_progress))
+       if (likely(!cfg->move_in_progress)) {
+#ifdef CONFIG_NUMA_MIGRATE_IRQ_DESC
+               if (likely(!cfg->move_desc_pending))
+                       return;
+
+               /* domain has not changed, but affinity did */
+               me = smp_processor_id();
+               if (cpu_isset(me, desc->affinity)) {
+                       *descp = desc = move_irq_desc(desc, me);
+                       /* get the new one */
+                       cfg = desc->chip_data;
+                       cfg->move_desc_pending = 0;
+               }
+#endif
                return;
+       }
 
        vector = ~get_irq_regs()->orig_ax;
        me = smp_processor_id();
        if ((vector == cfg->vector) && cpu_isset(me, cfg->domain)) {
                cpumask_t cleanup_mask;
 
+#ifdef CONFIG_NUMA_MIGRATE_IRQ_DESC
+               *descp = desc = move_irq_desc(desc, me);
+               /* get the new one */
+               cfg = desc->chip_data;
+#endif
+
                cpus_and(cleanup_mask, cfg->old_domain, cpu_online_map);
                cfg->move_cleanup_count = cpus_weight(cleanup_mask);
                send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR);
@@ -2269,8 +2500,9 @@ static void irq_complete_move(unsigned int irq)
        }
 }
 #else
-static inline void irq_complete_move(unsigned int irq) {}
+static inline void irq_complete_move(struct irq_desc **descp) {}
 #endif
+
 #ifdef CONFIG_INTR_REMAP
 static void ack_x2apic_level(unsigned int irq)
 {
@@ -2281,11 +2513,14 @@ static void ack_x2apic_edge(unsigned int irq)
 {
        ack_x2APIC_irq();
 }
+
 #endif
 
 static void ack_apic_edge(unsigned int irq)
 {
-       irq_complete_move(irq);
+       struct irq_desc *desc = irq_to_desc(irq);
+
+       irq_complete_move(&desc);
        move_native_irq(irq);
        ack_APIC_irq();
 }
@@ -2294,18 +2529,21 @@ atomic_t irq_mis_count;
 
 static void ack_apic_level(unsigned int irq)
 {
+       struct irq_desc *desc = irq_to_desc(irq);
+
 #ifdef CONFIG_X86_32
        unsigned long v;
        int i;
 #endif
+       struct irq_cfg *cfg;
        int do_unmask_irq = 0;
 
-       irq_complete_move(irq);
+       irq_complete_move(&desc);
 #ifdef CONFIG_GENERIC_PENDING_IRQ
        /* If we are moving the irq we need to mask it */
-       if (unlikely(irq_to_desc(irq)->status & IRQ_MOVE_PENDING)) {
+       if (unlikely(desc->status & IRQ_MOVE_PENDING)) {
                do_unmask_irq = 1;
-               mask_IO_APIC_irq(irq);
+               mask_IO_APIC_irq_desc(desc);
        }
 #endif
 
@@ -2329,7 +2567,8 @@ static void ack_apic_level(unsigned int irq)
        * operation to prevent an edge-triggered interrupt escaping meanwhile.
        * The idea is from Manfred Spraul.  --macro
        */
-       i = irq_cfg(irq)->vector;
+       cfg = desc->chip_data;
+       i = cfg->vector;
 
        v = apic_read(APIC_TMR + ((i & ~0x1f) >> 1));
 #endif
@@ -2368,17 +2607,18 @@ static void ack_apic_level(unsigned int irq)
                 * accurate and is causing problems then it is a hardware bug
                 * and you can go talk to the chipset vendor about it.
                 */
-               if (!io_apic_level_ack_pending(irq))
+               cfg = desc->chip_data;
+               if (!io_apic_level_ack_pending(cfg))
                        move_masked_irq(irq);
-               unmask_IO_APIC_irq(irq);
+               unmask_IO_APIC_irq_desc(desc);
        }
 
 #ifdef CONFIG_X86_32
        if (!(v & (1 << (i & 0x1f)))) {
                atomic_inc(&irq_mis_count);
                spin_lock(&ioapic_lock);
-               __mask_and_edge_IO_APIC_irq(irq);
-               __unmask_and_level_IO_APIC_irq(irq);
+               __mask_and_edge_IO_APIC_irq(cfg);
+               __unmask_and_level_IO_APIC_irq(cfg);
                spin_unlock(&ioapic_lock);
        }
 #endif
@@ -2429,20 +2669,22 @@ static inline void init_IO_APIC_traps(void)
         * Also, we've got to be careful not to trash gate
         * 0x80, because int 0x80 is hm, kind of importantish. ;)
         */
-       for_each_irq_cfg(irq, cfg) {
-               if (IO_APIC_IRQ(irq) && !cfg->vector) {
+       for_each_irq_desc(irq, desc) {
+               if (!desc)
+                       continue;
+
+               cfg = desc->chip_data;
+               if (IO_APIC_IRQ(irq) && cfg && !cfg->vector) {
                        /*
                         * Hmm.. We don't have an entry for this,
                         * so default to an old-fashioned 8259
                         * interrupt if we can..
                         */
-                       if (irq < 16)
+                       if (irq < NR_IRQS_LEGACY)
                                make_8259A_irq(irq);
-                       else {
-                               desc = irq_to_desc(irq);
+                       else
                                /* Strange. Oh, well.. */
                                desc->chip = &no_irq_chip;
-                       }
                }
        }
 }
@@ -2467,7 +2709,7 @@ static void unmask_lapic_irq(unsigned int irq)
        apic_write(APIC_LVT0, v & ~APIC_LVT_MASKED);
 }
 
-static void ack_lapic_irq (unsigned int irq)
+static void ack_lapic_irq(unsigned int irq)
 {
        ack_APIC_irq();
 }
@@ -2479,11 +2721,8 @@ static struct irq_chip lapic_chip __read_mostly = {
        .ack            = ack_lapic_irq,
 };
 
-static void lapic_register_intr(int irq)
+static void lapic_register_intr(int irq, struct irq_desc *desc)
 {
-       struct irq_desc *desc;
-
-       desc = irq_to_desc(irq);
        desc->status &= ~IRQ_LEVEL;
        set_irq_chip_and_handler_name(irq, &lapic_chip, handle_edge_irq,
                                      "edge");
@@ -2587,7 +2826,9 @@ int timer_through_8259 __initdata;
  */
 static inline void __init check_timer(void)
 {
-       struct irq_cfg *cfg = irq_cfg(0);
+       struct irq_desc *desc = irq_to_desc(0);
+       struct irq_cfg *cfg = desc->chip_data;
+       int cpu = boot_cpu_id;
        int apic1, pin1, apic2, pin2;
        unsigned long flags;
        unsigned int ver;
@@ -2602,7 +2843,7 @@ static inline void __init check_timer(void)
         * get/set the timer IRQ vector:
         */
        disable_8259A_irq(0);
-       assign_irq_vector(0, TARGET_CPUS);
+       assign_irq_vector(0, cfg, TARGET_CPUS);
 
        /*
         * As IRQ0 is to be enabled in the 8259A, the virtual
@@ -2653,10 +2894,10 @@ static inline void __init check_timer(void)
                 * Ok, does IRQ0 through the IOAPIC work?
                 */
                if (no_pin1) {
-                       add_pin_to_irq(0, apic1, pin1);
+                       add_pin_to_irq_cpu(cfg, cpu, apic1, pin1);
                        setup_timer_IRQ0_pin(apic1, pin1, cfg->vector);
                }
-               unmask_IO_APIC_irq(0);
+               unmask_IO_APIC_irq_desc(desc);
                if (timer_irq_works()) {
                        if (nmi_watchdog == NMI_IO_APIC) {
                                setup_nmi();
@@ -2682,9 +2923,9 @@ static inline void __init check_timer(void)
                /*
                 * legacy devices should be connected to IO APIC #0
                 */
-               replace_pin_at_irq(0, apic1, pin1, apic2, pin2);
+               replace_pin_at_irq_cpu(cfg, cpu, apic1, pin1, apic2, pin2);
                setup_timer_IRQ0_pin(apic2, pin2, cfg->vector);
-               unmask_IO_APIC_irq(0);
+               unmask_IO_APIC_irq_desc(desc);
                enable_8259A_irq(0);
                if (timer_irq_works()) {
                        apic_printk(APIC_QUIET, KERN_INFO "....... works.\n");
@@ -2716,7 +2957,7 @@ static inline void __init check_timer(void)
        apic_printk(APIC_QUIET, KERN_INFO
                    "...trying to set up timer as Virtual Wire IRQ...\n");
 
-       lapic_register_intr(0);
+       lapic_register_intr(0, desc);
        apic_write(APIC_LVT0, APIC_DM_FIXED | cfg->vector);     /* Fixed mode */
        enable_8259A_irq(0);
 
@@ -2901,22 +3142,26 @@ unsigned int create_irq_nr(unsigned int irq_want)
        unsigned int irq;
        unsigned int new;
        unsigned long flags;
-       struct irq_cfg *cfg_new;
-
-       irq_want = nr_irqs - 1;
+       struct irq_cfg *cfg_new = NULL;
+       int cpu = boot_cpu_id;
+       struct irq_desc *desc_new = NULL;
 
        irq = 0;
        spin_lock_irqsave(&vector_lock, flags);
-       for (new = irq_want; new > 0; new--) {
+       for (new = irq_want; new < NR_IRQS; new++) {
                if (platform_legacy_irq(new))
                        continue;
-               cfg_new = irq_cfg(new);
-               if (cfg_new && cfg_new->vector != 0)
+
+               desc_new = irq_to_desc_alloc_cpu(new, cpu);
+               if (!desc_new) {
+                       printk(KERN_INFO "cannot get irq_desc for %d\n", new);
                        continue;
-               /* check if need to create one */
-               if (!cfg_new)
-                       cfg_new = irq_cfg_alloc(new);
-               if (__assign_irq_vector(new, TARGET_CPUS) == 0)
+               }
+               cfg_new = desc_new->chip_data;
+
+               if (cfg_new->vector != 0)
+                       continue;
+               if (__assign_irq_vector(new, cfg_new, TARGET_CPUS) == 0)
                        irq = new;
                break;
        }
@@ -2924,15 +3169,21 @@ unsigned int create_irq_nr(unsigned int irq_want)
 
        if (irq > 0) {
                dynamic_irq_init(irq);
+               /* restore it, in case dynamic_irq_init cleared it */
+               if (desc_new)
+                       desc_new->chip_data = cfg_new;
        }
        return irq;
 }
 
+static int nr_irqs_gsi = NR_IRQS_LEGACY;
 int create_irq(void)
 {
+       unsigned int irq_want;
        int irq;
 
-       irq = create_irq_nr(nr_irqs - 1);
+       irq_want = nr_irqs_gsi;
+       irq = create_irq_nr(irq_want);
 
        if (irq == 0)
                irq = -1;
@@ -2943,14 +3194,22 @@ int create_irq(void)
 void destroy_irq(unsigned int irq)
 {
        unsigned long flags;
+       struct irq_cfg *cfg;
+       struct irq_desc *desc;
 
+       /* store it, in case dynamic_irq_cleanup clears it */
+       desc = irq_to_desc(irq);
+       cfg = desc->chip_data;
        dynamic_irq_cleanup(irq);
+       /* connect back irq_cfg */
+       if (desc)
+               desc->chip_data = cfg;
 
 #ifdef CONFIG_INTR_REMAP
        free_irte(irq);
 #endif
        spin_lock_irqsave(&vector_lock, flags);
-       __clear_irq_vector(irq);
+       __clear_irq_vector(irq, cfg);
        spin_unlock_irqrestore(&vector_lock, flags);
 }
 
@@ -2965,12 +3224,12 @@ static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_ms
        unsigned dest;
        cpumask_t tmp;
 
+       cfg = irq_cfg(irq);
        tmp = TARGET_CPUS;
-       err = assign_irq_vector(irq, tmp);
+       err = assign_irq_vector(irq, cfg, tmp);
        if (err)
                return err;
 
-       cfg = irq_cfg(irq);
        cpus_and(tmp, cfg->domain, tmp);
        dest = cpu_mask_to_apicid(tmp);
 
@@ -3028,35 +3287,35 @@ static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_ms
 #ifdef CONFIG_SMP
 static void set_msi_irq_affinity(unsigned int irq, cpumask_t mask)
 {
+       struct irq_desc *desc = irq_to_desc(irq);
        struct irq_cfg *cfg;
        struct msi_msg msg;
        unsigned int dest;
        cpumask_t tmp;
-       struct irq_desc *desc;
 
        cpus_and(tmp, mask, cpu_online_map);
        if (cpus_empty(tmp))
                return;
 
-       if (assign_irq_vector(irq, mask))
+       cfg = desc->chip_data;
+       if (assign_irq_vector(irq, cfg, mask))
                return;
 
-       cfg = irq_cfg(irq);
+       set_extra_move_desc(desc, mask);
+
        cpus_and(tmp, cfg->domain, mask);
        dest = cpu_mask_to_apicid(tmp);
 
-       read_msi_msg(irq, &msg);
+       read_msi_msg_desc(desc, &msg);
 
        msg.data &= ~MSI_DATA_VECTOR_MASK;
        msg.data |= MSI_DATA_VECTOR(cfg->vector);
        msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
        msg.address_lo |= MSI_ADDR_DEST_ID(dest);
 
-       write_msi_msg(irq, &msg);
-       desc = irq_to_desc(irq);
+       write_msi_msg_desc(desc, &msg);
        desc->affinity = mask;
 }
-
 #ifdef CONFIG_INTR_REMAP
 /*
  * Migrate the MSI irq to another cpumask. This migration is
@@ -3064,11 +3323,11 @@ static void set_msi_irq_affinity(unsigned int irq, cpumask_t mask)
  */
 static void ir_set_msi_irq_affinity(unsigned int irq, cpumask_t mask)
 {
+       struct irq_desc *desc = irq_to_desc(irq);
        struct irq_cfg *cfg;
        unsigned int dest;
        cpumask_t tmp, cleanup_mask;
        struct irte irte;
-       struct irq_desc *desc;
 
        cpus_and(tmp, mask, cpu_online_map);
        if (cpus_empty(tmp))
@@ -3077,10 +3336,12 @@ static void ir_set_msi_irq_affinity(unsigned int irq, cpumask_t mask)
        if (get_irte(irq, &irte))
                return;
 
-       if (assign_irq_vector(irq, mask))
+       cfg = desc->chip_data;
+       if (assign_irq_vector(irq, cfg, mask))
                return;
 
-       cfg = irq_cfg(irq);
+       set_extra_move_desc(desc, mask);
+
        cpus_and(tmp, cfg->domain, mask);
        dest = cpu_mask_to_apicid(tmp);
 
@@ -3104,9 +3365,9 @@ static void ir_set_msi_irq_affinity(unsigned int irq, cpumask_t mask)
                cfg->move_in_progress = 0;
        }
 
-       desc = irq_to_desc(irq);
        desc->affinity = mask;
 }
+
 #endif
 #endif /* CONFIG_SMP */
 
@@ -3165,7 +3426,7 @@ static int msi_alloc_irte(struct pci_dev *dev, int irq, int nvec)
 }
 #endif
 
-static int setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc, int irq)
+static int setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc, int irq)
 {
        int ret;
        struct msi_msg msg;
@@ -3174,7 +3435,7 @@ static int setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc, int irq)
        if (ret < 0)
                return ret;
 
-       set_irq_msi(irq, desc);
+       set_irq_msi(irq, msidesc);
        write_msi_msg(irq, &msg);
 
 #ifdef CONFIG_INTR_REMAP
@@ -3194,26 +3455,13 @@ static int setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc, int irq)
        return 0;
 }
 
-static unsigned int build_irq_for_pci_dev(struct pci_dev *dev)
-{
-       unsigned int irq;
-
-       irq = dev->bus->number;
-       irq <<= 8;
-       irq |= dev->devfn;
-       irq <<= 12;
-
-       return irq;
-}
-
-int arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc)
+int arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc)
 {
        unsigned int irq;
        int ret;
        unsigned int irq_want;
 
-       irq_want = build_irq_for_pci_dev(dev) + 0x100;
-
+       irq_want = nr_irqs_gsi;
        irq = create_irq_nr(irq_want);
        if (irq == 0)
                return -1;
@@ -3227,7 +3475,7 @@ int arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc)
                goto error;
 no_ir:
 #endif
-       ret = setup_msi_irq(dev, desc, irq);
+       ret = setup_msi_irq(dev, msidesc, irq);
        if (ret < 0) {
                destroy_irq(irq);
                return ret;
@@ -3245,7 +3493,7 @@ int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
 {
        unsigned int irq;
        int ret, sub_handle;
-       struct msi_desc *desc;
+       struct msi_desc *msidesc;
        unsigned int irq_want;
 
 #ifdef CONFIG_INTR_REMAP
@@ -3253,10 +3501,11 @@ int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
        int index = 0;
 #endif
 
-       irq_want = build_irq_for_pci_dev(dev) + 0x100;
+       irq_want = nr_irqs_gsi;
        sub_handle = 0;
-       list_for_each_entry(desc, &dev->msi_list, list) {
-               irq = create_irq_nr(irq_want--);
+       list_for_each_entry(msidesc, &dev->msi_list, list) {
+               irq = create_irq_nr(irq_want);
+               irq_want++;
                if (irq == 0)
                        return -1;
 #ifdef CONFIG_INTR_REMAP
@@ -3288,7 +3537,7 @@ int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
                }
 no_ir:
 #endif
-               ret = setup_msi_irq(dev, desc, irq);
+               ret = setup_msi_irq(dev, msidesc, irq);
                if (ret < 0)
                        goto error;
                sub_handle++;
@@ -3309,20 +3558,22 @@ void arch_teardown_msi_irq(unsigned int irq)
 #ifdef CONFIG_SMP
 static void dmar_msi_set_affinity(unsigned int irq, cpumask_t mask)
 {
+       struct irq_desc *desc = irq_to_desc(irq);
        struct irq_cfg *cfg;
        struct msi_msg msg;
        unsigned int dest;
        cpumask_t tmp;
-       struct irq_desc *desc;
 
        cpus_and(tmp, mask, cpu_online_map);
        if (cpus_empty(tmp))
                return;
 
-       if (assign_irq_vector(irq, mask))
+       cfg = desc->chip_data;
+       if (assign_irq_vector(irq, cfg, mask))
                return;
 
-       cfg = irq_cfg(irq);
+       set_extra_move_desc(desc, mask);
+
        cpus_and(tmp, cfg->domain, mask);
        dest = cpu_mask_to_apicid(tmp);
 
@@ -3334,9 +3585,9 @@ static void dmar_msi_set_affinity(unsigned int irq, cpumask_t mask)
        msg.address_lo |= MSI_ADDR_DEST_ID(dest);
 
        dmar_msi_write(irq, &msg);
-       desc = irq_to_desc(irq);
        desc->affinity = mask;
 }
+
 #endif /* CONFIG_SMP */
 
 struct irq_chip dmar_msi_type = {
@@ -3370,8 +3621,8 @@ int arch_setup_dmar_msi(unsigned int irq)
 #ifdef CONFIG_SMP
 static void hpet_msi_set_affinity(unsigned int irq, cpumask_t mask)
 {
+       struct irq_desc *desc = irq_to_desc(irq);
        struct irq_cfg *cfg;
-       struct irq_desc *desc;
        struct msi_msg msg;
        unsigned int dest;
        cpumask_t tmp;
@@ -3380,10 +3631,12 @@ static void hpet_msi_set_affinity(unsigned int irq, cpumask_t mask)
        if (cpus_empty(tmp))
                return;
 
-       if (assign_irq_vector(irq, mask))
+       cfg = desc->chip_data;
+       if (assign_irq_vector(irq, cfg, mask))
                return;
 
-       cfg = irq_cfg(irq);
+       set_extra_move_desc(desc, mask);
+
        cpus_and(tmp, cfg->domain, mask);
        dest = cpu_mask_to_apicid(tmp);
 
@@ -3395,9 +3648,9 @@ static void hpet_msi_set_affinity(unsigned int irq, cpumask_t mask)
        msg.address_lo |= MSI_ADDR_DEST_ID(dest);
 
        hpet_msi_write(irq, &msg);
-       desc = irq_to_desc(irq);
        desc->affinity = mask;
 }
+
 #endif /* CONFIG_SMP */
 
 struct irq_chip hpet_msi_type = {
@@ -3452,26 +3705,28 @@ static void target_ht_irq(unsigned int irq, unsigned int dest, u8 vector)
 
 static void set_ht_irq_affinity(unsigned int irq, cpumask_t mask)
 {
+       struct irq_desc *desc = irq_to_desc(irq);
        struct irq_cfg *cfg;
        unsigned int dest;
        cpumask_t tmp;
-       struct irq_desc *desc;
 
        cpus_and(tmp, mask, cpu_online_map);
        if (cpus_empty(tmp))
                return;
 
-       if (assign_irq_vector(irq, mask))
+       cfg = desc->chip_data;
+       if (assign_irq_vector(irq, cfg, mask))
                return;
 
-       cfg = irq_cfg(irq);
+       set_extra_move_desc(desc, mask);
+
        cpus_and(tmp, cfg->domain, mask);
        dest = cpu_mask_to_apicid(tmp);
 
        target_ht_irq(irq, dest, cfg->vector);
-       desc = irq_to_desc(irq);
        desc->affinity = mask;
 }
+
 #endif
 
 static struct irq_chip ht_irq_chip = {
@@ -3491,13 +3746,13 @@ int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev)
        int err;
        cpumask_t tmp;
 
+       cfg = irq_cfg(irq);
        tmp = TARGET_CPUS;
-       err = assign_irq_vector(irq, tmp);
+       err = assign_irq_vector(irq, cfg, tmp);
        if (!err) {
                struct ht_irq_msg msg;
                unsigned dest;
 
-               cfg = irq_cfg(irq);
                cpus_and(tmp, cfg->domain, tmp);
                dest = cpu_mask_to_apicid(tmp);
 
@@ -3543,7 +3798,9 @@ int arch_enable_uv_irq(char *irq_name, unsigned int irq, int cpu, int mmr_blade,
        unsigned long flags;
        int err;
 
-       err = assign_irq_vector(irq, *eligible_cpu);
+       cfg = irq_cfg(irq);
+
+       err = assign_irq_vector(irq, cfg, *eligible_cpu);
        if (err != 0)
                return err;
 
@@ -3552,8 +3809,6 @@ int arch_enable_uv_irq(char *irq_name, unsigned int irq, int cpu, int mmr_blade,
                                      irq_name);
        spin_unlock_irqrestore(&vector_lock, flags);
 
-       cfg = irq_cfg(irq);
-
        mmr_value = 0;
        entry = (struct uv_IO_APIC_route_entry *)&mmr_value;
        BUG_ON(sizeof(struct uv_IO_APIC_route_entry) != sizeof(unsigned long));
@@ -3605,9 +3860,16 @@ int __init io_apic_get_redir_entries (int ioapic)
        return reg_01.bits.entries;
 }
 
-int __init probe_nr_irqs(void)
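+/*
+ * Size nr_irqs_gsi, the start of the dynamically allocated irq range, to
+ * cover every IO-APIC redirection entry.
+ */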
+void __init probe_nr_irqs_gsi(void)
 {
-       return NR_IRQS;
+       int idx;
+       int nr = 0;
+
+       for (idx = 0; idx < nr_ioapics; idx++)
+               nr += io_apic_get_redir_entries(idx) + 1;
+
+       if (nr > nr_irqs_gsi)
+               nr_irqs_gsi = nr;
 }
 
 /* --------------------------------------------------------------------------
@@ -3706,19 +3968,31 @@ int __init io_apic_get_version(int ioapic)
 
 int io_apic_set_pci_routing (int ioapic, int pin, int irq, int triggering, int polarity)
 {
+       struct irq_desc *desc;
+       struct irq_cfg *cfg;
+       int cpu = boot_cpu_id;
+
        if (!IO_APIC_IRQ(irq)) {
                apic_printk(APIC_QUIET,KERN_ERR "IOAPIC[%d]: Invalid reference to IRQ 0\n",
                        ioapic);
                return -EINVAL;
        }
 
+       desc = irq_to_desc_alloc_cpu(irq, cpu);
+       if (!desc) {
+               printk(KERN_INFO "cannot get irq_desc %d\n", irq);
+               return 0;
+       }
+
        /*
         * IRQs < 16 are already in the irq_2_pin[] map
         */
-       if (irq >= 16)
-               add_pin_to_irq(irq, ioapic, pin);
+       if (irq >= NR_IRQS_LEGACY) {
+               cfg = desc->chip_data;
+               add_pin_to_irq_cpu(cfg, cpu, ioapic, pin);
+       }
 
-       setup_IO_APIC_irq(ioapic, pin, irq, triggering, polarity);
+       setup_IO_APIC_irq(ioapic, pin, irq, desc, triggering, polarity);
 
        return 0;
 }
@@ -3772,9 +4046,10 @@ void __init setup_ioapic_dest(void)
                         * when you have too many devices, because at that time only boot
                         * cpu is online.
                         */
-                       cfg = irq_cfg(irq);
+                       desc = irq_to_desc(irq);
+                       cfg = desc->chip_data;
                        if (!cfg->vector) {
-                               setup_IO_APIC_irq(ioapic, pin, irq,
+                               setup_IO_APIC_irq(ioapic, pin, irq, desc,
                                                  irq_trigger(irq_entry),
                                                  irq_polarity(irq_entry));
                                continue;
@@ -3784,7 +4059,6 @@ void __init setup_ioapic_dest(void)
                        /*
                         * Honour affinities which have been set in early boot
                         */
-                       desc = irq_to_desc(irq);
                        if (desc->status &
                            (IRQ_NO_BALANCING | IRQ_AFFINITY_SET))
                                mask = desc->affinity;
@@ -3793,10 +4067,10 @@ void __init setup_ioapic_dest(void)
 
 #ifdef CONFIG_INTR_REMAP
                        if (intr_remapping_enabled)
-                               set_ir_ioapic_affinity_irq(irq, mask);
+                               set_ir_ioapic_affinity_irq_desc(desc, mask);
                        else
 #endif
-                               set_ioapic_affinity_irq(irq, mask);
+                               set_ioapic_affinity_irq_desc(desc, mask);
                }
 
        }
@@ -3845,7 +4119,6 @@ void __init ioapic_init_mappings(void)
        struct resource *ioapic_res;
        int i;
 
-       irq_2_pin_init();
        ioapic_res = ioapic_setup_resources();
        for (i = 0; i < nr_ioapics; i++) {
                if (smp_found_config) {
index d1d4dc52f649cdd90f360d042b1055d7694f4e59..3f1d9d18df679c858bda94daae4d578c0d8cbf93 100644 (file)
@@ -118,6 +118,9 @@ int show_interrupts(struct seq_file *p, void *v)
        }
 
        desc = irq_to_desc(i);
+       if (!desc)
+               return 0;
+
        spin_lock_irqsave(&desc->lock, flags);
 #ifndef CONFIG_SMP
        any_count = kstat_irqs(i);
index a51382672de0c5e5fb291bd6ff6bce13396a8f73..119fc9c8ff7f29135b0e353cc31722cd6abe452f 100644 (file)
@@ -242,6 +242,8 @@ void fixup_irqs(cpumask_t map)
        for_each_irq_desc(irq, desc) {
                cpumask_t mask;
 
+               if (!desc)
+                       continue;
                if (irq == 2)
                        continue;
 
index 1df869e5bd0b935fc12e5a65b254eeb985df3f6f..a174a217eb1aefc25c971c2d6155e483d2d50192 100644 (file)
@@ -91,6 +91,8 @@ void fixup_irqs(cpumask_t map)
                int break_affinity = 0;
                int set_affinity = 1;
 
+               if (!desc)
+                       continue;
                if (irq == 2)
                        continue;
 
index 607db63044a5e6f2ea383f71378a6a79ce85797b..203384ed2b5d85342f0ad9ea4f85adf45385acb5 100644 (file)
@@ -68,8 +68,7 @@ void __init init_ISA_irqs (void)
        /*
         * 16 old-style INTA-cycle interrupts:
         */
-       for (i = 0; i < 16; i++) {
-               /* first time call this irq_desc */
+       for (i = 0; i < NR_IRQS_LEGACY; i++) {
                struct irq_desc *desc = irq_to_desc(i);
 
                desc->status = IRQ_DISABLED;
index 8670b3ce626e7ee6bf0d379c610959a699d79aef..6190e6ef546cfc197e63d0a716861d94905b9a59 100644 (file)
@@ -76,8 +76,7 @@ void __init init_ISA_irqs(void)
        init_bsp_APIC();
        init_8259A(0);
 
-       for (i = 0; i < 16; i++) {
-               /* first time call this irq_desc */
+       for (i = 0; i < NR_IRQS_LEGACY; i++) {
                struct irq_desc *desc = irq_to_desc(i);
 
                desc->status = IRQ_DISABLED;
index 7a3dfceb90e47a341295ca3f1201c232b5beb814..19a1044a0cd94fbdc64b6ffd1e3df8ecde4530c9 100644 (file)
@@ -101,11 +101,15 @@ static void __init dma32_free_bootmem(void)
        dma32_bootmem_ptr = NULL;
        dma32_bootmem_size = 0;
 }
+#endif
 
 void __init pci_iommu_alloc(void)
 {
+#ifdef CONFIG_X86_64
        /* free the range so iommu could get some range less than 4G */
        dma32_free_bootmem();
+#endif
+
        /*
         * The order of these functions is important for
         * fall-back/fail-over reasons
@@ -121,15 +125,6 @@ void __init pci_iommu_alloc(void)
        pci_swiotlb_init();
 }
 
-unsigned long iommu_nr_pages(unsigned long addr, unsigned long len)
-{
-       unsigned long size = roundup((addr & ~PAGE_MASK) + len, PAGE_SIZE);
-
-       return size >> PAGE_SHIFT;
-}
-EXPORT_SYMBOL(iommu_nr_pages);
-#endif
-
 void *dma_generic_alloc_coherent(struct device *dev, size_t size,
                                 dma_addr_t *dma_addr, gfp_t flag)
 {
index 3c539d111abbacc9d2826ec6bcee5bbda30bfe44..242c3440687faea181b67e9f1607f76d04ee9780 100644 (file)
@@ -3,6 +3,8 @@
 #include <linux/pci.h>
 #include <linux/cache.h>
 #include <linux/module.h>
+#include <linux/swiotlb.h>
+#include <linux/bootmem.h>
 #include <linux/dma-mapping.h>
 
 #include <asm/iommu.h>
 
 int swiotlb __read_mostly;
 
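+/*
+ * x86 hooks for the generic swiotlb code: buffers come from bootmem or the
+ * page allocator, and bus addresses map 1:1 to physical addresses.
+ */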
+void *swiotlb_alloc_boot(size_t size, unsigned long nslabs)
+{
+       return alloc_bootmem_low_pages(size);
+}
+
+void *swiotlb_alloc(unsigned order, unsigned long nslabs)
+{
+       return (void *)__get_free_pages(GFP_DMA | __GFP_NOWARN, order);
+}
+
+dma_addr_t swiotlb_phys_to_bus(phys_addr_t paddr)
+{
+       return paddr;
+}
+
+phys_addr_t swiotlb_bus_to_phys(dma_addr_t baddr)
+{
+       return baddr;
+}
+
+int __weak swiotlb_arch_range_needs_mapping(void *ptr, size_t size)
+{
+       return 0;
+}
+
 static dma_addr_t
 swiotlb_map_single_phys(struct device *hwdev, phys_addr_t paddr, size_t size,
                        int direction)
@@ -50,8 +77,10 @@ struct dma_mapping_ops swiotlb_dma_ops = {
 void __init pci_swiotlb_init(void)
 {
        /* don't initialize swiotlb if iommu=off (no_iommu=1) */
+#ifdef CONFIG_X86_64
        if (!iommu_detected && !no_iommu && max_pfn > MAX_DMA32_PFN)
               swiotlb = 1;
+#endif
        if (swiotlb_force)
                swiotlb = 1;
        if (swiotlb) {
index 67465ed8931088b52d9b521cb3a116511cef9e42..309949e9e1c1a9e65969507cb7dea1097a2410d9 100644 (file)
@@ -168,6 +168,8 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_31,
                         ich_force_enable_hpet);
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_1,
                         ich_force_enable_hpet);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_4,
+                        ich_force_enable_hpet);
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH9_7,
                         ich_force_enable_hpet);
 
index 08e02e8453c9a147d838daab9c21421708d22350..ae0d8042cf69527542b4e554d4567eea154eff45 100644 (file)
@@ -953,7 +953,7 @@ void __init setup_arch(char **cmdline_p)
        ioapic_init_mappings();
 
        /* need to wait for io_apic is mapped */
-       nr_irqs = probe_nr_irqs();
+       probe_nr_irqs_gsi();
 
        kvm_guest_init();
 
index 9e68075544f6dbb5e9a6fbc002e3a2df3091dc6c..4a20b2f9a381a360b46246c2c21c941258c1367a 100644 (file)
@@ -39,7 +39,7 @@ static inline int __movsl_is_ok(unsigned long a1, unsigned long a2, unsigned lon
 #define __do_strncpy_from_user(dst, src, count, res)                      \
 do {                                                                      \
        int __d0, __d1, __d2;                                              \
-       might_sleep();                                                     \
+       might_fault();                                                     \
        __asm__ __volatile__(                                              \
                "       testl %1,%1\n"                                     \
                "       jz 2f\n"                                           \
@@ -126,7 +126,7 @@ EXPORT_SYMBOL(strncpy_from_user);
 #define __do_clear_user(addr,size)                                     \
 do {                                                                   \
        int __d0;                                                       \
-       might_sleep();                                                  \
+       might_fault();                                                  \
        __asm__ __volatile__(                                           \
                "0:     rep; stosl\n"                                   \
                "       movl %2,%0\n"                                   \
@@ -155,7 +155,7 @@ do {                                                                        \
 unsigned long
 clear_user(void __user *to, unsigned long n)
 {
-       might_sleep();
+       might_fault();
        if (access_ok(VERIFY_WRITE, to, n))
                __do_clear_user(to, n);
        return n;
@@ -197,7 +197,7 @@ long strnlen_user(const char __user *s, long n)
        unsigned long mask = -__addr_ok(s);
        unsigned long res, tmp;
 
-       might_sleep();
+       might_fault();
 
        __asm__ __volatile__(
                "       testl %0, %0\n"
index f4df6e7c718be506a59ef157bda9591dfb1790a6..64d6c84e6353e9d50ce3d7f7cda6ee7178ca3494 100644 (file)
@@ -15,7 +15,7 @@
 #define __do_strncpy_from_user(dst,src,count,res)                         \
 do {                                                                      \
        long __d0, __d1, __d2;                                             \
-       might_sleep();                                                     \
+       might_fault();                                                     \
        __asm__ __volatile__(                                              \
                "       testq %1,%1\n"                                     \
                "       jz 2f\n"                                           \
@@ -64,7 +64,7 @@ EXPORT_SYMBOL(strncpy_from_user);
 unsigned long __clear_user(void __user *addr, unsigned long size)
 {
        long __d0;
-       might_sleep();
+       might_fault();
        /* no memory constraint because it doesn't change any memory gcc knows
           about */
        asm volatile(
index 800e1d94c1b5580e627ddb0a2ef81cfd10d031cd..8655b5bb0963f807a0fe49dcd3da367db19379f0 100644 (file)
@@ -21,6 +21,7 @@
 #include <linux/init.h>
 #include <linux/highmem.h>
 #include <linux/pagemap.h>
+#include <linux/pci.h>
 #include <linux/pfn.h>
 #include <linux/poison.h>
 #include <linux/bootmem.h>
@@ -967,6 +968,8 @@ void __init mem_init(void)
        int codesize, reservedpages, datasize, initsize;
        int tmp;
 
+       pci_iommu_alloc();
+
 #ifdef CONFIG_FLATMEM
        BUG_ON(!mem_map);
 #endif
index 290b219fad9c5a1b825d495c8ea427bda7b04c5c..ac0956f77785ad55cd9d771ab65b9ce7c4797716 100644 (file)
@@ -24,21 +24,17 @@ menuconfig BLOCK
 if BLOCK
 
 config LBD
-       bool "Support for Large Block Devices"
+       bool "Support for large block devices and files"
        depends on !64BIT
        help
-         Enable block devices of size 2TB and larger.
+         Enable block devices or files of size 2TB and larger.
 
          This option is required to support the full capacity of large
          (2TB+) block devices, including RAID, disk, Network Block Device,
          Logical Volume Manager (LVM) and loopback.
-
-         For example, RAID devices are frequently bigger than the capacity
-         of the largest individual hard drive.
-
-         This option is not required if you have individual disk drives
-         which total 2TB+ and you are not aggregating the capacity into
-         a large block device (e.g. using RAID or LVM).
+
+         This option also enables support for single files larger than
+         2TB.
 
          If unsure, say N.
 
@@ -58,15 +54,6 @@ config BLK_DEV_IO_TRACE
 
          If unsure, say N.
 
-config LSF
-       bool "Support for Large Single Files"
-       depends on !64BIT
-       help
-         Say Y here if you want to be able to handle very large files (2TB
-         and larger), otherwise say N.
-
-         If unsure, say Y.
-
 config BLK_DEV_BSG
        bool "Block layer SG support v4 (EXPERIMENTAL)"
        depends on EXPERIMENTAL
index 71f0abb219eee2556d41dcc9f1f1e01834b6d8aa..631f6f44460a2bd7ad7218f89d5e15bb90742ee6 100644 (file)
@@ -1339,12 +1339,12 @@ static int as_may_queue(struct request_queue *q, int rw)
        return ret;
 }
 
-static void as_exit_queue(elevator_t *e)
+static void as_exit_queue(struct elevator_queue *e)
 {
        struct as_data *ad = e->elevator_data;
 
        del_timer_sync(&ad->antic_timer);
-       kblockd_flush_work(&ad->antic_work);
+       cancel_work_sync(&ad->antic_work);
 
        BUG_ON(!list_empty(&ad->fifo_list[REQ_SYNC]));
        BUG_ON(!list_empty(&ad->fifo_list[REQ_ASYNC]));
@@ -1409,7 +1409,7 @@ as_var_store(unsigned long *var, const char *page, size_t count)
        return count;
 }
 
-static ssize_t est_time_show(elevator_t *e, char *page)
+static ssize_t est_time_show(struct elevator_queue *e, char *page)
 {
        struct as_data *ad = e->elevator_data;
        int pos = 0;
@@ -1427,7 +1427,7 @@ static ssize_t est_time_show(elevator_t *e, char *page)
 }
 
 #define SHOW_FUNCTION(__FUNC, __VAR)                           \
-static ssize_t __FUNC(elevator_t *e, char *page)               \
+static ssize_t __FUNC(struct elevator_queue *e, char *page)    \
 {                                                              \
        struct as_data *ad = e->elevator_data;                  \
        return as_var_show(jiffies_to_msecs((__VAR)), (page));  \
@@ -1440,7 +1440,7 @@ SHOW_FUNCTION(as_write_batch_expire_show, ad->batch_expire[REQ_ASYNC]);
 #undef SHOW_FUNCTION
 
 #define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX)                                \
-static ssize_t __FUNC(elevator_t *e, const char *page, size_t count)   \
+static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count)        \
 {                                                                      \
        struct as_data *ad = e->elevator_data;                          \
        int ret = as_var_store(__PTR, (page), count);                   \
index 6e72d661ae425daa62f8b7999acc8f825e2a12e9..8eba4e43bb0c505c9c0e846ddd33caf0f15bfdd7 100644 (file)
@@ -24,8 +24,8 @@
 int blk_queue_ordered(struct request_queue *q, unsigned ordered,
                      prepare_flush_fn *prepare_flush_fn)
 {
-       if (ordered & (QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_POSTFLUSH) &&
-           prepare_flush_fn == NULL) {
+       if (!prepare_flush_fn && (ordered & (QUEUE_ORDERED_DO_PREFLUSH |
+                                            QUEUE_ORDERED_DO_POSTFLUSH))) {
                printk(KERN_ERR "%s: prepare_flush_fn required\n", __func__);
                return -EINVAL;
        }
@@ -88,7 +88,7 @@ unsigned blk_ordered_req_seq(struct request *rq)
                return QUEUE_ORDSEQ_DONE;
 }
 
-void blk_ordered_complete_seq(struct request_queue *q, unsigned seq, int error)
+bool blk_ordered_complete_seq(struct request_queue *q, unsigned seq, int error)
 {
        struct request *rq;
 
@@ -99,7 +99,7 @@ void blk_ordered_complete_seq(struct request_queue *q, unsigned seq, int error)
        q->ordseq |= seq;
 
        if (blk_ordered_cur_seq(q) != QUEUE_ORDSEQ_DONE)
-               return;
+               return false;
 
        /*
         * Okay, sequence complete.
@@ -109,6 +109,8 @@ void blk_ordered_complete_seq(struct request_queue *q, unsigned seq, int error)
 
        if (__blk_end_request(rq, q->orderr, blk_rq_bytes(rq)))
                BUG();
+
+       return true;
 }
 
 static void pre_flush_end_io(struct request *rq, int error)
@@ -134,7 +136,7 @@ static void queue_flush(struct request_queue *q, unsigned which)
        struct request *rq;
        rq_end_io_fn *end_io;
 
-       if (which == QUEUE_ORDERED_PREFLUSH) {
+       if (which == QUEUE_ORDERED_DO_PREFLUSH) {
                rq = &q->pre_flush_rq;
                end_io = pre_flush_end_io;
        } else {
@@ -151,80 +153,110 @@ static void queue_flush(struct request_queue *q, unsigned which)
        elv_insert(q, rq, ELEVATOR_INSERT_FRONT);
 }
 
-static inline struct request *start_ordered(struct request_queue *q,
-                                           struct request *rq)
+static inline bool start_ordered(struct request_queue *q, struct request **rqp)
 {
+       struct request *rq = *rqp;
+       unsigned skip = 0;
+
        q->orderr = 0;
        q->ordered = q->next_ordered;
        q->ordseq |= QUEUE_ORDSEQ_STARTED;
 
        /*
-        * Prep proxy barrier request.
+        * For an empty barrier, there's no actual BAR request, which
+        * in turn makes POSTFLUSH unnecessary.  Mask them off.
         */
+       if (!rq->hard_nr_sectors) {
+               q->ordered &= ~(QUEUE_ORDERED_DO_BAR |
+                               QUEUE_ORDERED_DO_POSTFLUSH);
+               /*
+                * Empty barrier on a write-through device w/ ordered
+                * tag has no command to issue and without any command
+                * to issue, ordering by tag can't be used.  Drain
+                * instead.
+                */
+               if ((q->ordered & QUEUE_ORDERED_BY_TAG) &&
+                   !(q->ordered & QUEUE_ORDERED_DO_PREFLUSH)) {
+                       q->ordered &= ~QUEUE_ORDERED_BY_TAG;
+                       q->ordered |= QUEUE_ORDERED_BY_DRAIN;
+               }
+       }
+
+       /* stash away the original request */
        elv_dequeue_request(q, rq);
        q->orig_bar_rq = rq;
-       rq = &q->bar_rq;
-       blk_rq_init(q, rq);
-       if (bio_data_dir(q->orig_bar_rq->bio) == WRITE)
-               rq->cmd_flags |= REQ_RW;
-       if (q->ordered & QUEUE_ORDERED_FUA)
-               rq->cmd_flags |= REQ_FUA;
-       init_request_from_bio(rq, q->orig_bar_rq->bio);
-       rq->end_io = bar_end_io;
+       rq = NULL;
 
        /*
         * Queue ordered sequence.  As we stack them at the head, we
         * need to queue in reverse order.  Note that we rely on that
         * no fs request uses ELEVATOR_INSERT_FRONT and thus no fs
-        * request gets inbetween ordered sequence. If this request is
-        * an empty barrier, we don't need to do a postflush ever since
-        * there will be no data written between the pre and post flush.
-        * Hence a single flush will suffice.
+        * request gets inbetween ordered sequence.
         */
-       if ((q->ordered & QUEUE_ORDERED_POSTFLUSH) && !blk_empty_barrier(rq))
-               queue_flush(q, QUEUE_ORDERED_POSTFLUSH);
-       else
-               q->ordseq |= QUEUE_ORDSEQ_POSTFLUSH;
+       if (q->ordered & QUEUE_ORDERED_DO_POSTFLUSH) {
+               queue_flush(q, QUEUE_ORDERED_DO_POSTFLUSH);
+               rq = &q->post_flush_rq;
+       } else
+               skip |= QUEUE_ORDSEQ_POSTFLUSH;
 
-       elv_insert(q, rq, ELEVATOR_INSERT_FRONT);
+       if (q->ordered & QUEUE_ORDERED_DO_BAR) {
+               rq = &q->bar_rq;
+
+               /* initialize proxy request and queue it */
+               blk_rq_init(q, rq);
+               if (bio_data_dir(q->orig_bar_rq->bio) == WRITE)
+                       rq->cmd_flags |= REQ_RW;
+               if (q->ordered & QUEUE_ORDERED_DO_FUA)
+                       rq->cmd_flags |= REQ_FUA;
+               init_request_from_bio(rq, q->orig_bar_rq->bio);
+               rq->end_io = bar_end_io;
 
-       if (q->ordered & QUEUE_ORDERED_PREFLUSH) {
-               queue_flush(q, QUEUE_ORDERED_PREFLUSH);
+               elv_insert(q, rq, ELEVATOR_INSERT_FRONT);
+       } else
+               skip |= QUEUE_ORDSEQ_BAR;
+
+       if (q->ordered & QUEUE_ORDERED_DO_PREFLUSH) {
+               queue_flush(q, QUEUE_ORDERED_DO_PREFLUSH);
                rq = &q->pre_flush_rq;
        } else
-               q->ordseq |= QUEUE_ORDSEQ_PREFLUSH;
+               skip |= QUEUE_ORDSEQ_PREFLUSH;
 
-       if ((q->ordered & QUEUE_ORDERED_TAG) || q->in_flight == 0)
-               q->ordseq |= QUEUE_ORDSEQ_DRAIN;
-       else
+       if ((q->ordered & QUEUE_ORDERED_BY_DRAIN) && q->in_flight)
                rq = NULL;
+       else
+               skip |= QUEUE_ORDSEQ_DRAIN;
+
+       *rqp = rq;
 
-       return rq;
+       /*
+        * Complete skipped sequences.  If whole sequence is complete,
+        * return false to tell elevator that this request is gone.
+        */
+       return !blk_ordered_complete_seq(q, skip, 0);
 }
 
-int blk_do_ordered(struct request_queue *q, struct request **rqp)
+bool blk_do_ordered(struct request_queue *q, struct request **rqp)
 {
        struct request *rq = *rqp;
        const int is_barrier = blk_fs_request(rq) && blk_barrier_rq(rq);
 
        if (!q->ordseq) {
                if (!is_barrier)
-                       return 1;
+                       return true;
 
-               if (q->next_ordered != QUEUE_ORDERED_NONE) {
-                       *rqp = start_ordered(q, rq);
-                       return 1;
-               } else {
+               if (q->next_ordered != QUEUE_ORDERED_NONE)
+                       return start_ordered(q, rqp);
+               else {
                        /*
-                        * This can happen when the queue switches to
-                        * ORDERED_NONE while this request is on it.
+                        * Queue ordering not supported.  Terminate
+                        * with prejudice.
                         */
                        elv_dequeue_request(q, rq);
                        if (__blk_end_request(rq, -EOPNOTSUPP,
                                              blk_rq_bytes(rq)))
                                BUG();
                        *rqp = NULL;
-                       return 0;
+                       return false;
                }
        }
 
@@ -235,9 +267,9 @@ int blk_do_ordered(struct request_queue *q, struct request **rqp)
        /* Special requests are not subject to ordering rules. */
        if (!blk_fs_request(rq) &&
            rq != &q->pre_flush_rq && rq != &q->post_flush_rq)
-               return 1;
+               return true;
 
-       if (q->ordered & QUEUE_ORDERED_TAG) {
+       if (q->ordered & QUEUE_ORDERED_BY_TAG) {
                /* Ordered by tag.  Blocking the next barrier is enough. */
                if (is_barrier && rq != &q->bar_rq)
                        *rqp = NULL;
@@ -248,7 +280,7 @@ int blk_do_ordered(struct request_queue *q, struct request **rqp)
                        *rqp = NULL;
        }
 
-       return 1;
+       return true;
 }
 
 static void bio_end_empty_barrier(struct bio *bio, int err)
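
The barrier rework above splits the old QUEUE_ORDERED_* bits into BY_* (drain vs. tag) and DO_* (which steps to issue) groups, and makes start_ordered()/blk_do_ordered() report whether the request is still alive. A rough driver-side sketch, with illustrative driver names, of asking for a drain-plus-flush ordered mode under the renamed constants:

#include <linux/blkdev.h>

static void example_prepare_flush(struct request_queue *q, struct request *rq)
{
	/* fill @rq with the device's cache-flush command (driver specific) */
}

static int example_enable_barriers(struct request_queue *q)
{
	/*
	 * QUEUE_ORDERED_DRAIN_FLUSH implies QUEUE_ORDERED_DO_PREFLUSH and
	 * QUEUE_ORDERED_DO_POSTFLUSH, so a prepare_flush_fn is mandatory;
	 * blk_queue_ordered() rejects the combination without one.
	 */
	return blk_queue_ordered(q, QUEUE_ORDERED_DRAIN_FLUSH,
				 example_prepare_flush);
}
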
index 561e8a1b43a4a526e405905c69cf8f0e15a4df27..a824e49c0d0ac0567ad8294f6289544fa40553b6 100644 (file)
@@ -153,6 +153,9 @@ static void req_bio_endio(struct request *rq, struct bio *bio,
                        nbytes = bio->bi_size;
                }
 
+               if (unlikely(rq->cmd_flags & REQ_QUIET))
+                       set_bit(BIO_QUIET, &bio->bi_flags);
+
                bio->bi_size -= nbytes;
                bio->bi_sector += (nbytes >> 9);
 
@@ -265,8 +268,7 @@ void __generic_unplug_device(struct request_queue *q)
 {
        if (unlikely(blk_queue_stopped(q)))
                return;
-
-       if (!blk_remove_plug(q))
+       if (!blk_remove_plug(q) && !blk_queue_nonrot(q))
                return;
 
        q->request_fn(q);
@@ -404,7 +406,8 @@ EXPORT_SYMBOL(blk_stop_queue);
 void blk_sync_queue(struct request_queue *q)
 {
        del_timer_sync(&q->unplug_timer);
-       kblockd_flush_work(&q->unplug_work);
+       del_timer_sync(&q->timeout);
+       cancel_work_sync(&q->unplug_work);
 }
 EXPORT_SYMBOL(blk_sync_queue);
 
@@ -1135,7 +1138,7 @@ void init_request_from_bio(struct request *req, struct bio *bio)
 static int __make_request(struct request_queue *q, struct bio *bio)
 {
        struct request *req;
-       int el_ret, nr_sectors, barrier, discard, err;
+       int el_ret, nr_sectors;
        const unsigned short prio = bio_prio(bio);
        const int sync = bio_sync(bio);
        int rw_flags;
@@ -1149,22 +1152,9 @@ static int __make_request(struct request_queue *q, struct bio *bio)
         */
        blk_queue_bounce(q, &bio);
 
-       barrier = bio_barrier(bio);
-       if (unlikely(barrier) && bio_has_data(bio) &&
-           (q->next_ordered == QUEUE_ORDERED_NONE)) {
-               err = -EOPNOTSUPP;
-               goto end_io;
-       }
-
-       discard = bio_discard(bio);
-       if (unlikely(discard) && !q->prepare_discard_fn) {
-               err = -EOPNOTSUPP;
-               goto end_io;
-       }
-
        spin_lock_irq(q->queue_lock);
 
-       if (unlikely(barrier) || elv_queue_empty(q))
+       if (unlikely(bio_barrier(bio)) || elv_queue_empty(q))
                goto get_rq;
 
        el_ret = elv_merge(q, &req, bio);
@@ -1250,18 +1240,14 @@ get_rq:
        if (test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags) ||
            bio_flagged(bio, BIO_CPU_AFFINE))
                req->cpu = blk_cpu_to_group(smp_processor_id());
-       if (elv_queue_empty(q))
+       if (!blk_queue_nonrot(q) && elv_queue_empty(q))
                blk_plug_device(q);
        add_request(q, req);
 out:
-       if (sync)
+       if (sync || blk_queue_nonrot(q))
                __generic_unplug_device(q);
        spin_unlock_irq(q->queue_lock);
        return 0;
-
-end_io:
-       bio_endio(bio, err);
-       return 0;
 }
 
 /*
@@ -1414,15 +1400,13 @@ static inline void __generic_make_request(struct bio *bio)
                char b[BDEVNAME_SIZE];
 
                q = bdev_get_queue(bio->bi_bdev);
-               if (!q) {
+               if (unlikely(!q)) {
                        printk(KERN_ERR
                               "generic_make_request: Trying to access "
                                "nonexistent block-device %s (%Lu)\n",
                                bdevname(bio->bi_bdev, b),
                                (long long) bio->bi_sector);
-end_io:
-                       bio_endio(bio, err);
-                       break;
+                       goto end_io;
                }
 
                if (unlikely(nr_sectors > q->max_hw_sectors)) {
@@ -1459,14 +1443,19 @@ end_io:
 
                if (bio_check_eod(bio, nr_sectors))
                        goto end_io;
-               if ((bio_empty_barrier(bio) && !q->prepare_flush_fn) ||
-                   (bio_discard(bio) && !q->prepare_discard_fn)) {
+
+               if (bio_discard(bio) && !q->prepare_discard_fn) {
                        err = -EOPNOTSUPP;
                        goto end_io;
                }
 
                ret = q->make_request_fn(q, bio);
        } while (ret);
+
+       return;
+
+end_io:
+       bio_endio(bio, err);
 }
 
 /*
@@ -1716,14 +1705,6 @@ static int __end_that_request_first(struct request *req, int error,
        while ((bio = req->bio) != NULL) {
                int nbytes;
 
-               /*
-                * For an empty barrier request, the low level driver must
-                * store a potential error location in ->sector. We pass
-                * that back up in ->bi_sector.
-                */
-               if (blk_empty_barrier(req))
-                       bio->bi_sector = req->sector;
-
                if (nr_bytes >= bio->bi_size) {
                        req->bio = bio->bi_next;
                        nbytes = bio->bi_size;
@@ -2143,12 +2124,6 @@ int kblockd_schedule_work(struct request_queue *q, struct work_struct *work)
 }
 EXPORT_SYMBOL(kblockd_schedule_work);
 
-void kblockd_flush_work(struct work_struct *work)
-{
-       cancel_work_sync(work);
-}
-EXPORT_SYMBOL(kblockd_flush_work);
-
 int __init blk_dev_init(void)
 {
        kblockd_workqueue = create_workqueue("kblockd");
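
Among the blk-core changes above, req_bio_endio() now copies REQ_QUIET into a per-bio BIO_QUIET flag so stacked completion paths can suppress error noise. A sketch of how a consumer might honour it; the completion handler here is illustrative, not from the patch:

#include <linux/kernel.h>
#include <linux/bio.h>

static void example_end_io(struct bio *bio, int error)
{
	if (error && !bio_flagged(bio, BIO_QUIET))
		printk(KERN_ERR "example: I/O error %d at sector %llu\n",
		       error, (unsigned long long)bio->bi_sector);
	bio_put(bio);
}
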
index afa55e14e27896837cb026ce706438520ea848aa..59fd05d9f1d5d8022d1b16a5263d6ae087aadb21 100644 (file)
@@ -319,9 +319,9 @@ void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b)
        t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors);
        t->seg_boundary_mask = min_not_zero(t->seg_boundary_mask, b->seg_boundary_mask);
 
-       t->max_phys_segments = min(t->max_phys_segments, b->max_phys_segments);
-       t->max_hw_segments = min(t->max_hw_segments, b->max_hw_segments);
-       t->max_segment_size = min(t->max_segment_size, b->max_segment_size);
+       t->max_phys_segments = min_not_zero(t->max_phys_segments, b->max_phys_segments);
+       t->max_hw_segments = min_not_zero(t->max_hw_segments, b->max_hw_segments);
+       t->max_segment_size = min_not_zero(t->max_segment_size, b->max_segment_size);
        t->hardsect_size = max(t->hardsect_size, b->hardsect_size);
        if (!t->queue_lock)
                WARN_ON_ONCE(1);
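
The stacking helpers above move to min_not_zero() because a limit of zero means "unrestricted" and must not override a real limit from the underlying queue. A small illustration, not taken from the patch:

static unsigned short example_stack_segments(unsigned short top,
					     unsigned short bottom)
{
	/* top == 0 (unlimited), bottom == 128  ->  returns 128;
	 * plain min() would return 0 and effectively disable I/O. */
	return min_not_zero(top, bottom);
}
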
index e660d26ca656f060ca0c5a61952681c7b7d55e66..ce0efc6b26dc54cfabf225d4a78265ef401a9a24 100644 (file)
@@ -161,7 +161,7 @@ void blk_complete_request(struct request *req)
 }
 EXPORT_SYMBOL(blk_complete_request);
 
-__init int blk_softirq_init(void)
+static __init int blk_softirq_init(void)
 {
        int i;
 
index 21e275d7eed9444ab876834db2345eb78f4ae183..a29cb788e408e720b0b8023fca95a9f3d26a3e8c 100644 (file)
@@ -88,9 +88,7 @@ queue_ra_store(struct request_queue *q, const char *page, size_t count)
        unsigned long ra_kb;
        ssize_t ret = queue_var_store(&ra_kb, page, count);
 
-       spin_lock_irq(q->queue_lock);
        q->backing_dev_info.ra_pages = ra_kb >> (PAGE_CACHE_SHIFT - 10);
-       spin_unlock_irq(q->queue_lock);
 
        return ret;
 }
@@ -117,10 +115,7 @@ queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
 
        if (max_sectors_kb > max_hw_sectors_kb || max_sectors_kb < page_kb)
                return -EINVAL;
-       /*
-        * Take the queue lock to update the readahead and max_sectors
-        * values synchronously:
-        */
+
        spin_lock_irq(q->queue_lock);
        q->max_sectors = max_sectors_kb << 1;
        spin_unlock_irq(q->queue_lock);
index c0d419e84ce7f8518e1246e3ec61b07f5c81efe8..3c518e3303ae34113516924f9d73a58b94a1e75c 100644 (file)
@@ -158,7 +158,6 @@ fail:
 /**
  * blk_init_tags - initialize the tag info for an external tag map
  * @depth:     the maximum queue depth supported
- * @tags: the tag to use
  **/
 struct blk_queue_tag *blk_init_tags(int depth)
 {
index 69185ea9fae26d4f643b6e70e07a3cd2e2265100..a09535377a94c0b84a59118533f5e4ddeaa1076d 100644 (file)
@@ -73,11 +73,7 @@ ssize_t part_timeout_store(struct device *dev, struct device_attribute *attr,
  */
 void blk_delete_timer(struct request *req)
 {
-       struct request_queue *q = req->q;
-
        list_del_init(&req->timeout_list);
-       if (list_empty(&q->timeout_list))
-               del_timer(&q->timeout);
 }
 
 static void blk_rq_timed_out(struct request *req)
@@ -111,7 +107,7 @@ static void blk_rq_timed_out(struct request *req)
 void blk_rq_timed_out_timer(unsigned long data)
 {
        struct request_queue *q = (struct request_queue *) data;
-       unsigned long flags, uninitialized_var(next), next_set = 0;
+       unsigned long flags, next = 0;
        struct request *rq, *tmp;
 
        spin_lock_irqsave(q->queue_lock, flags);
@@ -126,15 +122,18 @@ void blk_rq_timed_out_timer(unsigned long data)
                        if (blk_mark_rq_complete(rq))
                                continue;
                        blk_rq_timed_out(rq);
+               } else {
+                       if (!next || time_after(next, rq->deadline))
+                               next = rq->deadline;
                }
-               if (!next_set) {
-                       next = rq->deadline;
-                       next_set = 1;
-               } else if (time_after(next, rq->deadline))
-                       next = rq->deadline;
        }
 
-       if (next_set && !list_empty(&q->timeout_list))
+       /*
+        * next can never be 0 here with the list non-empty, since we always
+        * bump ->deadline to 1 so we can detect if the timer was ever added
+        * or not. See comment in blk_add_timer()
+        */
+       if (next)
                mod_timer(&q->timeout, round_jiffies_up(next));
 
        spin_unlock_irqrestore(q->queue_lock, flags);
index 6a062eebbd15301320e7491b5dd45d17f2204a3c..e8525fa72823a0c88adfd386e84e7b3e5db33af7 100644 (file)
@@ -1136,12 +1136,8 @@ static int cfq_dispatch_requests(struct request_queue *q, int force)
                if (cfq_class_idle(cfqq))
                        max_dispatch = 1;
 
-               if (cfqq->dispatched >= max_dispatch) {
-                       if (cfqd->busy_queues > 1)
-                               break;
-                       if (cfqq->dispatched >= 4 * max_dispatch)
-                               break;
-               }
+               if (cfqq->dispatched >= max_dispatch && cfqd->busy_queues > 1)
+                       break;
 
                if (cfqd->sync_flight && !cfq_cfqq_sync(cfqq))
                        break;
@@ -1318,7 +1314,15 @@ static void cfq_exit_single_io_context(struct io_context *ioc,
                unsigned long flags;
 
                spin_lock_irqsave(q->queue_lock, flags);
-               __cfq_exit_single_io_context(cfqd, cic);
+
+               /*
+                * Ensure we get a fresh copy of the ->key to prevent
+                * race between exiting task and queue
+                */
+               smp_read_barrier_depends();
+               if (cic->key)
+                       __cfq_exit_single_io_context(cfqd, cic);
+
                spin_unlock_irqrestore(q->queue_lock, flags);
        }
 }
@@ -2160,7 +2164,7 @@ out_cont:
 static void cfq_shutdown_timer_wq(struct cfq_data *cfqd)
 {
        del_timer_sync(&cfqd->idle_slice_timer);
-       kblockd_flush_work(&cfqd->unplug_work);
+       cancel_work_sync(&cfqd->unplug_work);
 }
 
 static void cfq_put_async_queues(struct cfq_data *cfqd)
@@ -2178,7 +2182,7 @@ static void cfq_put_async_queues(struct cfq_data *cfqd)
                cfq_put_queue(cfqd->async_idle_cfqq);
 }
 
-static void cfq_exit_queue(elevator_t *e)
+static void cfq_exit_queue(struct elevator_queue *e)
 {
        struct cfq_data *cfqd = e->elevator_data;
        struct request_queue *q = cfqd->queue;
@@ -2288,7 +2292,7 @@ cfq_var_store(unsigned int *var, const char *page, size_t count)
 }
 
 #define SHOW_FUNCTION(__FUNC, __VAR, __CONV)                           \
-static ssize_t __FUNC(elevator_t *e, char *page)                       \
+static ssize_t __FUNC(struct elevator_queue *e, char *page)            \
 {                                                                      \
        struct cfq_data *cfqd = e->elevator_data;                       \
        unsigned int __data = __VAR;                                    \
@@ -2308,7 +2312,7 @@ SHOW_FUNCTION(cfq_slice_async_rq_show, cfqd->cfq_slice_async_rq, 0);
 #undef SHOW_FUNCTION
 
 #define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV)                        \
-static ssize_t __FUNC(elevator_t *e, const char *page, size_t count)   \
+static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count)        \
 {                                                                      \
        struct cfq_data *cfqd = e->elevator_data;                       \
        unsigned int __data;                                            \
index 67eb93cff699ba2a4dc9231f73c0d4d46d9fc443..f87615dea46bbd9dfcdf0789f170c2b9b8512f6a 100644 (file)
@@ -774,9 +774,7 @@ long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
                bdi = blk_get_backing_dev_info(bdev);
                if (bdi == NULL)
                        return -ENOTTY;
-               lock_kernel();
                bdi->ra_pages = (arg * 512) / PAGE_CACHE_SIZE;
-               unlock_kernel();
                return 0;
        case BLKGETSIZE:
                size = bdev->bd_inode->i_size;
index fd311179f44c771f5c4ca49c2d67b3812fe47eb9..c4d991d4adef0e7fd6e0d432941b88771d7c6530 100644 (file)
@@ -334,7 +334,7 @@ static int deadline_queue_empty(struct request_queue *q)
                && list_empty(&dd->fifo_list[READ]);
 }
 
-static void deadline_exit_queue(elevator_t *e)
+static void deadline_exit_queue(struct elevator_queue *e)
 {
        struct deadline_data *dd = e->elevator_data;
 
@@ -387,7 +387,7 @@ deadline_var_store(int *var, const char *page, size_t count)
 }
 
 #define SHOW_FUNCTION(__FUNC, __VAR, __CONV)                           \
-static ssize_t __FUNC(elevator_t *e, char *page)                       \
+static ssize_t __FUNC(struct elevator_queue *e, char *page)            \
 {                                                                      \
        struct deadline_data *dd = e->elevator_data;                    \
        int __data = __VAR;                                             \
@@ -403,7 +403,7 @@ SHOW_FUNCTION(deadline_fifo_batch_show, dd->fifo_batch, 0);
 #undef SHOW_FUNCTION
 
 #define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV)                        \
-static ssize_t __FUNC(elevator_t *e, const char *page, size_t count)   \
+static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count)        \
 {                                                                      \
        struct deadline_data *dd = e->elevator_data;                    \
        int __data;                                                     \
index 86836dd179c05476078719869a0481777e999bc1..98259eda0ef66d4051cc5da958c0191f353b8158 100644 (file)
@@ -65,7 +65,7 @@ DEFINE_TRACE(block_rq_issue);
 static int elv_iosched_allow_merge(struct request *rq, struct bio *bio)
 {
        struct request_queue *q = rq->q;
-       elevator_t *e = q->elevator;
+       struct elevator_queue *e = q->elevator;
 
        if (e->ops->elevator_allow_merge_fn)
                return e->ops->elevator_allow_merge_fn(q, rq, bio);
@@ -208,13 +208,13 @@ __setup("elevator=", elevator_setup);
 
 static struct kobj_type elv_ktype;
 
-static elevator_t *elevator_alloc(struct request_queue *q,
+static struct elevator_queue *elevator_alloc(struct request_queue *q,
                                  struct elevator_type *e)
 {
-       elevator_t *eq;
+       struct elevator_queue *eq;
        int i;
 
-       eq = kmalloc_node(sizeof(elevator_t), GFP_KERNEL | __GFP_ZERO, q->node);
+       eq = kmalloc_node(sizeof(*eq), GFP_KERNEL | __GFP_ZERO, q->node);
        if (unlikely(!eq))
                goto err;
 
@@ -240,8 +240,9 @@ err:
 
 static void elevator_release(struct kobject *kobj)
 {
-       elevator_t *e = container_of(kobj, elevator_t, kobj);
+       struct elevator_queue *e;
 
+       e = container_of(kobj, struct elevator_queue, kobj);
        elevator_put(e->elevator_type);
        kfree(e->hash);
        kfree(e);
@@ -297,7 +298,7 @@ int elevator_init(struct request_queue *q, char *name)
 }
 EXPORT_SYMBOL(elevator_init);
 
-void elevator_exit(elevator_t *e)
+void elevator_exit(struct elevator_queue *e)
 {
        mutex_lock(&e->sysfs_lock);
        if (e->ops->elevator_exit_fn)
@@ -311,7 +312,7 @@ EXPORT_SYMBOL(elevator_exit);
 
 static void elv_activate_rq(struct request_queue *q, struct request *rq)
 {
-       elevator_t *e = q->elevator;
+       struct elevator_queue *e = q->elevator;
 
        if (e->ops->elevator_activate_req_fn)
                e->ops->elevator_activate_req_fn(q, rq);
@@ -319,7 +320,7 @@ static void elv_activate_rq(struct request_queue *q, struct request *rq)
 
 static void elv_deactivate_rq(struct request_queue *q, struct request *rq)
 {
-       elevator_t *e = q->elevator;
+       struct elevator_queue *e = q->elevator;
 
        if (e->ops->elevator_deactivate_req_fn)
                e->ops->elevator_deactivate_req_fn(q, rq);
@@ -338,7 +339,7 @@ static void elv_rqhash_del(struct request_queue *q, struct request *rq)
 
 static void elv_rqhash_add(struct request_queue *q, struct request *rq)
 {
-       elevator_t *e = q->elevator;
+       struct elevator_queue *e = q->elevator;
 
        BUG_ON(ELV_ON_HASH(rq));
        hlist_add_head(&rq->hash, &e->hash[ELV_HASH_FN(rq_hash_key(rq))]);
@@ -352,7 +353,7 @@ static void elv_rqhash_reposition(struct request_queue *q, struct request *rq)
 
 static struct request *elv_rqhash_find(struct request_queue *q, sector_t offset)
 {
-       elevator_t *e = q->elevator;
+       struct elevator_queue *e = q->elevator;
        struct hlist_head *hash_list = &e->hash[ELV_HASH_FN(offset)];
        struct hlist_node *entry, *next;
        struct request *rq;
@@ -494,7 +495,7 @@ EXPORT_SYMBOL(elv_dispatch_add_tail);
 
 int elv_merge(struct request_queue *q, struct request **req, struct bio *bio)
 {
-       elevator_t *e = q->elevator;
+       struct elevator_queue *e = q->elevator;
        struct request *__rq;
        int ret;
 
@@ -529,7 +530,7 @@ int elv_merge(struct request_queue *q, struct request **req, struct bio *bio)
 
 void elv_merged_request(struct request_queue *q, struct request *rq, int type)
 {
-       elevator_t *e = q->elevator;
+       struct elevator_queue *e = q->elevator;
 
        if (e->ops->elevator_merged_fn)
                e->ops->elevator_merged_fn(q, rq, type);
@@ -543,7 +544,7 @@ void elv_merged_request(struct request_queue *q, struct request *rq, int type)
 void elv_merge_requests(struct request_queue *q, struct request *rq,
                             struct request *next)
 {
-       elevator_t *e = q->elevator;
+       struct elevator_queue *e = q->elevator;
 
        if (e->ops->elevator_merge_req_fn)
                e->ops->elevator_merge_req_fn(q, rq, next);
@@ -755,14 +756,6 @@ struct request *elv_next_request(struct request_queue *q)
        int ret;
 
        while ((rq = __elv_next_request(q)) != NULL) {
-               /*
-                * Kill the empty barrier place holder, the driver must
-                * not ever see it.
-                */
-               if (blk_empty_barrier(rq)) {
-                       __blk_end_request(rq, 0, blk_rq_bytes(rq));
-                       continue;
-               }
                if (!(rq->cmd_flags & REQ_STARTED)) {
                        /*
                         * This is the first time the device driver
@@ -854,7 +847,7 @@ void elv_dequeue_request(struct request_queue *q, struct request *rq)
 
 int elv_queue_empty(struct request_queue *q)
 {
-       elevator_t *e = q->elevator;
+       struct elevator_queue *e = q->elevator;
 
        if (!list_empty(&q->queue_head))
                return 0;
@@ -868,7 +861,7 @@ EXPORT_SYMBOL(elv_queue_empty);
 
 struct request *elv_latter_request(struct request_queue *q, struct request *rq)
 {
-       elevator_t *e = q->elevator;
+       struct elevator_queue *e = q->elevator;
 
        if (e->ops->elevator_latter_req_fn)
                return e->ops->elevator_latter_req_fn(q, rq);
@@ -877,7 +870,7 @@ struct request *elv_latter_request(struct request_queue *q, struct request *rq)
 
 struct request *elv_former_request(struct request_queue *q, struct request *rq)
 {
-       elevator_t *e = q->elevator;
+       struct elevator_queue *e = q->elevator;
 
        if (e->ops->elevator_former_req_fn)
                return e->ops->elevator_former_req_fn(q, rq);
@@ -886,7 +879,7 @@ struct request *elv_former_request(struct request_queue *q, struct request *rq)
 
 int elv_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask)
 {
-       elevator_t *e = q->elevator;
+       struct elevator_queue *e = q->elevator;
 
        if (e->ops->elevator_set_req_fn)
                return e->ops->elevator_set_req_fn(q, rq, gfp_mask);
@@ -897,7 +890,7 @@ int elv_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask)
 
 void elv_put_request(struct request_queue *q, struct request *rq)
 {
-       elevator_t *e = q->elevator;
+       struct elevator_queue *e = q->elevator;
 
        if (e->ops->elevator_put_req_fn)
                e->ops->elevator_put_req_fn(rq);
@@ -905,7 +898,7 @@ void elv_put_request(struct request_queue *q, struct request *rq)
 
 int elv_may_queue(struct request_queue *q, int rw)
 {
-       elevator_t *e = q->elevator;
+       struct elevator_queue *e = q->elevator;
 
        if (e->ops->elevator_may_queue_fn)
                return e->ops->elevator_may_queue_fn(q, rw);
@@ -928,7 +921,7 @@ EXPORT_SYMBOL(elv_abort_queue);
 
 void elv_completed_request(struct request_queue *q, struct request *rq)
 {
-       elevator_t *e = q->elevator;
+       struct elevator_queue *e = q->elevator;
 
        /*
         * request is released from the driver, io must be done
@@ -944,10 +937,14 @@ void elv_completed_request(struct request_queue *q, struct request *rq)
         * drained for flush sequence.
         */
        if (unlikely(q->ordseq)) {
-               struct request *first_rq = list_entry_rq(q->queue_head.next);
-               if (q->in_flight == 0 &&
+               struct request *next = NULL;
+
+               if (!list_empty(&q->queue_head))
+                       next = list_entry_rq(q->queue_head.next);
+
+               if (!q->in_flight &&
                    blk_ordered_cur_seq(q) == QUEUE_ORDSEQ_DRAIN &&
-                   blk_ordered_req_seq(first_rq) > QUEUE_ORDSEQ_DRAIN) {
+                   (!next || blk_ordered_req_seq(next) > QUEUE_ORDSEQ_DRAIN)) {
                        blk_ordered_complete_seq(q, QUEUE_ORDSEQ_DRAIN, 0);
                        blk_start_queueing(q);
                }
@@ -959,13 +956,14 @@ void elv_completed_request(struct request_queue *q, struct request *rq)
 static ssize_t
 elv_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
 {
-       elevator_t *e = container_of(kobj, elevator_t, kobj);
        struct elv_fs_entry *entry = to_elv(attr);
+       struct elevator_queue *e;
        ssize_t error;
 
        if (!entry->show)
                return -EIO;
 
+       e = container_of(kobj, struct elevator_queue, kobj);
        mutex_lock(&e->sysfs_lock);
        error = e->ops ? entry->show(e, page) : -ENOENT;
        mutex_unlock(&e->sysfs_lock);
@@ -976,13 +974,14 @@ static ssize_t
 elv_attr_store(struct kobject *kobj, struct attribute *attr,
               const char *page, size_t length)
 {
-       elevator_t *e = container_of(kobj, elevator_t, kobj);
        struct elv_fs_entry *entry = to_elv(attr);
+       struct elevator_queue *e;
        ssize_t error;
 
        if (!entry->store)
                return -EIO;
 
+       e = container_of(kobj, struct elevator_queue, kobj);
        mutex_lock(&e->sysfs_lock);
        error = e->ops ? entry->store(e, page, length) : -ENOENT;
        mutex_unlock(&e->sysfs_lock);
@@ -1001,7 +1000,7 @@ static struct kobj_type elv_ktype = {
 
 int elv_register_queue(struct request_queue *q)
 {
-       elevator_t *e = q->elevator;
+       struct elevator_queue *e = q->elevator;
        int error;
 
        error = kobject_add(&e->kobj, &q->kobj, "%s", "iosched");
@@ -1019,7 +1018,7 @@ int elv_register_queue(struct request_queue *q)
        return error;
 }
 
-static void __elv_unregister_queue(elevator_t *e)
+static void __elv_unregister_queue(struct elevator_queue *e)
 {
        kobject_uevent(&e->kobj, KOBJ_REMOVE);
        kobject_del(&e->kobj);
@@ -1082,7 +1081,7 @@ EXPORT_SYMBOL_GPL(elv_unregister);
  */
 static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
 {
-       elevator_t *old_elevator, *e;
+       struct elevator_queue *old_elevator, *e;
        void *data;
 
        /*
@@ -1188,7 +1187,7 @@ ssize_t elv_iosched_store(struct request_queue *q, const char *name,
 
 ssize_t elv_iosched_show(struct request_queue *q, char *name)
 {
-       elevator_t *e = q->elevator;
+       struct elevator_queue *e = q->elevator;
        struct elevator_type *elv = e->elevator_type;
        struct elevator_type *__e;
        int len = 0;
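
The elevator changes above, together with the matching ones in the AS, CFQ, deadline and noop schedulers, drop the elevator_t typedef in favour of the spelled-out struct elevator_queue. The same mechanical pattern shown once more, with an illustrative scheduler attribute:

#include <linux/kernel.h>
#include <linux/elevator.h>

struct example_data {
	int some_tunable;
};

static ssize_t example_attr_show(struct elevator_queue *e, char *page)
{
	struct example_data *d = e->elevator_data;	/* was: elevator_t *e */

	return sprintf(page, "%d\n", d->some_tunable);
}
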
index 2f7feda61e35fc3cfd8dc74264300baeeaa56826..d84a7df1e2a094e663f7e1683bb941f1c3562e2d 100644 (file)
@@ -181,6 +181,12 @@ void disk_part_iter_exit(struct disk_part_iter *piter)
 }
 EXPORT_SYMBOL_GPL(disk_part_iter_exit);
 
+static inline int sector_in_part(struct hd_struct *part, sector_t sector)
+{
+       return part->start_sect <= sector &&
+               sector < part->start_sect + part->nr_sects;
+}
+
 /**
  * disk_map_sector_rcu - map sector to partition
  * @disk: gendisk of interest
@@ -199,16 +205,22 @@ EXPORT_SYMBOL_GPL(disk_part_iter_exit);
 struct hd_struct *disk_map_sector_rcu(struct gendisk *disk, sector_t sector)
 {
        struct disk_part_tbl *ptbl;
+       struct hd_struct *part;
        int i;
 
        ptbl = rcu_dereference(disk->part_tbl);
 
+       part = rcu_dereference(ptbl->last_lookup);
+       if (part && sector_in_part(part, sector))
+               return part;
+
        for (i = 1; i < ptbl->len; i++) {
-               struct hd_struct *part = rcu_dereference(ptbl->part[i]);
+               part = rcu_dereference(ptbl->part[i]);
 
-               if (part && part->start_sect <= sector &&
-                   sector < part->start_sect + part->nr_sects)
+               if (part && sector_in_part(part, sector)) {
+                       rcu_assign_pointer(ptbl->last_lookup, part);
                        return part;
+               }
        }
        return &disk->part0;
 }
@@ -888,8 +900,11 @@ static void disk_replace_part_tbl(struct gendisk *disk,
        struct disk_part_tbl *old_ptbl = disk->part_tbl;
 
        rcu_assign_pointer(disk->part_tbl, new_ptbl);
-       if (old_ptbl)
+
+       if (old_ptbl) {
+               rcu_assign_pointer(old_ptbl->last_lookup, NULL);
                call_rcu(&old_ptbl->rcu_head, disk_free_ptbl_rcu_cb);
+       }
 }
 
 /**
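
disk_map_sector_rcu() above gains a one-entry last_lookup cache that is read and updated locklessly and invalidated when the partition table is replaced. A sketch of a caller, with an assumed helper name, showing the rcu_read_lock() context this relies on:

#include <linux/genhd.h>
#include <linux/rcupdate.h>

static sector_t example_offset_in_part(struct gendisk *disk, sector_t sector)
{
	struct hd_struct *part;
	sector_t start;

	rcu_read_lock();
	part = disk_map_sector_rcu(disk, sector);  /* hits the cache on
						      repeated sectors   */
	start = part->start_sect;
	rcu_read_unlock();

	return sector - start;
}
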
index d03985b04d6768bd0966c3c75f27b6a1336ede1b..0f22e629b13c8e93bf46401c0913605b5d2cbfba 100644 (file)
@@ -323,9 +323,7 @@ int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
                bdi = blk_get_backing_dev_info(bdev);
                if (bdi == NULL)
                        return -ENOTTY;
-               lock_kernel();
                bdi->ra_pages = (arg * 512) / PAGE_CACHE_SIZE;
-               unlock_kernel();
                return 0;
        case BLKBSZSET:
                /* set the logical block size */
index c23e029696508dde4161e1359acd882a6dc8a6de..3a0d369d08c7488f8f475091ef3ee6b5dfe46211 100644 (file)
@@ -76,7 +76,7 @@ static void *noop_init_queue(struct request_queue *q)
        return nd;
 }
 
-static void noop_exit_queue(elevator_t *e)
+static void noop_exit_queue(struct elevator_queue *e)
 {
        struct noop_data *nd = e->elevator_data;
 
index d0bb92cbefb9d698fc4756ec9041aeb2f5794005..ee9c67d7e1bea2015bc5d8a86287d001e33f20d1 100644 (file)
@@ -60,7 +60,7 @@ static int scsi_get_bus(struct request_queue *q, int __user *p)
 
 static int sg_get_timeout(struct request_queue *q)
 {
-       return q->sg_timeout / (HZ / USER_HZ);
+       return jiffies_to_clock_t(q->sg_timeout);
 }
 
 static int sg_set_timeout(struct request_queue *q, int __user *p)
@@ -68,7 +68,7 @@ static int sg_set_timeout(struct request_queue *q, int __user *p)
        int timeout, err = get_user(timeout, p);
 
        if (!err)
-               q->sg_timeout = timeout * (HZ / USER_HZ);
+               q->sg_timeout = clock_t_to_jiffies(timeout);
 
        return err;
 }
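
The SG timeout ioctls above switch to the jiffies/clock_t helpers because open-coding HZ / USER_HZ truncates whenever HZ is not an exact multiple of USER_HZ; with HZ=250 and USER_HZ=100, for example, a 60-second timeout (15000 jiffies) was reported as 7500 ticks instead of 6000. A sketch with illustrative function names:

#include <linux/blkdev.h>
#include <linux/jiffies.h>

static int example_get_sg_timeout(struct request_queue *q)
{
	return jiffies_to_clock_t(q->sg_timeout);	/* exact USER_HZ ticks */
}

static void example_set_sg_timeout(struct request_queue *q, int user_ticks)
{
	q->sg_timeout = clock_t_to_jiffies(user_ticks);
}
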
index 9f7c543cc04b265344b0e37443c4349147e88574..01e69383d9c07ece3df6e56d23b4d3b2bf687475 100644 (file)
@@ -164,7 +164,7 @@ static int cciss_getgeo(struct block_device *bdev, struct hd_geometry *geo);
 
 static int cciss_revalidate(struct gendisk *disk);
 static int rebuild_lun_table(ctlr_info_t *h, int first_time);
-static int deregister_disk(struct gendisk *disk, drive_info_struct *drv,
+static int deregister_disk(ctlr_info_t *h, int drv_index,
                           int clear_all);
 
 static void cciss_read_capacity(int ctlr, int logvol, int withirq,
@@ -215,31 +215,17 @@ static struct block_device_operations cciss_fops = {
 /*
  * Enqueuing and dequeuing functions for cmdlists.
  */
-static inline void addQ(CommandList_struct **Qptr, CommandList_struct *c)
+static inline void addQ(struct hlist_head *list, CommandList_struct *c)
 {
-       if (*Qptr == NULL) {
-               *Qptr = c;
-               c->next = c->prev = c;
-       } else {
-               c->prev = (*Qptr)->prev;
-               c->next = (*Qptr);
-               (*Qptr)->prev->next = c;
-               (*Qptr)->prev = c;
-       }
+       hlist_add_head(&c->list, list);
 }
 
-static inline CommandList_struct *removeQ(CommandList_struct **Qptr,
-                                         CommandList_struct *c)
+static inline void removeQ(CommandList_struct *c)
 {
-       if (c && c->next != c) {
-               if (*Qptr == c)
-                       *Qptr = c->next;
-               c->prev->next = c->next;
-               c->next->prev = c->prev;
-       } else {
-               *Qptr = NULL;
-       }
-       return c;
+       if (WARN_ON(hlist_unhashed(&c->list)))
+               return;
+
+       hlist_del_init(&c->list);
 }
 
 #include "cciss_scsi.c"                /* For SCSI tape support */
@@ -506,6 +492,7 @@ static CommandList_struct *cmd_alloc(ctlr_info_t *h, int get_from_pool)
                c->cmdindex = i;
        }
 
+       INIT_HLIST_NODE(&c->list);
        c->busaddr = (__u32) cmd_dma_handle;
        temp64.val = (__u64) err_dma_handle;
        c->ErrDesc.Addr.lower = temp64.val32.lower;
@@ -1492,8 +1479,7 @@ static void cciss_update_drive_info(int ctlr, int drv_index, int first_time)
                 * which keeps the interrupt handler from starting
                 * the queue.
                 */
-               ret = deregister_disk(h->gendisk[drv_index],
-                                     &h->drv[drv_index], 0);
+               ret = deregister_disk(h, drv_index, 0);
                h->drv[drv_index].busy_configuring = 0;
        }
 
@@ -1711,8 +1697,7 @@ static int rebuild_lun_table(ctlr_info_t *h, int first_time)
                        spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
                        h->drv[i].busy_configuring = 1;
                        spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
-                       return_code = deregister_disk(h->gendisk[i],
-                               &h->drv[i], 1);
+                       return_code = deregister_disk(h, i, 1);
                        h->drv[i].busy_configuring = 0;
                }
        }
@@ -1782,15 +1767,19 @@ mem_msg:
  *             the highest_lun should be left unchanged and the LunID
  *             should not be cleared.
 */
-static int deregister_disk(struct gendisk *disk, drive_info_struct *drv,
+static int deregister_disk(ctlr_info_t *h, int drv_index,
                           int clear_all)
 {
        int i;
-       ctlr_info_t *h = get_host(disk);
+       struct gendisk *disk;
+       drive_info_struct *drv;
 
        if (!capable(CAP_SYS_RAWIO))
                return -EPERM;
 
+       drv = &h->drv[drv_index];
+       disk = h->gendisk[drv_index];
+
        /* make sure logical volume is NOT is use */
        if (clear_all || (h->gendisk[0] == disk)) {
                if (drv->usage_count > 1)
@@ -2548,7 +2537,8 @@ static void start_io(ctlr_info_t *h)
 {
        CommandList_struct *c;
 
-       while ((c = h->reqQ) != NULL) {
+       while (!hlist_empty(&h->reqQ)) {
+               c = hlist_entry(h->reqQ.first, CommandList_struct, list);
                /* can't do anything if fifo is full */
                if ((h->access.fifo_full(h))) {
                        printk(KERN_WARNING "cciss: fifo full\n");
@@ -2556,14 +2546,14 @@ static void start_io(ctlr_info_t *h)
                }
 
                /* Get the first entry from the Request Q */
-               removeQ(&(h->reqQ), c);
+               removeQ(c);
                h->Qdepth--;
 
                /* Tell the controller execute command */
                h->access.submit_command(h, c);
 
                /* Put job onto the completed Q */
-               addQ(&(h->cmpQ), c);
+               addQ(&h->cmpQ, c);
        }
 }
 
@@ -2576,7 +2566,7 @@ static inline void resend_cciss_cmd(ctlr_info_t *h, CommandList_struct *c)
        memset(c->err_info, 0, sizeof(ErrorInfo_struct));
 
        /* add it to software queue and then send it to the controller */
-       addQ(&(h->reqQ), c);
+       addQ(&h->reqQ, c);
        h->Qdepth++;
        if (h->Qdepth > h->maxQsinceinit)
                h->maxQsinceinit = h->Qdepth;
@@ -2897,7 +2887,7 @@ static void do_cciss_request(struct request_queue *q)
 
        spin_lock_irq(q->queue_lock);
 
-       addQ(&(h->reqQ), c);
+       addQ(&h->reqQ, c);
        h->Qdepth++;
        if (h->Qdepth > h->maxQsinceinit)
                h->maxQsinceinit = h->Qdepth;
@@ -2985,16 +2975,12 @@ static irqreturn_t do_cciss_intr(int irq, void *dev_id)
                                a = c->busaddr;
 
                        } else {
+                               struct hlist_node *tmp;
+
                                a &= ~3;
-                               if ((c = h->cmpQ) == NULL) {
-                                       printk(KERN_WARNING
-                                              "cciss: Completion of %08x ignored\n",
-                                              a1);
-                                       continue;
-                               }
-                               while (c->busaddr != a) {
-                                       c = c->next;
-                                       if (c == h->cmpQ)
+                               c = NULL;
+                               hlist_for_each_entry(c, tmp, &h->cmpQ, list) {
+                                       if (c->busaddr == a)
                                                break;
                                }
                        }
@@ -3002,8 +2988,8 @@ static irqreturn_t do_cciss_intr(int irq, void *dev_id)
                         * If we've found the command, take it off the
                         * completion Q and free it
                         */
-                       if (c->busaddr == a) {
-                               removeQ(&h->cmpQ, c);
+                       if (c && c->busaddr == a) {
+                               removeQ(c);
                                if (c->cmd_type == CMD_RWREQ) {
                                        complete_command(h, c, 0);
                                } else if (c->cmd_type == CMD_IOCTL_PEND) {
@@ -3423,6 +3409,8 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
                return -1;
 
        hba[i]->busy_initializing = 1;
+       INIT_HLIST_HEAD(&hba[i]->cmpQ);
+       INIT_HLIST_HEAD(&hba[i]->reqQ);
 
        if (cciss_pci_init(hba[i], pdev) != 0)
                goto clean1;
@@ -3730,15 +3718,17 @@ static void fail_all_cmds(unsigned long ctlr)
        pci_disable_device(h->pdev);    /* Make sure it is really dead. */
 
        /* move everything off the request queue onto the completed queue */
-       while ((c = h->reqQ) != NULL) {
-               removeQ(&(h->reqQ), c);
+       while (!hlist_empty(&h->reqQ)) {
+               c = hlist_entry(h->reqQ.first, CommandList_struct, list);
+               removeQ(c);
                h->Qdepth--;
-               addQ(&(h->cmpQ), c);
+               addQ(&h->cmpQ, c);
        }
 
        /* Now, fail everything on the completed queue with a HW error */
-       while ((c = h->cmpQ) != NULL) {
-               removeQ(&h->cmpQ, c);
+       while (!hlist_empty(&h->cmpQ)) {
+               c = hlist_entry(h->cmpQ.first, CommandList_struct, list);
+               removeQ(c);
                c->err_info->CommandStatus = CMD_HARDWARE_ERR;
                if (c->cmd_type == CMD_RWREQ) {
                        complete_command(h, c, 0);
index 24a7efa993ab5d1dd655c68c60e57b41a6f5ebdc..15e2b84734e3ad68df867b26a47b98ea9e982326 100644 (file)
@@ -89,8 +89,8 @@ struct ctlr_info
        struct access_method access;
 
        /* queue and queue Info */ 
-       CommandList_struct *reqQ;
-       CommandList_struct  *cmpQ;
+       struct hlist_head reqQ;
+       struct hlist_head cmpQ;
        unsigned int Qdepth;
        unsigned int maxQsinceinit;
        unsigned int maxSG;
index 43bf5593b59bf6ed1da79abbbba46a0fea499d44..24e22dea1a99e2ee2d8ec297b0dd69b624acff15 100644 (file)
@@ -265,8 +265,7 @@ typedef struct _CommandList_struct {
   int                     ctlr;
   int                     cmd_type; 
   long                    cmdindex;
-  struct _CommandList_struct *prev;
-  struct _CommandList_struct *next;
+  struct hlist_node list;
   struct request *        rq;
   struct completion *waiting;
   int   retry_count;
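
The cciss conversion above replaces the driver's hand-rolled circular command lists with standard hlists: a struct hlist_head in the controller for each queue and a struct hlist_node embedded in every command. A condensed sketch of the same pattern, with illustrative names:

#include <linux/list.h>
#include <linux/types.h>

struct example_cmd {
	struct hlist_node list;
	u32 busaddr;
};

static struct example_cmd *example_find_by_busaddr(struct hlist_head *q,
						   u32 busaddr)
{
	struct example_cmd *c;
	struct hlist_node *tmp;

	/* four-argument iterator form used by kernels of this era */
	hlist_for_each_entry(c, tmp, q, list)
		if (c->busaddr == busaddr)
			return c;
	return NULL;
}
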
index fb06ed6592121fe8dbdfa79da727f2a6eac921f5..edbaac6c05739ab183e69f94c1dc230acea27176 100644 (file)
@@ -623,6 +623,18 @@ static int loop_switch(struct loop_device *lo, struct file *file)
        return 0;
 }
 
+/*
+ * Helper to flush the IOs in loop, but keeping loop thread running
+ */
+static int loop_flush(struct loop_device *lo)
+{
+       /* loop not yet configured, no running thread, nothing to flush */
+       if (!lo->lo_thread)
+               return 0;
+
+       return loop_switch(lo, NULL);
+}
+
 /*
  * Do the actual switch; called from the BIO completion routine
  */
@@ -630,14 +642,20 @@ static void do_loop_switch(struct loop_device *lo, struct switch_request *p)
 {
        struct file *file = p->file;
        struct file *old_file = lo->lo_backing_file;
-       struct address_space *mapping = file->f_mapping;
+       struct address_space *mapping;
+
+       /* if no new file, only flush of queued bios requested */
+       if (!file)
+               goto out;
 
+       mapping = file->f_mapping;
        mapping_set_gfp_mask(old_file->f_mapping, lo->old_gfp_mask);
        lo->lo_backing_file = file;
        lo->lo_blocksize = S_ISBLK(mapping->host->i_mode) ?
                mapping->host->i_bdev->bd_block_size : PAGE_SIZE;
        lo->old_gfp_mask = mapping_gfp_mask(mapping);
        mapping_set_gfp_mask(mapping, lo->old_gfp_mask & ~(__GFP_IO|__GFP_FS));
+out:
        complete(&p->wait);
 }
 
@@ -901,6 +919,7 @@ static int loop_clr_fd(struct loop_device *lo, struct block_device *bdev)
 
        kthread_stop(lo->lo_thread);
 
+       lo->lo_queue->unplug_fn = NULL;
        lo->lo_backing_file = NULL;
 
        loop_release_xfer(lo);
@@ -1345,11 +1364,25 @@ static int lo_release(struct gendisk *disk, fmode_t mode)
        struct loop_device *lo = disk->private_data;
 
        mutex_lock(&lo->lo_ctl_mutex);
-       --lo->lo_refcnt;
 
-       if ((lo->lo_flags & LO_FLAGS_AUTOCLEAR) && !lo->lo_refcnt)
+       if (--lo->lo_refcnt)
+               goto out;
+
+       if (lo->lo_flags & LO_FLAGS_AUTOCLEAR) {
+               /*
+                * In autoclear mode, stop the loop thread
+                * and remove configuration after last close.
+                */
                loop_clr_fd(lo, NULL);
+       } else {
+               /*
+                * Otherwise keep thread (if running) and config,
+                * but flush possible ongoing bios in thread.
+                */
+               loop_flush(lo);
+       }
 
+out:
        mutex_unlock(&lo->lo_ctl_mutex);
 
        return 0;
index d3a91cacee8c6aff410bc9a8567ce89cb1ae477f..7bcc1d8bc96724add86213f7f85ec01ed48a8314 100644 (file)
@@ -722,7 +722,6 @@ static int __init nbd_init(void)
 
        for (i = 0; i < nbds_max; i++) {
                struct gendisk *disk = alloc_disk(1 << part_shift);
-               elevator_t *old_e;
                if (!disk)
                        goto out;
                nbd_dev[i].disk = disk;
@@ -736,11 +735,10 @@ static int __init nbd_init(void)
                        put_disk(disk);
                        goto out;
                }
-               old_e = disk->queue->elevator;
-               if (elevator_init(disk->queue, "deadline") == 0 ||
-                       elevator_init(disk->queue, "noop") == 0) {
-                               elevator_exit(old_e);
-               }
+               /*
+                * Tell the block layer that we are not a rotational device
+                */
+               queue_flag_set_unlocked(QUEUE_FLAG_NONROT, disk->queue);
        }
 
        if (register_blkdev(NBD_MAJOR, "nbd")) {
index 85d79a02d48726c150e6f8f1b17889f84bc8cc8d..f151592ecf738d42866a2a9f7fef818d4ca513b0 100644 (file)
@@ -237,6 +237,8 @@ static int virtblk_probe(struct virtio_device *vdev)
                goto out_put_disk;
        }
 
+       queue_flag_set_unlocked(QUEUE_FLAG_VIRT, vblk->disk->queue);
+
        if (index < 26) {
                sprintf(vblk->disk->disk_name, "vd%c", 'a' + index % 26);
        } else if (index < (26 + 1) * 26) {
index 2d19f0cc47f272c5ae4f96ee97ea34a1bce5a407..918ef725de41c849f3f78b7e30f80d7709e01a39 100644 (file)
@@ -338,18 +338,12 @@ wait:
 static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size)
 {
        struct request_queue *rq;
-       elevator_t *old_e;
 
        rq = blk_init_queue(do_blkif_request, &blkif_io_lock);
        if (rq == NULL)
                return -1;
 
-       old_e = rq->elevator;
-       if (IS_ERR_VALUE(elevator_init(rq, "noop")))
-               printk(KERN_WARNING
-                       "blkfront: Switch elevator failed, use default\n");
-       else
-               elevator_exit(old_e);
+       queue_flag_set_unlocked(QUEUE_FLAG_VIRT, rq);
 
        /* Hard sector size and max sectors impersonate the equiv. hardware. */
        blk_queue_hardsect_size(rq, sector_size);
index 7d2e91cccb13e99068a4499bc469ef9bc032c67b..cceace61ef286622b93af85fb5c45d134c289eae 100644 (file)
@@ -1712,29 +1712,30 @@ static int dvd_do_auth(struct cdrom_device_info *cdi, dvd_authinfo *ai)
        return 0;
 }
 
-static int dvd_read_physical(struct cdrom_device_info *cdi, dvd_struct *s)
+static int dvd_read_physical(struct cdrom_device_info *cdi, dvd_struct *s,
+                               struct packet_command *cgc)
 {
        unsigned char buf[21], *base;
        struct dvd_layer *layer;
-       struct packet_command cgc;
        struct cdrom_device_ops *cdo = cdi->ops;
        int ret, layer_num = s->physical.layer_num;
 
        if (layer_num >= DVD_LAYERS)
                return -EINVAL;
 
-       init_cdrom_command(&cgc, buf, sizeof(buf), CGC_DATA_READ);
-       cgc.cmd[0] = GPCMD_READ_DVD_STRUCTURE;
-       cgc.cmd[6] = layer_num;
-       cgc.cmd[7] = s->type;
-       cgc.cmd[9] = cgc.buflen & 0xff;
+       init_cdrom_command(cgc, buf, sizeof(buf), CGC_DATA_READ);
+       cgc->cmd[0] = GPCMD_READ_DVD_STRUCTURE;
+       cgc->cmd[6] = layer_num;
+       cgc->cmd[7] = s->type;
+       cgc->cmd[9] = cgc->buflen & 0xff;
 
        /*
         * refrain from reporting errors on non-existing layers (mainly)
         */
-       cgc.quiet = 1;
+       cgc->quiet = 1;
 
-       if ((ret = cdo->generic_packet(cdi, &cgc)))
+       ret = cdo->generic_packet(cdi, cgc);
+       if (ret)
                return ret;
 
        base = &buf[4];
@@ -1762,21 +1763,22 @@ static int dvd_read_physical(struct cdrom_device_info *cdi, dvd_struct *s)
        return 0;
 }
 
-static int dvd_read_copyright(struct cdrom_device_info *cdi, dvd_struct *s)
+static int dvd_read_copyright(struct cdrom_device_info *cdi, dvd_struct *s,
+                               struct packet_command *cgc)
 {
        int ret;
        u_char buf[8];
-       struct packet_command cgc;
        struct cdrom_device_ops *cdo = cdi->ops;
 
-       init_cdrom_command(&cgc, buf, sizeof(buf), CGC_DATA_READ);
-       cgc.cmd[0] = GPCMD_READ_DVD_STRUCTURE;
-       cgc.cmd[6] = s->copyright.layer_num;
-       cgc.cmd[7] = s->type;
-       cgc.cmd[8] = cgc.buflen >> 8;
-       cgc.cmd[9] = cgc.buflen & 0xff;
+       init_cdrom_command(cgc, buf, sizeof(buf), CGC_DATA_READ);
+       cgc->cmd[0] = GPCMD_READ_DVD_STRUCTURE;
+       cgc->cmd[6] = s->copyright.layer_num;
+       cgc->cmd[7] = s->type;
+       cgc->cmd[8] = cgc->buflen >> 8;
+       cgc->cmd[9] = cgc->buflen & 0xff;
 
-       if ((ret = cdo->generic_packet(cdi, &cgc)))
+       ret = cdo->generic_packet(cdi, cgc);
+       if (ret)
                return ret;
 
        s->copyright.cpst = buf[4];
@@ -1785,79 +1787,89 @@ static int dvd_read_copyright(struct cdrom_device_info *cdi, dvd_struct *s)
        return 0;
 }
 
-static int dvd_read_disckey(struct cdrom_device_info *cdi, dvd_struct *s)
+static int dvd_read_disckey(struct cdrom_device_info *cdi, dvd_struct *s,
+                               struct packet_command *cgc)
 {
        int ret, size;
        u_char *buf;
-       struct packet_command cgc;
        struct cdrom_device_ops *cdo = cdi->ops;
 
        size = sizeof(s->disckey.value) + 4;
 
-       if ((buf = kmalloc(size, GFP_KERNEL)) == NULL)
+       buf = kmalloc(size, GFP_KERNEL);
+       if (!buf)
                return -ENOMEM;
 
-       init_cdrom_command(&cgc, buf, size, CGC_DATA_READ);
-       cgc.cmd[0] = GPCMD_READ_DVD_STRUCTURE;
-       cgc.cmd[7] = s->type;
-       cgc.cmd[8] = size >> 8;
-       cgc.cmd[9] = size & 0xff;
-       cgc.cmd[10] = s->disckey.agid << 6;
+       init_cdrom_command(cgc, buf, size, CGC_DATA_READ);
+       cgc->cmd[0] = GPCMD_READ_DVD_STRUCTURE;
+       cgc->cmd[7] = s->type;
+       cgc->cmd[8] = size >> 8;
+       cgc->cmd[9] = size & 0xff;
+       cgc->cmd[10] = s->disckey.agid << 6;
 
-       if (!(ret = cdo->generic_packet(cdi, &cgc)))
+       ret = cdo->generic_packet(cdi, cgc);
+       if (!ret)
                memcpy(s->disckey.value, &buf[4], sizeof(s->disckey.value));
 
        kfree(buf);
        return ret;
 }
 
-static int dvd_read_bca(struct cdrom_device_info *cdi, dvd_struct *s)
+static int dvd_read_bca(struct cdrom_device_info *cdi, dvd_struct *s,
+                       struct packet_command *cgc)
 {
-       int ret;
-       u_char buf[4 + 188];
-       struct packet_command cgc;
+       int ret, size = 4 + 188;
+       u_char *buf;
        struct cdrom_device_ops *cdo = cdi->ops;
 
-       init_cdrom_command(&cgc, buf, sizeof(buf), CGC_DATA_READ);
-       cgc.cmd[0] = GPCMD_READ_DVD_STRUCTURE;
-       cgc.cmd[7] = s->type;
-       cgc.cmd[9] = cgc.buflen & 0xff;
+       buf = kmalloc(size, GFP_KERNEL);
+       if (!buf)
+               return -ENOMEM;
 
-       if ((ret = cdo->generic_packet(cdi, &cgc)))
-               return ret;
+       init_cdrom_command(cgc, buf, size, CGC_DATA_READ);
+       cgc->cmd[0] = GPCMD_READ_DVD_STRUCTURE;
+       cgc->cmd[7] = s->type;
+       cgc->cmd[9] = cgc->buflen & 0xff;
+
+       ret = cdo->generic_packet(cdi, cgc);
+       if (ret)
+               goto out;
 
        s->bca.len = buf[0] << 8 | buf[1];
        if (s->bca.len < 12 || s->bca.len > 188) {
                cdinfo(CD_WARNING, "Received invalid BCA length (%d)\n", s->bca.len);
-               return -EIO;
+               ret = -EIO;
+               goto out;
        }
        memcpy(s->bca.value, &buf[4], s->bca.len);
-
-       return 0;
+       ret = 0;
+out:
+       kfree(buf);
+       return ret;
 }
 
-static int dvd_read_manufact(struct cdrom_device_info *cdi, dvd_struct *s)
+static int dvd_read_manufact(struct cdrom_device_info *cdi, dvd_struct *s,
+                               struct packet_command *cgc)
 {
        int ret = 0, size;
        u_char *buf;
-       struct packet_command cgc;
        struct cdrom_device_ops *cdo = cdi->ops;
 
        size = sizeof(s->manufact.value) + 4;
 
-       if ((buf = kmalloc(size, GFP_KERNEL)) == NULL)
+       buf = kmalloc(size, GFP_KERNEL);
+       if (!buf)
                return -ENOMEM;
 
-       init_cdrom_command(&cgc, buf, size, CGC_DATA_READ);
-       cgc.cmd[0] = GPCMD_READ_DVD_STRUCTURE;
-       cgc.cmd[7] = s->type;
-       cgc.cmd[8] = size >> 8;
-       cgc.cmd[9] = size & 0xff;
+       init_cdrom_command(cgc, buf, size, CGC_DATA_READ);
+       cgc->cmd[0] = GPCMD_READ_DVD_STRUCTURE;
+       cgc->cmd[7] = s->type;
+       cgc->cmd[8] = size >> 8;
+       cgc->cmd[9] = size & 0xff;
 
-       if ((ret = cdo->generic_packet(cdi, &cgc))) {
-               kfree(buf);
-               return ret;
-       }
+       ret = cdo->generic_packet(cdi, cgc);
+       if (ret)
+               goto out;
 
        s->manufact.len = buf[0] << 8 | buf[1];
        if (s->manufact.len < 0 || s->manufact.len > 2048) {
@@ -1868,27 +1880,29 @@ static int dvd_read_manufact(struct cdrom_device_info *cdi, dvd_struct *s)
                memcpy(s->manufact.value, &buf[4], s->manufact.len);
        }
 
+out:
        kfree(buf);
        return ret;
 }
 
-static int dvd_read_struct(struct cdrom_device_info *cdi, dvd_struct *s)
+static int dvd_read_struct(struct cdrom_device_info *cdi, dvd_struct *s,
+                               struct packet_command *cgc)
 {
        switch (s->type) {
        case DVD_STRUCT_PHYSICAL:
-               return dvd_read_physical(cdi, s);
+               return dvd_read_physical(cdi, s, cgc);
 
        case DVD_STRUCT_COPYRIGHT:
-               return dvd_read_copyright(cdi, s);
+               return dvd_read_copyright(cdi, s, cgc);
 
        case DVD_STRUCT_DISCKEY:
-               return dvd_read_disckey(cdi, s);
+               return dvd_read_disckey(cdi, s, cgc);
 
        case DVD_STRUCT_BCA:
-               return dvd_read_bca(cdi, s);
+               return dvd_read_bca(cdi, s, cgc);
 
        case DVD_STRUCT_MANUFACT:
-               return dvd_read_manufact(cdi, s);
+               return dvd_read_manufact(cdi, s, cgc);
                
        default:
                cdinfo(CD_WARNING, ": Invalid DVD structure read requested (%d)\n",
@@ -2787,271 +2801,360 @@ static int cdrom_switch_blocksize(struct cdrom_device_info *cdi, int size)
        return cdo->generic_packet(cdi, &cgc);
 }
 
-static int mmc_ioctl(struct cdrom_device_info *cdi, unsigned int cmd,
-                    unsigned long arg)
-{              
-       struct cdrom_device_ops *cdo = cdi->ops;
-       struct packet_command cgc;
+static noinline int mmc_ioctl_cdrom_read_data(struct cdrom_device_info *cdi,
+                                       void __user *arg,
+                                       struct packet_command *cgc,
+                                       int cmd)
+{
        struct request_sense sense;
-       unsigned char buffer[32];
-       int ret = 0;
-
-       memset(&cgc, 0, sizeof(cgc));
+       struct cdrom_msf msf;
+       int blocksize = 0, format = 0, lba;
+       int ret;
 
-       /* build a unified command and queue it through
-          cdo->generic_packet() */
        switch (cmd) {
        case CDROMREADRAW:
+               blocksize = CD_FRAMESIZE_RAW;
+               break;
        case CDROMREADMODE1:
-       case CDROMREADMODE2: {
-               struct cdrom_msf msf;
-               int blocksize = 0, format = 0, lba;
-               
-               switch (cmd) {
-               case CDROMREADRAW:
-                       blocksize = CD_FRAMESIZE_RAW;
-                       break;
-               case CDROMREADMODE1:
-                       blocksize = CD_FRAMESIZE;
-                       format = 2;
-                       break;
-               case CDROMREADMODE2:
-                       blocksize = CD_FRAMESIZE_RAW0;
-                       break;
-               }
-               IOCTL_IN(arg, struct cdrom_msf, msf);
-               lba = msf_to_lba(msf.cdmsf_min0,msf.cdmsf_sec0,msf.cdmsf_frame0);
-               /* FIXME: we need upper bound checking, too!! */
-               if (lba < 0)
-                       return -EINVAL;
-               cgc.buffer = kmalloc(blocksize, GFP_KERNEL);
-               if (cgc.buffer == NULL)
-                       return -ENOMEM;
-               memset(&sense, 0, sizeof(sense));
-               cgc.sense = &sense;
-               cgc.data_direction = CGC_DATA_READ;
-               ret = cdrom_read_block(cdi, &cgc, lba, 1, format, blocksize);
-               if (ret && sense.sense_key==0x05 && sense.asc==0x20 && sense.ascq==0x00) {
-                       /*
-                        * SCSI-II devices are not required to support
-                        * READ_CD, so let's try switching block size
-                        */
-                       /* FIXME: switch back again... */
-                       if ((ret = cdrom_switch_blocksize(cdi, blocksize))) {
-                               kfree(cgc.buffer);
-                               return ret;
-                       }
-                       cgc.sense = NULL;
-                       ret = cdrom_read_cd(cdi, &cgc, lba, blocksize, 1);
-                       ret |= cdrom_switch_blocksize(cdi, blocksize);
-               }
-               if (!ret && copy_to_user((char __user *)arg, cgc.buffer, blocksize))
-                       ret = -EFAULT;
-               kfree(cgc.buffer);
+               blocksize = CD_FRAMESIZE;
+               format = 2;
+               break;
+       case CDROMREADMODE2:
+               blocksize = CD_FRAMESIZE_RAW0;
+               break;
+       }
+       IOCTL_IN(arg, struct cdrom_msf, msf);
+       lba = msf_to_lba(msf.cdmsf_min0, msf.cdmsf_sec0, msf.cdmsf_frame0);
+       /* FIXME: we need upper bound checking, too!! */
+       if (lba < 0)
+               return -EINVAL;
+
+       cgc->buffer = kmalloc(blocksize, GFP_KERNEL);
+       if (cgc->buffer == NULL)
+               return -ENOMEM;
+
+       memset(&sense, 0, sizeof(sense));
+       cgc->sense = &sense;
+       cgc->data_direction = CGC_DATA_READ;
+       ret = cdrom_read_block(cdi, cgc, lba, 1, format, blocksize);
+       if (ret && sense.sense_key == 0x05 &&
+                  sense.asc == 0x20 &&
+                  sense.ascq == 0x00) {
+               /*
+                * SCSI-II devices are not required to support
+                * READ_CD, so let's try switching block size
+                */
+               /* FIXME: switch back again... */
+               ret = cdrom_switch_blocksize(cdi, blocksize);
+               if (ret)
+                       goto out;
+               cgc->sense = NULL;
+               ret = cdrom_read_cd(cdi, cgc, lba, blocksize, 1);
+               ret |= cdrom_switch_blocksize(cdi, blocksize);
+       }
+       if (!ret && copy_to_user(arg, cgc->buffer, blocksize))
+               ret = -EFAULT;
+out:
+       kfree(cgc->buffer);
+       return ret;
+}
+
+static noinline int mmc_ioctl_cdrom_read_audio(struct cdrom_device_info *cdi,
+                                       void __user *arg)
+{
+       struct cdrom_read_audio ra;
+       int lba;
+
+       IOCTL_IN(arg, struct cdrom_read_audio, ra);
+
+       if (ra.addr_format == CDROM_MSF)
+               lba = msf_to_lba(ra.addr.msf.minute,
+                                ra.addr.msf.second,
+                                ra.addr.msf.frame);
+       else if (ra.addr_format == CDROM_LBA)
+               lba = ra.addr.lba;
+       else
+               return -EINVAL;
+
+       /* FIXME: we need upper bound checking, too!! */
+       if (lba < 0 || ra.nframes <= 0 || ra.nframes > CD_FRAMES)
+               return -EINVAL;
+
+       return cdrom_read_cdda(cdi, ra.buf, lba, ra.nframes);
+}
+
+static noinline int mmc_ioctl_cdrom_subchannel(struct cdrom_device_info *cdi,
+                                       void __user *arg)
+{
+       int ret;
+       struct cdrom_subchnl q;
+       u_char requested, back;
+       IOCTL_IN(arg, struct cdrom_subchnl, q);
+       requested = q.cdsc_format;
+       if (!((requested == CDROM_MSF) ||
+             (requested == CDROM_LBA)))
+               return -EINVAL;
+       q.cdsc_format = CDROM_MSF;
+       ret = cdrom_read_subchannel(cdi, &q, 0);
+       if (ret)
                return ret;
-               }
-       case CDROMREADAUDIO: {
-               struct cdrom_read_audio ra;
-               int lba;
-
-               IOCTL_IN(arg, struct cdrom_read_audio, ra);
-
-               if (ra.addr_format == CDROM_MSF)
-                       lba = msf_to_lba(ra.addr.msf.minute,
-                                        ra.addr.msf.second,
-                                        ra.addr.msf.frame);
-               else if (ra.addr_format == CDROM_LBA)
-                       lba = ra.addr.lba;
-               else
-                       return -EINVAL;
+       back = q.cdsc_format; /* local copy */
+       sanitize_format(&q.cdsc_absaddr, &back, requested);
+       sanitize_format(&q.cdsc_reladdr, &q.cdsc_format, requested);
+       IOCTL_OUT(arg, struct cdrom_subchnl, q);
+       /* cdinfo(CD_DO_IOCTL, "CDROMSUBCHNL successful\n"); */
+       return 0;
+}
 
-               /* FIXME: we need upper bound checking, too!! */
-               if (lba < 0 || ra.nframes <= 0 || ra.nframes > CD_FRAMES)
-                       return -EINVAL;
+static noinline int mmc_ioctl_cdrom_play_msf(struct cdrom_device_info *cdi,
+                                       void __user *arg,
+                                       struct packet_command *cgc)
+{
+       struct cdrom_device_ops *cdo = cdi->ops;
+       struct cdrom_msf msf;
+       cdinfo(CD_DO_IOCTL, "entering CDROMPLAYMSF\n");
+       IOCTL_IN(arg, struct cdrom_msf, msf);
+       cgc->cmd[0] = GPCMD_PLAY_AUDIO_MSF;
+       cgc->cmd[3] = msf.cdmsf_min0;
+       cgc->cmd[4] = msf.cdmsf_sec0;
+       cgc->cmd[5] = msf.cdmsf_frame0;
+       cgc->cmd[6] = msf.cdmsf_min1;
+       cgc->cmd[7] = msf.cdmsf_sec1;
+       cgc->cmd[8] = msf.cdmsf_frame1;
+       cgc->data_direction = CGC_DATA_NONE;
+       return cdo->generic_packet(cdi, cgc);
+}
 
-               return cdrom_read_cdda(cdi, ra.buf, lba, ra.nframes);
-               }
-       case CDROMSUBCHNL: {
-               struct cdrom_subchnl q;
-               u_char requested, back;
-               IOCTL_IN(arg, struct cdrom_subchnl, q);
-               requested = q.cdsc_format;
-               if (!((requested == CDROM_MSF) ||
-                     (requested == CDROM_LBA)))
-                       return -EINVAL;
-               q.cdsc_format = CDROM_MSF;
-               if ((ret = cdrom_read_subchannel(cdi, &q, 0)))
-                       return ret;
-               back = q.cdsc_format; /* local copy */
-               sanitize_format(&q.cdsc_absaddr, &back, requested);
-               sanitize_format(&q.cdsc_reladdr, &q.cdsc_format, requested);
-               IOCTL_OUT(arg, struct cdrom_subchnl, q);
-               /* cdinfo(CD_DO_IOCTL, "CDROMSUBCHNL successful\n"); */ 
-               return 0;
-               }
-       case CDROMPLAYMSF: {
-               struct cdrom_msf msf;
-               cdinfo(CD_DO_IOCTL, "entering CDROMPLAYMSF\n");
-               IOCTL_IN(arg, struct cdrom_msf, msf);
-               cgc.cmd[0] = GPCMD_PLAY_AUDIO_MSF;
-               cgc.cmd[3] = msf.cdmsf_min0;
-               cgc.cmd[4] = msf.cdmsf_sec0;
-               cgc.cmd[5] = msf.cdmsf_frame0;
-               cgc.cmd[6] = msf.cdmsf_min1;
-               cgc.cmd[7] = msf.cdmsf_sec1;
-               cgc.cmd[8] = msf.cdmsf_frame1;
-               cgc.data_direction = CGC_DATA_NONE;
-               return cdo->generic_packet(cdi, &cgc);
-               }
-       case CDROMPLAYBLK: {
-               struct cdrom_blk blk;
-               cdinfo(CD_DO_IOCTL, "entering CDROMPLAYBLK\n");
-               IOCTL_IN(arg, struct cdrom_blk, blk);
-               cgc.cmd[0] = GPCMD_PLAY_AUDIO_10;
-               cgc.cmd[2] = (blk.from >> 24) & 0xff;
-               cgc.cmd[3] = (blk.from >> 16) & 0xff;
-               cgc.cmd[4] = (blk.from >>  8) & 0xff;
-               cgc.cmd[5] = blk.from & 0xff;
-               cgc.cmd[7] = (blk.len >> 8) & 0xff;
-               cgc.cmd[8] = blk.len & 0xff;
-               cgc.data_direction = CGC_DATA_NONE;
-               return cdo->generic_packet(cdi, &cgc);
-               }
-       case CDROMVOLCTRL:
-       case CDROMVOLREAD: {
-               struct cdrom_volctrl volctrl;
-               char mask[sizeof(buffer)];
-               unsigned short offset;
+static noinline int mmc_ioctl_cdrom_play_blk(struct cdrom_device_info *cdi,
+                                       void __user *arg,
+                                       struct packet_command *cgc)
+{
+       struct cdrom_device_ops *cdo = cdi->ops;
+       struct cdrom_blk blk;
+       cdinfo(CD_DO_IOCTL, "entering CDROMPLAYBLK\n");
+       IOCTL_IN(arg, struct cdrom_blk, blk);
+       cgc->cmd[0] = GPCMD_PLAY_AUDIO_10;
+       cgc->cmd[2] = (blk.from >> 24) & 0xff;
+       cgc->cmd[3] = (blk.from >> 16) & 0xff;
+       cgc->cmd[4] = (blk.from >>  8) & 0xff;
+       cgc->cmd[5] = blk.from & 0xff;
+       cgc->cmd[7] = (blk.len >> 8) & 0xff;
+       cgc->cmd[8] = blk.len & 0xff;
+       cgc->data_direction = CGC_DATA_NONE;
+       return cdo->generic_packet(cdi, cgc);
+}
+
+static noinline int mmc_ioctl_cdrom_volume(struct cdrom_device_info *cdi,
+                                       void __user *arg,
+                                       struct packet_command *cgc,
+                                       unsigned int cmd)
+{
+       struct cdrom_volctrl volctrl;
+       unsigned char buffer[32];
+       char mask[sizeof(buffer)];
+       unsigned short offset;
+       int ret;
 
-               cdinfo(CD_DO_IOCTL, "entering CDROMVOLUME\n");
+       cdinfo(CD_DO_IOCTL, "entering CDROMVOLUME\n");
 
-               IOCTL_IN(arg, struct cdrom_volctrl, volctrl);
+       IOCTL_IN(arg, struct cdrom_volctrl, volctrl);
 
-               cgc.buffer = buffer;
-               cgc.buflen = 24;
-               if ((ret = cdrom_mode_sense(cdi, &cgc, GPMODE_AUDIO_CTL_PAGE, 0)))
-                   return ret;
+       cgc->buffer = buffer;
+       cgc->buflen = 24;
+       ret = cdrom_mode_sense(cdi, cgc, GPMODE_AUDIO_CTL_PAGE, 0);
+       if (ret)
+               return ret;
                
-               /* originally the code depended on buffer[1] to determine
-                  how much data is available for transfer. buffer[1] is
-                  unfortunately ambigious and the only reliable way seem
-                  to be to simply skip over the block descriptor... */
-               offset = 8 + be16_to_cpu(*(__be16 *)(buffer+6));
-
-               if (offset + 16 > sizeof(buffer))
-                       return -E2BIG;
-
-               if (offset + 16 > cgc.buflen) {
-                       cgc.buflen = offset+16;
-                       ret = cdrom_mode_sense(cdi, &cgc,
-                                               GPMODE_AUDIO_CTL_PAGE, 0);
-                       if (ret)
-                               return ret;
-               }
+       /* originally the code depended on buffer[1] to determine
+          how much data is available for transfer. buffer[1] is
+          unfortunately ambiguous and the only reliable way seems
+          to be to simply skip over the block descriptor... */
+       offset = 8 + be16_to_cpu(*(__be16 *)(buffer + 6));
+
+       if (offset + 16 > sizeof(buffer))
+               return -E2BIG;
+
+       if (offset + 16 > cgc->buflen) {
+               cgc->buflen = offset + 16;
+               ret = cdrom_mode_sense(cdi, cgc,
+                                       GPMODE_AUDIO_CTL_PAGE, 0);
+               if (ret)
+                       return ret;
+       }
 
-               /* sanity check */
-               if ((buffer[offset] & 0x3f) != GPMODE_AUDIO_CTL_PAGE ||
-                               buffer[offset+1] < 14)
-                       return -EINVAL;
+       /* sanity check */
+       if ((buffer[offset] & 0x3f) != GPMODE_AUDIO_CTL_PAGE ||
+                       buffer[offset + 1] < 14)
+               return -EINVAL;
 
-               /* now we have the current volume settings. if it was only
-                  a CDROMVOLREAD, return these values */
-               if (cmd == CDROMVOLREAD) {
-                       volctrl.channel0 = buffer[offset+9];
-                       volctrl.channel1 = buffer[offset+11];
-                       volctrl.channel2 = buffer[offset+13];
-                       volctrl.channel3 = buffer[offset+15];
-                       IOCTL_OUT(arg, struct cdrom_volctrl, volctrl);
-                       return 0;
-               }
+       /* now we have the current volume settings. if it was only
+          a CDROMVOLREAD, return these values */
+       if (cmd == CDROMVOLREAD) {
+               volctrl.channel0 = buffer[offset+9];
+               volctrl.channel1 = buffer[offset+11];
+               volctrl.channel2 = buffer[offset+13];
+               volctrl.channel3 = buffer[offset+15];
+               IOCTL_OUT(arg, struct cdrom_volctrl, volctrl);
+               return 0;
+       }
                
-               /* get the volume mask */
-               cgc.buffer = mask;
-               if ((ret = cdrom_mode_sense(cdi, &cgc, 
-                               GPMODE_AUDIO_CTL_PAGE, 1)))
-                       return ret;
+       /* get the volume mask */
+       cgc->buffer = mask;
+       ret = cdrom_mode_sense(cdi, cgc, GPMODE_AUDIO_CTL_PAGE, 1);
+       if (ret)
+               return ret;
 
-               buffer[offset+9] = volctrl.channel0 & mask[offset+9];
-               buffer[offset+11] = volctrl.channel1 & mask[offset+11];
-               buffer[offset+13] = volctrl.channel2 & mask[offset+13];
-               buffer[offset+15] = volctrl.channel3 & mask[offset+15];
+       buffer[offset + 9]  = volctrl.channel0 & mask[offset + 9];
+       buffer[offset + 11] = volctrl.channel1 & mask[offset + 11];
+       buffer[offset + 13] = volctrl.channel2 & mask[offset + 13];
+       buffer[offset + 15] = volctrl.channel3 & mask[offset + 15];
 
-               /* set volume */
-               cgc.buffer = buffer + offset - 8;
-               memset(cgc.buffer, 0, 8);
-               return cdrom_mode_select(cdi, &cgc);
-               }
+       /* set volume */
+       cgc->buffer = buffer + offset - 8;
+       memset(cgc->buffer, 0, 8);
+       return cdrom_mode_select(cdi, cgc);
+}
 
-       case CDROMSTART:
-       case CDROMSTOP: {
-               cdinfo(CD_DO_IOCTL, "entering CDROMSTART/CDROMSTOP\n"); 
-               cgc.cmd[0] = GPCMD_START_STOP_UNIT;
-               cgc.cmd[1] = 1;
-               cgc.cmd[4] = (cmd == CDROMSTART) ? 1 : 0;
-               cgc.data_direction = CGC_DATA_NONE;
-               return cdo->generic_packet(cdi, &cgc);
-               }
+static noinline int mmc_ioctl_cdrom_start_stop(struct cdrom_device_info *cdi,
+                                       struct packet_command *cgc,
+                                       int cmd)
+{
+       struct cdrom_device_ops *cdo = cdi->ops;
+       cdinfo(CD_DO_IOCTL, "entering CDROMSTART/CDROMSTOP\n");
+       cgc->cmd[0] = GPCMD_START_STOP_UNIT;
+       cgc->cmd[1] = 1;
+       cgc->cmd[4] = (cmd == CDROMSTART) ? 1 : 0;
+       cgc->data_direction = CGC_DATA_NONE;
+       return cdo->generic_packet(cdi, cgc);
+}
 
-       case CDROMPAUSE:
-       case CDROMRESUME: {
-               cdinfo(CD_DO_IOCTL, "entering CDROMPAUSE/CDROMRESUME\n"); 
-               cgc.cmd[0] = GPCMD_PAUSE_RESUME;
-               cgc.cmd[8] = (cmd == CDROMRESUME) ? 1 : 0;
-               cgc.data_direction = CGC_DATA_NONE;
-               return cdo->generic_packet(cdi, &cgc);
-               }
+static noinline int mmc_ioctl_cdrom_pause_resume(struct cdrom_device_info *cdi,
+                                       struct packet_command *cgc,
+                                       int cmd)
+{
+       struct cdrom_device_ops *cdo = cdi->ops;
+       cdinfo(CD_DO_IOCTL, "entering CDROMPAUSE/CDROMRESUME\n");
+       cgc->cmd[0] = GPCMD_PAUSE_RESUME;
+       cgc->cmd[8] = (cmd == CDROMRESUME) ? 1 : 0;
+       cgc->data_direction = CGC_DATA_NONE;
+       return cdo->generic_packet(cdi, cgc);
+}
 
-       case DVD_READ_STRUCT: {
-               dvd_struct *s;
-               int size = sizeof(dvd_struct);
-               if (!CDROM_CAN(CDC_DVD))
-                       return -ENOSYS;
-               if ((s = kmalloc(size, GFP_KERNEL)) == NULL)
-                       return -ENOMEM;
-               cdinfo(CD_DO_IOCTL, "entering DVD_READ_STRUCT\n"); 
-               if (copy_from_user(s, (dvd_struct __user *)arg, size)) {
-                       kfree(s);
-                       return -EFAULT;
-               }
-               if ((ret = dvd_read_struct(cdi, s))) {
-                       kfree(s);
-                       return ret;
-               }
-               if (copy_to_user((dvd_struct __user *)arg, s, size))
-                       ret = -EFAULT;
+static noinline int mmc_ioctl_dvd_read_struct(struct cdrom_device_info *cdi,
+                                               void __user *arg,
+                                               struct packet_command *cgc)
+{
+       int ret;
+       dvd_struct *s;
+       int size = sizeof(dvd_struct);
+
+       if (!CDROM_CAN(CDC_DVD))
+               return -ENOSYS;
+
+       s = kmalloc(size, GFP_KERNEL);
+       if (!s)
+               return -ENOMEM;
+
+       cdinfo(CD_DO_IOCTL, "entering DVD_READ_STRUCT\n");
+       if (copy_from_user(s, arg, size)) {
                kfree(s);
+               return -EFAULT;
+       }
+
+       ret = dvd_read_struct(cdi, s, cgc);
+       if (ret)
+               goto out;
+
+       if (copy_to_user(arg, s, size))
+               ret = -EFAULT;
+out:
+       kfree(s);
+       return ret;
+}
+
+static noinline int mmc_ioctl_dvd_auth(struct cdrom_device_info *cdi,
+                                       void __user *arg)
+{
+       int ret;
+       dvd_authinfo ai;
+       if (!CDROM_CAN(CDC_DVD))
+               return -ENOSYS;
+       cdinfo(CD_DO_IOCTL, "entering DVD_AUTH\n");
+       IOCTL_IN(arg, dvd_authinfo, ai);
+       ret = dvd_do_auth(cdi, &ai);
+       if (ret)
                return ret;
-               }
+       IOCTL_OUT(arg, dvd_authinfo, ai);
+       return 0;
+}
 
-       case DVD_AUTH: {
-               dvd_authinfo ai;
-               if (!CDROM_CAN(CDC_DVD))
-                       return -ENOSYS;
-               cdinfo(CD_DO_IOCTL, "entering DVD_AUTH\n"); 
-               IOCTL_IN(arg, dvd_authinfo, ai);
-               if ((ret = dvd_do_auth (cdi, &ai)))
-                       return ret;
-               IOCTL_OUT(arg, dvd_authinfo, ai);
-               return 0;
-               }
+static noinline int mmc_ioctl_cdrom_next_writable(struct cdrom_device_info *cdi,
+                                               void __user *arg)
+{
+       int ret;
+       long next = 0;
+       cdinfo(CD_DO_IOCTL, "entering CDROM_NEXT_WRITABLE\n");
+       ret = cdrom_get_next_writable(cdi, &next);
+       if (ret)
+               return ret;
+       IOCTL_OUT(arg, long, next);
+       return 0;
+}
 
-       case CDROM_NEXT_WRITABLE: {
-               long next = 0;
-               cdinfo(CD_DO_IOCTL, "entering CDROM_NEXT_WRITABLE\n"); 
-               if ((ret = cdrom_get_next_writable(cdi, &next)))
-                       return ret;
-               IOCTL_OUT(arg, long, next);
-               return 0;
-               }
-       case CDROM_LAST_WRITTEN: {
-               long last = 0;
-               cdinfo(CD_DO_IOCTL, "entering CDROM_LAST_WRITTEN\n"); 
-               if ((ret = cdrom_get_last_written(cdi, &last)))
-                       return ret;
-               IOCTL_OUT(arg, long, last);
-               return 0;
-               }
-       } /* switch */
+static noinline int mmc_ioctl_cdrom_last_written(struct cdrom_device_info *cdi,
+                                               void __user *arg)
+{
+       int ret;
+       long last = 0;
+       cdinfo(CD_DO_IOCTL, "entering CDROM_LAST_WRITTEN\n");
+       ret = cdrom_get_last_written(cdi, &last);
+       if (ret)
+               return ret;
+       IOCTL_OUT(arg, long, last);
+       return 0;
+}
+
+static int mmc_ioctl(struct cdrom_device_info *cdi, unsigned int cmd,
+                    unsigned long arg)
+{
+       struct packet_command cgc;
+       void __user *userptr = (void __user *)arg;
+
+       memset(&cgc, 0, sizeof(cgc));
+
+       /* build a unified command and queue it through
+          cdo->generic_packet() */
+       switch (cmd) {
+       case CDROMREADRAW:
+       case CDROMREADMODE1:
+       case CDROMREADMODE2:
+               return mmc_ioctl_cdrom_read_data(cdi, userptr, &cgc, cmd);
+       case CDROMREADAUDIO:
+               return mmc_ioctl_cdrom_read_audio(cdi, userptr);
+       case CDROMSUBCHNL:
+               return mmc_ioctl_cdrom_subchannel(cdi, userptr);
+       case CDROMPLAYMSF:
+               return mmc_ioctl_cdrom_play_msf(cdi, userptr, &cgc);
+       case CDROMPLAYBLK:
+               return mmc_ioctl_cdrom_play_blk(cdi, userptr, &cgc);
+       case CDROMVOLCTRL:
+       case CDROMVOLREAD:
+               return mmc_ioctl_cdrom_volume(cdi, userptr, &cgc, cmd);
+       case CDROMSTART:
+       case CDROMSTOP:
+               return mmc_ioctl_cdrom_start_stop(cdi, &cgc, cmd);
+       case CDROMPAUSE:
+       case CDROMRESUME:
+               return mmc_ioctl_cdrom_pause_resume(cdi, &cgc, cmd);
+       case DVD_READ_STRUCT:
+               return mmc_ioctl_dvd_read_struct(cdi, userptr, &cgc);
+       case DVD_AUTH:
+               return mmc_ioctl_dvd_auth(cdi, userptr);
+       case CDROM_NEXT_WRITABLE:
+               return mmc_ioctl_cdrom_next_writable(cdi, userptr);
+       case CDROM_LAST_WRITTEN:
+               return mmc_ioctl_cdrom_last_written(cdi, userptr);
+       }
 
        return -ENOTTY;
 }
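
The refactoring above replaces mmc_ioctl()'s single very large switch with small noinline helpers that all receive the same zeroed struct packet_command from the dispatcher, presumably to keep each command's bulky locals out of mmc_ioctl()'s own stack frame and to make the error paths easier to audit. A compact, self-contained sketch of the pattern in plain user-space C (all demo_* names are hypothetical, not part of the patch):

#include <errno.h>
#include <string.h>

struct demo_cmd {                       /* stand-in for struct packet_command */
        unsigned char cmd[12];
        int buflen;
};

/* One helper per command; in the kernel the helpers are marked noinline so
 * each keeps its own stack frame instead of inflating the dispatcher's. */
static int demo_start_stop(struct demo_cmd *cgc, int start)
{
        cgc->cmd[0] = 0x1b;             /* opcode chosen purely for illustration */
        cgc->cmd[4] = start ? 1 : 0;
        return 0;                       /* a real helper would submit the packet here */
}

static int demo_ioctl(unsigned int cmd)
{
        struct demo_cmd cgc;

        memset(&cgc, 0, sizeof(cgc));   /* zeroed once, then shared by every helper */
        switch (cmd) {
        case 1:                         /* e.g. a "start unit" request */
                return demo_start_stop(&cgc, 1);
        case 2:                         /* e.g. a "stop unit" request */
                return demo_start_stop(&cgc, 0);
        }
        return -ENOTTY;                 /* unknown request, as in mmc_ioctl() */
}
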
index 9cf6e9bb017e6dca0b537bb04b70e5c49dbf705b..c7714185f83103219de72565f34a3dadbb6e198d 100644 (file)
@@ -40,6 +40,8 @@
 #define PCI_DEVICE_ID_INTEL_Q45_IG          0x2E12
 #define PCI_DEVICE_ID_INTEL_G45_HB          0x2E20
 #define PCI_DEVICE_ID_INTEL_G45_IG          0x2E22
+#define PCI_DEVICE_ID_INTEL_G41_HB          0x2E30
+#define PCI_DEVICE_ID_INTEL_G41_IG          0x2E32
 
 /* cover 915 and 945 variants */
 #define IS_I915 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_E7221_HB || \
@@ -63,7 +65,8 @@
 #define IS_G4X (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGD_E_HB || \
                agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q45_HB || \
                agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G45_HB || \
-               agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_GM45_HB)
+               agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_GM45_HB || \
+               agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G41_HB)
 
 extern int agp_memory_reserved;
 
@@ -1196,6 +1199,7 @@ static void intel_i965_get_gtt_range(int *gtt_offset, int *gtt_size)
        case PCI_DEVICE_ID_INTEL_IGD_E_HB:
        case PCI_DEVICE_ID_INTEL_Q45_HB:
        case PCI_DEVICE_ID_INTEL_G45_HB:
+       case PCI_DEVICE_ID_INTEL_G41_HB:
                *gtt_offset = *gtt_size = MB(2);
                break;
        default:
@@ -2156,13 +2160,15 @@ static const struct intel_driver_description {
        { PCI_DEVICE_ID_INTEL_Q33_HB, PCI_DEVICE_ID_INTEL_Q33_IG, 0, "Q33",
                NULL, &intel_g33_driver },
        { PCI_DEVICE_ID_INTEL_GM45_HB, PCI_DEVICE_ID_INTEL_GM45_IG, 0,
-           "Mobile Intel? GM45 Express", NULL, &intel_i965_driver },
+           "Mobile Intel® GM45 Express", NULL, &intel_i965_driver },
        { PCI_DEVICE_ID_INTEL_IGD_E_HB, PCI_DEVICE_ID_INTEL_IGD_E_IG, 0,
            "Intel Integrated Graphics Device", NULL, &intel_i965_driver },
        { PCI_DEVICE_ID_INTEL_Q45_HB, PCI_DEVICE_ID_INTEL_Q45_IG, 0,
            "Q45/Q43", NULL, &intel_i965_driver },
        { PCI_DEVICE_ID_INTEL_G45_HB, PCI_DEVICE_ID_INTEL_G45_IG, 0,
            "G45/G43", NULL, &intel_i965_driver },
+       { PCI_DEVICE_ID_INTEL_G41_HB, PCI_DEVICE_ID_INTEL_G41_IG, 0,
+           "G41", NULL, &intel_i965_driver },
        { 0, 0, 0, NULL, NULL, NULL }
 };
 
@@ -2360,6 +2366,7 @@ static struct pci_device_id agp_intel_pci_table[] = {
        ID(PCI_DEVICE_ID_INTEL_IGD_E_HB),
        ID(PCI_DEVICE_ID_INTEL_Q45_HB),
        ID(PCI_DEVICE_ID_INTEL_G45_HB),
+       ID(PCI_DEVICE_ID_INTEL_G41_HB),
        { }
 };
 
index 53fdc7ff387051507ade9def9a75eb67ff1bfd13..32b8bbf5003e17678a996e50ccb2c9b38d018886 100644 (file)
@@ -46,7 +46,7 @@
 /*
  * The High Precision Event Timer driver.
  * This driver is closely modelled after the rtc.c driver.
- * http://www.intel.com/hardwaredesign/hpetspec.htm
+ * http://www.intel.com/hardwaredesign/hpetspec_1.pdf
  */
 #define        HPET_USER_FREQ  (64)
 #define        HPET_DRIFT      (500)
index 675076f5fca881d0ab3752b44c8356766dac1c3a..d26891bfcd4154effba426421322ed053acb9d9b 100644 (file)
@@ -558,23 +558,9 @@ struct timer_rand_state {
        unsigned dont_count_entropy:1;
 };
 
-static struct timer_rand_state *irq_timer_state[NR_IRQS];
-
-static struct timer_rand_state *get_timer_rand_state(unsigned int irq)
-{
-       if (irq >= nr_irqs)
-               return NULL;
-
-       return irq_timer_state[irq];
-}
-
-static void set_timer_rand_state(unsigned int irq, struct timer_rand_state *state)
-{
-       if (irq >= nr_irqs)
-               return;
-
-       irq_timer_state[irq] = state;
-}
+#ifndef CONFIG_SPARSE_IRQ
+struct timer_rand_state *irq_timer_state[NR_IRQS];
+#endif
 
 static struct timer_rand_state input_timer_state;
 
@@ -933,8 +919,10 @@ void rand_initialize_irq(int irq)
 {
        struct timer_rand_state *state;
 
+#ifndef CONFIG_SPARSE_IRQ
        if (irq >= nr_irqs)
                return;
+#endif
 
        state = get_timer_rand_state(irq);
 
index c20171078d1d6f475f04a23fc0be9e63101d5d76..e1129fad96dd2c75a20cc283dc6e53045b3a553d 100644 (file)
@@ -57,11 +57,6 @@ u32 acpi_pm_read_verified(void)
        return v2;
 }
 
-static cycle_t acpi_pm_read_slow(void)
-{
-       return (cycle_t)acpi_pm_read_verified();
-}
-
 static cycle_t acpi_pm_read(void)
 {
        return (cycle_t)read_pmtmr();
@@ -88,6 +83,11 @@ static int __init acpi_pm_good_setup(char *__str)
 }
 __setup("acpi_pm_good", acpi_pm_good_setup);
 
+static cycle_t acpi_pm_read_slow(void)
+{
+       return (cycle_t)acpi_pm_read_verified();
+}
+
 static inline void acpi_pm_need_workaround(void)
 {
        clocksource_acpi_pm.read = acpi_pm_read_slow;
index a8b33c2ec8d2fa951a4b81509cf944eb0e4759af..5130b72d593c5726e34263ad0d63eb248b1265f6 100644 (file)
@@ -7,6 +7,8 @@
 menuconfig DRM
        tristate "Direct Rendering Manager (XFree86 4.1.0 and higher DRI support)"
        depends on (AGP || AGP=n) && PCI && !EMULATED_CMPXCHG && MMU
+       select I2C
+       select I2C_ALGOBIT
        help
          Kernel-level support for the Direct Rendering Infrastructure (DRI)
          introduced in XFree86 4.0. If you say Y here, you need to select
@@ -65,6 +67,10 @@ config DRM_I830
          will load the correct one.
 
 config DRM_I915
+       select FB_CFB_FILLRECT
+       select FB_CFB_COPYAREA
+       select FB_CFB_IMAGEBLIT
+       depends on FB
        tristate "i915 driver"
        help
          Choose this option if you have a system that has Intel 830M, 845G,
@@ -76,6 +82,17 @@ config DRM_I915
 
 endchoice
 
+config DRM_I915_KMS
+       bool "Enable modesetting on intel by default"
+       depends on DRM_I915
+       help
+       Choose this option if you want kernel modesetting enabled by default,
+       and you have a new enough userspace to support this. Running old
+       userspaces with this enabled will cause pain.  Note that this causes
+       the driver to bind to PCI devices, which precludes loading things
+       like intelfb.
+
+
 config DRM_MGA
        tristate "Matrox g200/g400"
        depends on DRM
index 74da99495e21d8cbce22853d5e943d2b5e37e647..30022c4a5c12a0be54aef5cfb7b0fddd7abd06df 100644 (file)
@@ -9,7 +9,8 @@ drm-y       :=  drm_auth.o drm_bufs.o drm_cache.o \
                drm_drv.o drm_fops.o drm_gem.o drm_ioctl.o drm_irq.o \
                drm_lock.o drm_memory.o drm_proc.o drm_stub.o drm_vm.o \
                drm_agpsupport.o drm_scatter.o ati_pcigart.o drm_pci.o \
-               drm_sysfs.o drm_hashtab.o drm_sman.o drm_mm.o
+               drm_sysfs.o drm_hashtab.o drm_sman.o drm_mm.o \
+               drm_crtc.o drm_crtc_helper.o drm_modes.o drm_edid.o
 
 drm-$(CONFIG_COMPAT) += drm_ioc32.o
 
index a73462723d2d517d2c7af88cb46880b9024ee354..ca7a9ef5007b3c383f05f6e8969dfb37c1368408 100644 (file)
  * the one with matching magic number, while holding the drm_device::struct_mutex
  * lock.
  */
-static struct drm_file *drm_find_file(struct drm_device * dev, drm_magic_t magic)
+static struct drm_file *drm_find_file(struct drm_master *master, drm_magic_t magic)
 {
        struct drm_file *retval = NULL;
        struct drm_magic_entry *pt;
        struct drm_hash_item *hash;
+       struct drm_device *dev = master->minor->dev;
 
        mutex_lock(&dev->struct_mutex);
-       if (!drm_ht_find_item(&dev->magiclist, (unsigned long)magic, &hash)) {
+       if (!drm_ht_find_item(&master->magiclist, (unsigned long)magic, &hash)) {
                pt = drm_hash_entry(hash, struct drm_magic_entry, hash_item);
                retval = pt->priv;
        }
@@ -71,11 +72,11 @@ static struct drm_file *drm_find_file(struct drm_device * dev, drm_magic_t magic
  * associated the magic number hash key in drm_device::magiclist, while holding
  * the drm_device::struct_mutex lock.
  */
-static int drm_add_magic(struct drm_device * dev, struct drm_file * priv,
+static int drm_add_magic(struct drm_master *master, struct drm_file *priv,
                         drm_magic_t magic)
 {
        struct drm_magic_entry *entry;
-
+       struct drm_device *dev = master->minor->dev;
        DRM_DEBUG("%d\n", magic);
 
        entry = drm_alloc(sizeof(*entry), DRM_MEM_MAGIC);
@@ -83,11 +84,10 @@ static int drm_add_magic(struct drm_device * dev, struct drm_file * priv,
                return -ENOMEM;
        memset(entry, 0, sizeof(*entry));
        entry->priv = priv;
-
        entry->hash_item.key = (unsigned long)magic;
        mutex_lock(&dev->struct_mutex);
-       drm_ht_insert_item(&dev->magiclist, &entry->hash_item);
-       list_add_tail(&entry->head, &dev->magicfree);
+       drm_ht_insert_item(&master->magiclist, &entry->hash_item);
+       list_add_tail(&entry->head, &master->magicfree);
        mutex_unlock(&dev->struct_mutex);
 
        return 0;
@@ -102,20 +102,21 @@ static int drm_add_magic(struct drm_device * dev, struct drm_file * priv,
  * Searches and unlinks the entry in drm_device::magiclist with the magic
  * number hash key, while holding the drm_device::struct_mutex lock.
  */
-static int drm_remove_magic(struct drm_device * dev, drm_magic_t magic)
+static int drm_remove_magic(struct drm_master *master, drm_magic_t magic)
 {
        struct drm_magic_entry *pt;
        struct drm_hash_item *hash;
+       struct drm_device *dev = master->minor->dev;
 
        DRM_DEBUG("%d\n", magic);
 
        mutex_lock(&dev->struct_mutex);
-       if (drm_ht_find_item(&dev->magiclist, (unsigned long)magic, &hash)) {
+       if (drm_ht_find_item(&master->magiclist, (unsigned long)magic, &hash)) {
                mutex_unlock(&dev->struct_mutex);
                return -EINVAL;
        }
        pt = drm_hash_entry(hash, struct drm_magic_entry, hash_item);
-       drm_ht_remove_item(&dev->magiclist, hash);
+       drm_ht_remove_item(&master->magiclist, hash);
        list_del(&pt->head);
        mutex_unlock(&dev->struct_mutex);
 
@@ -153,9 +154,9 @@ int drm_getmagic(struct drm_device *dev, void *data, struct drm_file *file_priv)
                                ++sequence;     /* reserve 0 */
                        auth->magic = sequence++;
                        spin_unlock(&lock);
-               } while (drm_find_file(dev, auth->magic));
+               } while (drm_find_file(file_priv->master, auth->magic));
                file_priv->magic = auth->magic;
-               drm_add_magic(dev, file_priv, auth->magic);
+               drm_add_magic(file_priv->master, file_priv, auth->magic);
        }
 
        DRM_DEBUG("%u\n", auth->magic);
@@ -181,9 +182,9 @@ int drm_authmagic(struct drm_device *dev, void *data,
        struct drm_file *file;
 
        DRM_DEBUG("%u\n", auth->magic);
-       if ((file = drm_find_file(dev, auth->magic))) {
+       if ((file = drm_find_file(file_priv->master, auth->magic))) {
                file->authenticated = 1;
-               drm_remove_magic(dev, auth->magic);
+               drm_remove_magic(file_priv->master, auth->magic);
                return 0;
        }
        return -EINVAL;
index bde64b84166e1d783a2c5a8ece1816c483e76f0b..72c667f9bee1aadbc4067687d05aca0849be751b 100644 (file)
@@ -54,9 +54,9 @@ static struct drm_map_list *drm_find_matching_map(struct drm_device *dev,
 {
        struct drm_map_list *entry;
        list_for_each_entry(entry, &dev->maplist, head) {
-               if (entry->map && map->type == entry->map->type &&
+               if (entry->map && (entry->master == dev->primary->master) && (map->type == entry->map->type) &&
                    ((entry->map->offset == map->offset) ||
-                    (map->type == _DRM_SHM && map->flags==_DRM_CONTAINS_LOCK))) {
+                    ((map->type == _DRM_SHM) && (map->flags&_DRM_CONTAINS_LOCK)))) {
                        return entry;
                }
        }
@@ -210,12 +210,12 @@ static int drm_addmap_core(struct drm_device * dev, unsigned int offset,
                map->offset = (unsigned long)map->handle;
                if (map->flags & _DRM_CONTAINS_LOCK) {
                        /* Prevent a 2nd X Server from creating a 2nd lock */
-                       if (dev->lock.hw_lock != NULL) {
+                       if (dev->primary->master->lock.hw_lock != NULL) {
                                vfree(map->handle);
                                drm_free(map, sizeof(*map), DRM_MEM_MAPS);
                                return -EBUSY;
                        }
-                       dev->sigdata.lock = dev->lock.hw_lock = map->handle;    /* Pointer to lock */
+                       dev->sigdata.lock = dev->primary->master->lock.hw_lock = map->handle;   /* Pointer to lock */
                }
                break;
        case _DRM_AGP: {
@@ -261,6 +261,9 @@ static int drm_addmap_core(struct drm_device * dev, unsigned int offset,
                }
                DRM_DEBUG("AGP offset = 0x%08lx, size = 0x%08lx\n", map->offset, map->size);
 
+               break;
+       case _DRM_GEM:
+               DRM_ERROR("tried to addmap GEM object\n");
                break;
        }
        case _DRM_SCATTER_GATHER:
@@ -319,6 +322,7 @@ static int drm_addmap_core(struct drm_device * dev, unsigned int offset,
        list->user_token = list->hash.key << PAGE_SHIFT;
        mutex_unlock(&dev->struct_mutex);
 
+       list->master = dev->primary->master;
        *maplist = list;
        return 0;
        }
@@ -345,7 +349,7 @@ int drm_addmap_ioctl(struct drm_device *dev, void *data,
        struct drm_map_list *maplist;
        int err;
 
-       if (!(capable(CAP_SYS_ADMIN) || map->type == _DRM_AGP))
+       if (!(capable(CAP_SYS_ADMIN) || map->type == _DRM_AGP || map->type == _DRM_SHM))
                return -EPERM;
 
        err = drm_addmap_core(dev, map->offset, map->size, map->type,
@@ -380,10 +384,12 @@ int drm_rmmap_locked(struct drm_device *dev, drm_local_map_t *map)
        struct drm_map_list *r_list = NULL, *list_t;
        drm_dma_handle_t dmah;
        int found = 0;
+       struct drm_master *master;
 
        /* Find the list entry for the map and remove it */
        list_for_each_entry_safe(r_list, list_t, &dev->maplist, head) {
                if (r_list->map == map) {
+                       master = r_list->master;
                        list_del(&r_list->head);
                        drm_ht_remove_key(&dev->map_hash,
                                          r_list->user_token >> PAGE_SHIFT);
@@ -409,6 +415,13 @@ int drm_rmmap_locked(struct drm_device *dev, drm_local_map_t *map)
                break;
        case _DRM_SHM:
                vfree(map->handle);
+               if (master) {
+                       if (dev->sigdata.lock == master->lock.hw_lock)
+                               dev->sigdata.lock = NULL;
+                       master->lock.hw_lock = NULL;   /* SHM removed */
+                       master->lock.file_priv = NULL;
+                       wake_up_interruptible(&master->lock.lock_queue);
+               }
                break;
        case _DRM_AGP:
        case _DRM_SCATTER_GATHER:
@@ -419,11 +432,15 @@ int drm_rmmap_locked(struct drm_device *dev, drm_local_map_t *map)
                dmah.size = map->size;
                __drm_pci_free(dev, &dmah);
                break;
+       case _DRM_GEM:
+               DRM_ERROR("tried to rmmap GEM object\n");
+               break;
        }
        drm_free(map, sizeof(*map), DRM_MEM_MAPS);
 
        return 0;
 }
+EXPORT_SYMBOL(drm_rmmap_locked);
 
 int drm_rmmap(struct drm_device *dev, drm_local_map_t *map)
 {
index d505f695421fbeff447435c450a2882e40133227..809ec0f034524506ef0b4cef13e4d60b89fa10d3 100644 (file)
@@ -256,12 +256,13 @@ static int drm_context_switch(struct drm_device * dev, int old, int new)
  * hardware lock is held, clears the drm_device::context_flag and wakes up
  * drm_device::context_wait.
  */
-static int drm_context_switch_complete(struct drm_device * dev, int new)
+static int drm_context_switch_complete(struct drm_device *dev,
+                                      struct drm_file *file_priv, int new)
 {
        dev->last_context = new;        /* PRE/POST: This is the _only_ writer. */
        dev->last_switch = jiffies;
 
-       if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
+       if (!_DRM_LOCK_IS_HELD(file_priv->master->lock.hw_lock->lock)) {
                DRM_ERROR("Lock isn't held after context switch\n");
        }
 
@@ -420,7 +421,7 @@ int drm_newctx(struct drm_device *dev, void *data,
        struct drm_ctx *ctx = data;
 
        DRM_DEBUG("%d\n", ctx->handle);
-       drm_context_switch_complete(dev, ctx->handle);
+       drm_context_switch_complete(dev, file_priv, ctx->handle);
 
        return 0;
 }
@@ -442,9 +443,6 @@ int drm_rmctx(struct drm_device *dev, void *data,
        struct drm_ctx *ctx = data;
 
        DRM_DEBUG("%d\n", ctx->handle);
-       if (ctx->handle == DRM_KERNEL_CONTEXT + 1) {
-               file_priv->remove_auth_on_close = 1;
-       }
        if (ctx->handle != DRM_KERNEL_CONTEXT) {
                if (dev->driver->context_dtor)
                        dev->driver->context_dtor(dev, ctx->handle);
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
new file mode 100644 (file)
index 0000000..53c8725
--- /dev/null
@@ -0,0 +1,2446 @@
+/*
+ * Copyright (c) 2006-2008 Intel Corporation
+ * Copyright (c) 2007 Dave Airlie <airlied@linux.ie>
+ * Copyright (c) 2008 Red Hat Inc.
+ *
+ * DRM core CRTC related functions
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission.  The copyright holders make no representations
+ * about the suitability of this software for any purpose.  It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ *
+ * Authors:
+ *      Keith Packard
+ *     Eric Anholt <eric@anholt.net>
+ *      Dave Airlie <airlied@linux.ie>
+ *      Jesse Barnes <jesse.barnes@intel.com>
+ */
+#include <linux/list.h>
+#include "drm.h"
+#include "drmP.h"
+#include "drm_crtc.h"
+
+struct drm_prop_enum_list {
+       int type;
+       char *name;
+};
+
+/* Avoid boilerplate.  I'm tired of typing. */
+#define DRM_ENUM_NAME_FN(fnname, list)                         \
+       char *fnname(int val)                                   \
+       {                                                       \
+               int i;                                          \
+               for (i = 0; i < ARRAY_SIZE(list); i++) {        \
+                       if (list[i].type == val)                \
+                               return list[i].name;            \
+               }                                               \
+               return "(unknown)";                             \
+       }
+
+/*
+ * Global properties
+ */
+static struct drm_prop_enum_list drm_dpms_enum_list[] =
+{      { DRM_MODE_DPMS_ON, "On" },
+       { DRM_MODE_DPMS_STANDBY, "Standby" },
+       { DRM_MODE_DPMS_SUSPEND, "Suspend" },
+       { DRM_MODE_DPMS_OFF, "Off" }
+};
+
+DRM_ENUM_NAME_FN(drm_get_dpms_name, drm_dpms_enum_list)
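
For reference, the DRM_ENUM_NAME_FN() invocation just above generates a lookup helper roughly equivalent to the following sketch (an illustrative expansion, not a literal preprocessor dump):

char *drm_get_dpms_name(int val)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(drm_dpms_enum_list); i++) {
                if (drm_dpms_enum_list[i].type == val)
                        return drm_dpms_enum_list[i].name;
        }
        return "(unknown)";
}
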
+
+/*
+ * Optional properties
+ */
+static struct drm_prop_enum_list drm_scaling_mode_enum_list[] =
+{
+       { DRM_MODE_SCALE_NON_GPU, "Non-GPU" },
+       { DRM_MODE_SCALE_FULLSCREEN, "Fullscreen" },
+       { DRM_MODE_SCALE_NO_SCALE, "No scale" },
+       { DRM_MODE_SCALE_ASPECT, "Aspect" },
+};
+
+static struct drm_prop_enum_list drm_dithering_mode_enum_list[] =
+{
+       { DRM_MODE_DITHERING_OFF, "Off" },
+       { DRM_MODE_DITHERING_ON, "On" },
+};
+
+/*
+ * Non-global properties, but "required" for certain connectors.
+ */
+static struct drm_prop_enum_list drm_dvi_i_select_enum_list[] =
+{
+       { DRM_MODE_SUBCONNECTOR_Automatic, "Automatic" }, /* DVI-I and TV-out */
+       { DRM_MODE_SUBCONNECTOR_DVID,      "DVI-D"     }, /* DVI-I  */
+       { DRM_MODE_SUBCONNECTOR_DVIA,      "DVI-A"     }, /* DVI-I  */
+};
+
+DRM_ENUM_NAME_FN(drm_get_dvi_i_select_name, drm_dvi_i_select_enum_list)
+
+static struct drm_prop_enum_list drm_dvi_i_subconnector_enum_list[] =
+{
+       { DRM_MODE_SUBCONNECTOR_Unknown,   "Unknown"   }, /* DVI-I and TV-out */
+       { DRM_MODE_SUBCONNECTOR_DVID,      "DVI-D"     }, /* DVI-I  */
+       { DRM_MODE_SUBCONNECTOR_DVIA,      "DVI-A"     }, /* DVI-I  */
+};
+
+DRM_ENUM_NAME_FN(drm_get_dvi_i_subconnector_name,
+                drm_dvi_i_subconnector_enum_list)
+
+static struct drm_prop_enum_list drm_tv_select_enum_list[] =
+{
+       { DRM_MODE_SUBCONNECTOR_Automatic, "Automatic" }, /* DVI-I and TV-out */
+       { DRM_MODE_SUBCONNECTOR_Composite, "Composite" }, /* TV-out */
+       { DRM_MODE_SUBCONNECTOR_SVIDEO,    "SVIDEO"    }, /* TV-out */
+       { DRM_MODE_SUBCONNECTOR_Component, "Component" }, /* TV-out */
+};
+
+DRM_ENUM_NAME_FN(drm_get_tv_select_name, drm_tv_select_enum_list)
+
+static struct drm_prop_enum_list drm_tv_subconnector_enum_list[] =
+{
+       { DRM_MODE_SUBCONNECTOR_Unknown,   "Unknown"   }, /* DVI-I and TV-out */
+       { DRM_MODE_SUBCONNECTOR_Composite, "Composite" }, /* TV-out */
+       { DRM_MODE_SUBCONNECTOR_SVIDEO,    "SVIDEO"    }, /* TV-out */
+       { DRM_MODE_SUBCONNECTOR_Component, "Component" }, /* TV-out */
+};
+
+DRM_ENUM_NAME_FN(drm_get_tv_subconnector_name,
+                drm_tv_subconnector_enum_list)
+
+struct drm_conn_prop_enum_list {
+       int type;
+       char *name;
+       int count;
+};
+
+/*
+ * Connector and encoder types.
+ */
+static struct drm_conn_prop_enum_list drm_connector_enum_list[] =
+{      { DRM_MODE_CONNECTOR_Unknown, "Unknown", 0 },
+       { DRM_MODE_CONNECTOR_VGA, "VGA", 0 },
+       { DRM_MODE_CONNECTOR_DVII, "DVI-I", 0 },
+       { DRM_MODE_CONNECTOR_DVID, "DVI-D", 0 },
+       { DRM_MODE_CONNECTOR_DVIA, "DVI-A", 0 },
+       { DRM_MODE_CONNECTOR_Composite, "Composite", 0 },
+       { DRM_MODE_CONNECTOR_SVIDEO, "SVIDEO", 0 },
+       { DRM_MODE_CONNECTOR_LVDS, "LVDS", 0 },
+       { DRM_MODE_CONNECTOR_Component, "Component", 0 },
+       { DRM_MODE_CONNECTOR_9PinDIN, "9-pin DIN", 0 },
+       { DRM_MODE_CONNECTOR_DisplayPort, "DisplayPort", 0 },
+       { DRM_MODE_CONNECTOR_HDMIA, "HDMI Type A", 0 },
+       { DRM_MODE_CONNECTOR_HDMIB, "HDMI Type B", 0 },
+};
+
+static struct drm_prop_enum_list drm_encoder_enum_list[] =
+{      { DRM_MODE_ENCODER_NONE, "None" },
+       { DRM_MODE_ENCODER_DAC, "DAC" },
+       { DRM_MODE_ENCODER_TMDS, "TMDS" },
+       { DRM_MODE_ENCODER_LVDS, "LVDS" },
+       { DRM_MODE_ENCODER_TVDAC, "TV" },
+};
+
+char *drm_get_encoder_name(struct drm_encoder *encoder)
+{
+       static char buf[32];
+
+       snprintf(buf, 32, "%s-%d",
+                drm_encoder_enum_list[encoder->encoder_type].name,
+                encoder->base.id);
+       return buf;
+}
+
+char *drm_get_connector_name(struct drm_connector *connector)
+{
+       static char buf[32];
+
+       snprintf(buf, 32, "%s-%d",
+                drm_connector_enum_list[connector->connector_type].name,
+                connector->connector_type_id);
+       return buf;
+}
+EXPORT_SYMBOL(drm_get_connector_name);
+
+char *drm_get_connector_status_name(enum drm_connector_status status)
+{
+       if (status == connector_status_connected)
+               return "connected";
+       else if (status == connector_status_disconnected)
+               return "disconnected";
+       else
+               return "unknown";
+}
+
+/**
+ * drm_mode_object_get - allocate a new identifier
+ * @dev: DRM device
+ * @obj: object pointer, used to generate unique ID
+ * @type: object type
+ *
+ * LOCKING:
+ * Caller must hold DRM mode_config lock.
+ *
+ * Create a unique identifier based on @obj in @dev's identifier space.  Used
+ * for tracking modes, CRTCs and connectors.
+ *
+ * RETURNS:
+ * New unique (relative to other objects in @dev) integer identifier for the
+ * object.
+ */
+static int drm_mode_object_get(struct drm_device *dev,
+                              struct drm_mode_object *obj, uint32_t obj_type)
+{
+       int new_id = 0;
+       int ret;
+
+       WARN(!mutex_is_locked(&dev->mode_config.mutex),
+            "%s called w/o mode_config lock\n", __FUNCTION__);
+again:
+       if (idr_pre_get(&dev->mode_config.crtc_idr, GFP_KERNEL) == 0) {
+               DRM_ERROR("Ran out of memory getting a mode number\n");
+               return -EINVAL;
+       }
+
+       ret = idr_get_new_above(&dev->mode_config.crtc_idr, obj, 1, &new_id);
+       if (ret == -EAGAIN)
+               goto again;
+
+       obj->id = new_id;
+       obj->type = obj_type;
+       return 0;
+}
+
+/**
+ * drm_mode_object_put - free an identifier
+ * @dev: DRM device
+ * @id: ID to free
+ *
+ * LOCKING:
+ * Caller must hold DRM mode_config lock.
+ *
+ * Free @id from @dev's unique identifier pool.
+ */
+static void drm_mode_object_put(struct drm_device *dev,
+                               struct drm_mode_object *object)
+{
+       idr_remove(&dev->mode_config.crtc_idr, object->id);
+}
+
+void *drm_mode_object_find(struct drm_device *dev, uint32_t id, uint32_t type)
+{
+       struct drm_mode_object *obj;
+
+       obj = idr_find(&dev->mode_config.crtc_idr, id);
+       if (!obj || (obj->type != type) || (obj->id != id))
+               return NULL;
+
+       return obj;
+}
+EXPORT_SYMBOL(drm_mode_object_find);
+
+/**
+ * drm_crtc_from_fb - find the CRTC structure associated with an fb
+ * @dev: DRM device
+ * @fb: framebuffer in question
+ *
+ * LOCKING:
+ * Caller must hold mode_config lock.
+ *
+ * Find CRTC in the mode_config structure that matches @fb.
+ *
+ * RETURNS:
+ * Pointer to the CRTC or NULL if it wasn't found.
+ */
+struct drm_crtc *drm_crtc_from_fb(struct drm_device *dev,
+                                 struct drm_framebuffer *fb)
+{
+       struct drm_crtc *crtc;
+
+       list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+               if (crtc->fb == fb)
+                       return crtc;
+       }
+       return NULL;
+}
+
+/**
+ * drm_framebuffer_init - initialize a framebuffer
+ * @dev: DRM device
+ *
+ * LOCKING:
+ * Caller must hold mode config lock.
+ *
+ * Allocates an ID for the framebuffer's parent mode object, sets its mode
+ * functions & device file and adds it to the master fd list.
+ *
+ * RETURNS:
+ * Zero on success, error code on failure.
+ */
+int drm_framebuffer_init(struct drm_device *dev, struct drm_framebuffer *fb,
+                        const struct drm_framebuffer_funcs *funcs)
+{
+       int ret;
+
+       ret = drm_mode_object_get(dev, &fb->base, DRM_MODE_OBJECT_FB);
+       if (ret) {
+               return ret;
+       }
+
+       fb->dev = dev;
+       fb->funcs = funcs;
+       dev->mode_config.num_fb++;
+       list_add(&fb->head, &dev->mode_config.fb_list);
+
+       return 0;
+}
+EXPORT_SYMBOL(drm_framebuffer_init);
+
+/**
+ * drm_framebuffer_cleanup - remove a framebuffer object
+ * @fb: framebuffer to remove
+ *
+ * LOCKING:
+ * Caller must hold mode config lock.
+ *
+ * Scans all the CRTCs in @fb's device.  Any CRTC still using @fb has its
+ * framebuffer pointer cleared (set to NULL).
+ */
+void drm_framebuffer_cleanup(struct drm_framebuffer *fb)
+{
+       struct drm_device *dev = fb->dev;
+       struct drm_crtc *crtc;
+
+       /* remove from any CRTC */
+       list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+               if (crtc->fb == fb)
+                       crtc->fb = NULL;
+       }
+
+       drm_mode_object_put(dev, &fb->base);
+       list_del(&fb->head);
+       dev->mode_config.num_fb--;
+}
+EXPORT_SYMBOL(drm_framebuffer_cleanup);
+
+/**
+ * drm_crtc_init - Initialise a new CRTC object
+ * @dev: DRM device
+ * @crtc: CRTC object to init
+ * @funcs: callbacks for the new CRTC
+ *
+ * LOCKING:
+ * Caller must hold mode config lock.
+ *
+ * Inits a new object created as base part of a driver crtc object.
+ */
+void drm_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
+                  const struct drm_crtc_funcs *funcs)
+{
+       crtc->dev = dev;
+       crtc->funcs = funcs;
+
+       mutex_lock(&dev->mode_config.mutex);
+       drm_mode_object_get(dev, &crtc->base, DRM_MODE_OBJECT_CRTC);
+
+       list_add_tail(&crtc->head, &dev->mode_config.crtc_list);
+       dev->mode_config.num_crtc++;
+       mutex_unlock(&dev->mode_config.mutex);
+}
+EXPORT_SYMBOL(drm_crtc_init);
+
+/**
+ * drm_crtc_cleanup - Cleans up the core crtc usage.
+ * @crtc: CRTC to cleanup
+ *
+ * LOCKING:
+ * Caller must hold mode config lock.
+ *
+ * Cleans up @crtc and removes it from the DRM mode setting core.  The
+ * drm_crtc structure itself is NOT freed; the caller is responsible for that.
+ */
+void drm_crtc_cleanup(struct drm_crtc *crtc)
+{
+       struct drm_device *dev = crtc->dev;
+
+       if (crtc->gamma_store) {
+               kfree(crtc->gamma_store);
+               crtc->gamma_store = NULL;
+       }
+
+       drm_mode_object_put(dev, &crtc->base);
+       list_del(&crtc->head);
+       dev->mode_config.num_crtc--;
+}
+EXPORT_SYMBOL(drm_crtc_cleanup);
+
+/**
+ * drm_mode_probed_add - add a mode to a connector's probed mode list
+ * @connector: connector the new mode is to be added to
+ * @mode: mode data
+ *
+ * LOCKING:
+ * Caller must hold mode config lock.
+ *
+ * Add @mode to @connector's mode list for later use.
+ */
+void drm_mode_probed_add(struct drm_connector *connector,
+                        struct drm_display_mode *mode)
+{
+       list_add(&mode->head, &connector->probed_modes);
+}
+EXPORT_SYMBOL(drm_mode_probed_add);
+
+/**
+ * drm_mode_remove - remove and free a mode
+ * @connector: connector list to modify
+ * @mode: mode to remove
+ *
+ * LOCKING:
+ * Caller must hold mode config lock.
+ *
+ * Remove @mode from @connector's mode list, then free it.
+ */
+void drm_mode_remove(struct drm_connector *connector,
+                    struct drm_display_mode *mode)
+{
+       list_del(&mode->head);
+       kfree(mode);
+}
+EXPORT_SYMBOL(drm_mode_remove);
+
+/**
+ * drm_connector_init - Init a preallocated connector
+ * @dev: DRM device
+ * @connector: the connector to init
+ * @funcs: callbacks for this connector
+ * @connector_type: user visible connector type
+ *
+ * LOCKING:
+ * Takes @dev's mode_config lock internally; the caller must not hold it.
+ *
+ * Initialises a preallocated connector. Connectors should be
+ * subclassed as part of driver connector objects.
+ */
+void drm_connector_init(struct drm_device *dev,
+                    struct drm_connector *connector,
+                    const struct drm_connector_funcs *funcs,
+                    int connector_type)
+{
+       mutex_lock(&dev->mode_config.mutex);
+
+       connector->dev = dev;
+       connector->funcs = funcs;
+       drm_mode_object_get(dev, &connector->base, DRM_MODE_OBJECT_CONNECTOR);
+       connector->connector_type = connector_type;
+       connector->connector_type_id =
+               ++drm_connector_enum_list[connector_type].count; /* TODO */
+       INIT_LIST_HEAD(&connector->user_modes);
+       INIT_LIST_HEAD(&connector->probed_modes);
+       INIT_LIST_HEAD(&connector->modes);
+       connector->edid_blob_ptr = NULL;
+
+       list_add_tail(&connector->head, &dev->mode_config.connector_list);
+       dev->mode_config.num_connector++;
+
+       drm_connector_attach_property(connector,
+                                     dev->mode_config.edid_property, 0);
+
+       drm_connector_attach_property(connector,
+                                     dev->mode_config.dpms_property, 0);
+
+       mutex_unlock(&dev->mode_config.mutex);
+}
+EXPORT_SYMBOL(drm_connector_init);
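A comparable sketch for connectors (names invented): embed struct drm_connector, register it with the desired connector type, and feed detected modes to drm_mode_probed_add() from the connector's probe path.

/* Hypothetical driver connector wrapper. */
struct foo_connector {
	struct drm_connector base;
	int ddc_bus;				/* driver-private probe state */
};

static const struct drm_connector_funcs foo_connector_funcs;	/* .fill_modes, .destroy */

static void foo_connector_register(struct drm_device *dev,
				   struct foo_connector *fcon, int connector_type)
{
	drm_connector_init(dev, &fcon->base, &foo_connector_funcs, connector_type);
	/* detected modes are later added with
	 * drm_mode_probed_add(&fcon->base, mode) from the fill_modes path */
}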
+
+/**
+ * drm_connector_cleanup - cleans up an initialised connector
+ * @connector: connector to cleanup
+ *
+ * LOCKING:
+ * Takes @dev's mode_config lock internally; the caller must not hold it.
+ *
+ * Cleans up the connector but doesn't free the object.
+ */
+void drm_connector_cleanup(struct drm_connector *connector)
+{
+       struct drm_device *dev = connector->dev;
+       struct drm_display_mode *mode, *t;
+
+       list_for_each_entry_safe(mode, t, &connector->probed_modes, head)
+               drm_mode_remove(connector, mode);
+
+       list_for_each_entry_safe(mode, t, &connector->modes, head)
+               drm_mode_remove(connector, mode);
+
+       list_for_each_entry_safe(mode, t, &connector->user_modes, head)
+               drm_mode_remove(connector, mode);
+
+       mutex_lock(&dev->mode_config.mutex);
+       drm_mode_object_put(dev, &connector->base);
+       list_del(&connector->head);
+       mutex_unlock(&dev->mode_config.mutex);
+}
+EXPORT_SYMBOL(drm_connector_cleanup);
+
+void drm_encoder_init(struct drm_device *dev,
+                     struct drm_encoder *encoder,
+                     const struct drm_encoder_funcs *funcs,
+                     int encoder_type)
+{
+       mutex_lock(&dev->mode_config.mutex);
+
+       encoder->dev = dev;
+
+       drm_mode_object_get(dev, &encoder->base, DRM_MODE_OBJECT_ENCODER);
+       encoder->encoder_type = encoder_type;
+       encoder->funcs = funcs;
+
+       list_add_tail(&encoder->head, &dev->mode_config.encoder_list);
+       dev->mode_config.num_encoder++;
+
+       mutex_unlock(&dev->mode_config.mutex);
+}
+EXPORT_SYMBOL(drm_encoder_init);
+
+void drm_encoder_cleanup(struct drm_encoder *encoder)
+{
+       struct drm_device *dev = encoder->dev;
+       mutex_lock(&dev->mode_config.mutex);
+       drm_mode_object_put(dev, &encoder->base);
+       list_del(&encoder->head);
+       mutex_unlock(&dev->mode_config.mutex);
+}
+EXPORT_SYMBOL(drm_encoder_cleanup);
+
+/**
+ * drm_mode_create - create a new display mode
+ * @dev: DRM device
+ *
+ * LOCKING:
+ * Caller must hold DRM mode_config lock.
+ *
+ * Create a new drm_display_mode, give it an ID, and return it.
+ *
+ * RETURNS:
+ * Pointer to new mode on success, NULL on error.
+ */
+struct drm_display_mode *drm_mode_create(struct drm_device *dev)
+{
+       struct drm_display_mode *nmode;
+
+       nmode = kzalloc(sizeof(struct drm_display_mode), GFP_KERNEL);
+       if (!nmode)
+               return NULL;
+
+       drm_mode_object_get(dev, &nmode->base, DRM_MODE_OBJECT_MODE);
+       return nmode;
+}
+EXPORT_SYMBOL(drm_mode_create);
+
+/**
+ * drm_mode_destroy - remove a mode
+ * @dev: DRM device
+ * @mode: mode to remove
+ *
+ * LOCKING:
+ * Caller must hold mode config lock.
+ *
+ * Free @mode's unique identifier, then free it.
+ */
+void drm_mode_destroy(struct drm_device *dev, struct drm_display_mode *mode)
+{
+       drm_mode_object_put(dev, &mode->base);
+
+       kfree(mode);
+}
+EXPORT_SYMBOL(drm_mode_destroy);
+
+static int drm_mode_create_standard_connector_properties(struct drm_device *dev)
+{
+       struct drm_property *edid;
+       struct drm_property *dpms;
+       int i;
+
+       /*
+        * Standard properties (apply to all connectors)
+        */
+       edid = drm_property_create(dev, DRM_MODE_PROP_BLOB |
+                                  DRM_MODE_PROP_IMMUTABLE,
+                                  "EDID", 0);
+       dev->mode_config.edid_property = edid;
+
+       dpms = drm_property_create(dev, DRM_MODE_PROP_ENUM,
+                                  "DPMS", ARRAY_SIZE(drm_dpms_enum_list));
+       for (i = 0; i < ARRAY_SIZE(drm_dpms_enum_list); i++)
+               drm_property_add_enum(dpms, i, drm_dpms_enum_list[i].type,
+                                     drm_dpms_enum_list[i].name);
+       dev->mode_config.dpms_property = dpms;
+
+       return 0;
+}
+
+/**
+ * drm_mode_create_dvi_i_properties - create DVI-I specific connector properties
+ * @dev: DRM device
+ *
+ * Called by a driver the first time a DVI-I connector is made.
+ */
+int drm_mode_create_dvi_i_properties(struct drm_device *dev)
+{
+       struct drm_property *dvi_i_selector;
+       struct drm_property *dvi_i_subconnector;
+       int i;
+
+       if (dev->mode_config.dvi_i_select_subconnector_property)
+               return 0;
+
+       dvi_i_selector =
+               drm_property_create(dev, DRM_MODE_PROP_ENUM,
+                                   "select subconnector",
+                                   ARRAY_SIZE(drm_dvi_i_select_enum_list));
+       for (i = 0; i < ARRAY_SIZE(drm_dvi_i_select_enum_list); i++)
+               drm_property_add_enum(dvi_i_selector, i,
+                                     drm_dvi_i_select_enum_list[i].type,
+                                     drm_dvi_i_select_enum_list[i].name);
+       dev->mode_config.dvi_i_select_subconnector_property = dvi_i_selector;
+
+       dvi_i_subconnector =
+               drm_property_create(dev, DRM_MODE_PROP_ENUM |
+                                   DRM_MODE_PROP_IMMUTABLE,
+                                   "subconnector",
+                                   ARRAY_SIZE(drm_dvi_i_subconnector_enum_list));
+       for (i = 0; i < ARRAY_SIZE(drm_dvi_i_subconnector_enum_list); i++)
+               drm_property_add_enum(dvi_i_subconnector, i,
+                                     drm_dvi_i_subconnector_enum_list[i].type,
+                                     drm_dvi_i_subconnector_enum_list[i].name);
+       dev->mode_config.dvi_i_subconnector_property = dvi_i_subconnector;
+
+       return 0;
+}
+EXPORT_SYMBOL(drm_mode_create_dvi_i_properties);
+
+/**
+ * drm_mode_create_tv_properties - create TV specific connector properties
+ * @dev: DRM device
+ * @num_modes: number of different TV formats (modes) supported
+ * @modes: array of pointers to strings containing name of each format
+ *
+ * Called by a driver's TV initialization routine, this function creates
+ * the TV specific connector properties for a given device.  Caller is
+ * responsible for allocating a list of format names and passing them to
+ * this routine.
+ */
+int drm_mode_create_tv_properties(struct drm_device *dev, int num_modes,
+                                 char *modes[])
+{
+       struct drm_property *tv_selector;
+       struct drm_property *tv_subconnector;
+       int i;
+
+       if (dev->mode_config.tv_select_subconnector_property)
+               return 0;
+
+       /*
+        * Basic connector properties
+        */
+       tv_selector = drm_property_create(dev, DRM_MODE_PROP_ENUM,
+                                         "select subconnector",
+                                         ARRAY_SIZE(drm_tv_select_enum_list));
+       for (i = 0; i < ARRAY_SIZE(drm_tv_select_enum_list); i++)
+               drm_property_add_enum(tv_selector, i,
+                                     drm_tv_select_enum_list[i].type,
+                                     drm_tv_select_enum_list[i].name);
+       dev->mode_config.tv_select_subconnector_property = tv_selector;
+
+       tv_subconnector =
+               drm_property_create(dev, DRM_MODE_PROP_ENUM |
+                                   DRM_MODE_PROP_IMMUTABLE, "subconnector",
+                                   ARRAY_SIZE(drm_tv_subconnector_enum_list));
+       for (i = 0; i < ARRAY_SIZE(drm_tv_subconnector_enum_list); i++)
+               drm_property_add_enum(tv_subconnector, i,
+                                     drm_tv_subconnector_enum_list[i].type,
+                                     drm_tv_subconnector_enum_list[i].name);
+       dev->mode_config.tv_subconnector_property = tv_subconnector;
+
+       /*
+        * Other, TV specific properties: margins & TV modes.
+        */
+       dev->mode_config.tv_left_margin_property =
+               drm_property_create(dev, DRM_MODE_PROP_RANGE,
+                                   "left margin", 2);
+       dev->mode_config.tv_left_margin_property->values[0] = 0;
+       dev->mode_config.tv_left_margin_property->values[1] = 100;
+
+       dev->mode_config.tv_right_margin_property =
+               drm_property_create(dev, DRM_MODE_PROP_RANGE,
+                                   "right margin", 2);
+       dev->mode_config.tv_right_margin_property->values[0] = 0;
+       dev->mode_config.tv_right_margin_property->values[1] = 100;
+
+       dev->mode_config.tv_top_margin_property =
+               drm_property_create(dev, DRM_MODE_PROP_RANGE,
+                                   "top margin", 2);
+       dev->mode_config.tv_top_margin_property->values[0] = 0;
+       dev->mode_config.tv_top_margin_property->values[1] = 100;
+
+       dev->mode_config.tv_bottom_margin_property =
+               drm_property_create(dev, DRM_MODE_PROP_RANGE,
+                                   "bottom margin", 2);
+       dev->mode_config.tv_bottom_margin_property->values[0] = 0;
+       dev->mode_config.tv_bottom_margin_property->values[1] = 100;
+
+       dev->mode_config.tv_mode_property =
+               drm_property_create(dev, DRM_MODE_PROP_ENUM,
+                                   "mode", num_modes);
+       for (i = 0; i < num_modes; i++)
+               drm_property_add_enum(dev->mode_config.tv_mode_property, i,
+                                     i, modes[i]);
+
+       return 0;
+}
+EXPORT_SYMBOL(drm_mode_create_tv_properties);
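A minimal usage sketch for drm_mode_create_tv_properties(); the format names below are placeholders, not the list of any particular driver.

/* Placeholder TV format names; a real driver supplies its own list. */
static char *foo_tv_format_names[] = {
	"NTSC-M", "PAL-B", "SECAM-L",
};

static void foo_tv_init_properties(struct drm_device *dev)
{
	drm_mode_create_tv_properties(dev, ARRAY_SIZE(foo_tv_format_names),
				      foo_tv_format_names);
	/* dev->mode_config.tv_mode_property and the margin properties can now
	 * be attached to the TV connector with drm_connector_attach_property() */
}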
+
+/**
+ * drm_mode_create_scaling_mode_property - create scaling mode property
+ * @dev: DRM device
+ *
+ * Called by a driver the first time the property is needed.  The resulting
+ * property must then be attached to the desired connectors by the driver.
+ */
+int drm_mode_create_scaling_mode_property(struct drm_device *dev)
+{
+       struct drm_property *scaling_mode;
+       int i;
+
+       if (dev->mode_config.scaling_mode_property)
+               return 0;
+
+       scaling_mode =
+               drm_property_create(dev, DRM_MODE_PROP_ENUM, "scaling mode",
+                                   ARRAY_SIZE(drm_scaling_mode_enum_list));
+       for (i = 0; i < ARRAY_SIZE(drm_scaling_mode_enum_list); i++)
+               drm_property_add_enum(scaling_mode, i,
+                                     drm_scaling_mode_enum_list[i].type,
+                                     drm_scaling_mode_enum_list[i].name);
+
+       dev->mode_config.scaling_mode_property = scaling_mode;
+
+       return 0;
+}
+EXPORT_SYMBOL(drm_mode_create_scaling_mode_property);
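Because the property is created once but not attached to anything here, a driver is expected to follow up with drm_connector_attach_property(); a hedged sketch (helper name invented):

/* Sketch: create the shared scaling property (a no-op after the first call)
 * and attach it to one connector with an initial value of 0. */
static void foo_attach_scaling(struct drm_device *dev,
			       struct drm_connector *connector)
{
	drm_mode_create_scaling_mode_property(dev);
	drm_connector_attach_property(connector,
				      dev->mode_config.scaling_mode_property, 0);
}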
+
+/**
+ * drm_mode_create_dithering_property - create dithering property
+ * @dev: DRM device
+ *
+ * Called by a driver the first time the property is needed.  The resulting
+ * property must then be attached to the desired connectors by the driver.
+ */
+int drm_mode_create_dithering_property(struct drm_device *dev)
+{
+       struct drm_property *dithering_mode;
+       int i;
+
+       if (dev->mode_config.dithering_mode_property)
+               return 0;
+
+       dithering_mode =
+               drm_property_create(dev, DRM_MODE_PROP_ENUM, "dithering",
+                                   ARRAY_SIZE(drm_dithering_mode_enum_list));
+       for (i = 0; i < ARRAY_SIZE(drm_dithering_mode_enum_list); i++)
+               drm_property_add_enum(dithering_mode, i,
+                                     drm_dithering_mode_enum_list[i].type,
+                                     drm_dithering_mode_enum_list[i].name);
+       dev->mode_config.dithering_mode_property = dithering_mode;
+
+       return 0;
+}
+EXPORT_SYMBOL(drm_mode_create_dithering_property);
+
+/**
+ * drm_mode_config_init - initialize DRM mode_configuration structure
+ * @dev: DRM device
+ *
+ * LOCKING:
+ * None, should happen single threaded at init time.
+ *
+ * Initialize @dev's mode_config structure, used for tracking the graphics
+ * configuration of @dev.
+ */
+void drm_mode_config_init(struct drm_device *dev)
+{
+       mutex_init(&dev->mode_config.mutex);
+       INIT_LIST_HEAD(&dev->mode_config.fb_list);
+       INIT_LIST_HEAD(&dev->mode_config.fb_kernel_list);
+       INIT_LIST_HEAD(&dev->mode_config.crtc_list);
+       INIT_LIST_HEAD(&dev->mode_config.connector_list);
+       INIT_LIST_HEAD(&dev->mode_config.encoder_list);
+       INIT_LIST_HEAD(&dev->mode_config.property_list);
+       INIT_LIST_HEAD(&dev->mode_config.property_blob_list);
+       idr_init(&dev->mode_config.crtc_idr);
+
+       mutex_lock(&dev->mode_config.mutex);
+       drm_mode_create_standard_connector_properties(dev);
+       mutex_unlock(&dev->mode_config.mutex);
+
+       /* Just to be sure */
+       dev->mode_config.num_fb = 0;
+       dev->mode_config.num_connector = 0;
+       dev->mode_config.num_crtc = 0;
+       dev->mode_config.num_encoder = 0;
+}
+EXPORT_SYMBOL(drm_mode_config_init);
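Putting the pieces together: a hedged sketch of a driver's modesetting bring-up order (the size limits and foo_* calls are illustrative assumptions, not requirements of this code).

static void foo_modeset_init(struct drm_device *dev)
{
	drm_mode_config_init(dev);	/* lists, idr and standard properties */

	/* framebuffer size limits later enforced by drm_mode_addfb() */
	dev->mode_config.min_width = 0;
	dev->mode_config.min_height = 0;
	dev->mode_config.max_width = 4096;
	dev->mode_config.max_height = 4096;

	/* then register CRTCs, encoders and connectors, e.g.
	 * foo_crtc_register(), drm_encoder_init(), foo_connector_register().
	 * Teardown mirrors this via drm_mode_config_cleanup(). */
}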
+
+int drm_mode_group_init(struct drm_device *dev, struct drm_mode_group *group)
+{
+       uint32_t total_objects = 0;
+
+       total_objects += dev->mode_config.num_crtc;
+       total_objects += dev->mode_config.num_connector;
+       total_objects += dev->mode_config.num_encoder;
+
+       if (total_objects == 0)
+               return -EINVAL;
+
+       group->id_list = kzalloc(total_objects * sizeof(uint32_t), GFP_KERNEL);
+       if (!group->id_list)
+               return -ENOMEM;
+
+       group->num_crtcs = 0;
+       group->num_connectors = 0;
+       group->num_encoders = 0;
+       return 0;
+}
+
+int drm_mode_group_init_legacy_group(struct drm_device *dev,
+                                    struct drm_mode_group *group)
+{
+       struct drm_crtc *crtc;
+       struct drm_encoder *encoder;
+       struct drm_connector *connector;
+       int ret;
+
+       if ((ret = drm_mode_group_init(dev, group)))
+               return ret;
+
+       list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
+               group->id_list[group->num_crtcs++] = crtc->base.id;
+
+       list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
+               group->id_list[group->num_crtcs + group->num_encoders++] =
+               encoder->base.id;
+
+       list_for_each_entry(connector, &dev->mode_config.connector_list, head)
+               group->id_list[group->num_crtcs + group->num_encoders +
+                              group->num_connectors++] = connector->base.id;
+
+       return 0;
+}
+
+/**
+ * drm_mode_config_cleanup - free up DRM mode_config info
+ * @dev: DRM device
+ *
+ * LOCKING:
+ * Caller must hold mode config lock.
+ *
+ * Free up all the encoders, connectors, properties, framebuffers and CRTCs
+ * associated with this DRM device.
+ *
+ * FIXME: cleanup any dangling user buffer objects too
+ */
+void drm_mode_config_cleanup(struct drm_device *dev)
+{
+       struct drm_connector *connector, *ot;
+       struct drm_crtc *crtc, *ct;
+       struct drm_encoder *encoder, *enct;
+       struct drm_framebuffer *fb, *fbt;
+       struct drm_property *property, *pt;
+
+       list_for_each_entry_safe(encoder, enct, &dev->mode_config.encoder_list,
+                                head) {
+               encoder->funcs->destroy(encoder);
+       }
+
+       list_for_each_entry_safe(connector, ot,
+                                &dev->mode_config.connector_list, head) {
+               connector->funcs->destroy(connector);
+       }
+
+       list_for_each_entry_safe(property, pt, &dev->mode_config.property_list,
+                                head) {
+               drm_property_destroy(dev, property);
+       }
+
+       list_for_each_entry_safe(fb, fbt, &dev->mode_config.fb_list, head) {
+               fb->funcs->destroy(fb);
+       }
+
+       list_for_each_entry_safe(crtc, ct, &dev->mode_config.crtc_list, head) {
+               crtc->funcs->destroy(crtc);
+       }
+
+}
+EXPORT_SYMBOL(drm_mode_config_cleanup);
+
+/**
+ * drm_crtc_convert_to_umode - convert a drm_display_mode into a modeinfo
+ * @out: drm_mode_modeinfo struct to return to the user
+ * @in: drm_display_mode to use
+ *
+ * LOCKING:
+ * None.
+ *
+ * Convert a drm_display_mode into a drm_mode_modeinfo structure to return to
+ * the user.
+ */
+void drm_crtc_convert_to_umode(struct drm_mode_modeinfo *out,
+                              struct drm_display_mode *in)
+{
+       out->clock = in->clock;
+       out->hdisplay = in->hdisplay;
+       out->hsync_start = in->hsync_start;
+       out->hsync_end = in->hsync_end;
+       out->htotal = in->htotal;
+       out->hskew = in->hskew;
+       out->vdisplay = in->vdisplay;
+       out->vsync_start = in->vsync_start;
+       out->vsync_end = in->vsync_end;
+       out->vtotal = in->vtotal;
+       out->vscan = in->vscan;
+       out->vrefresh = in->vrefresh;
+       out->flags = in->flags;
+       out->type = in->type;
+       strncpy(out->name, in->name, DRM_DISPLAY_MODE_LEN);
+       out->name[DRM_DISPLAY_MODE_LEN-1] = 0;
+}
+
+/**
+ * drm_crtc_convert_umode - convert a modeinfo into a drm_display_mode
+ * @out: drm_display_mode to fill in
+ * @in: drm_mode_modeinfo to use
+ *
+ * LOCKING:
+ * None.
+ *
+ * Convert a drm_mode_modeinfo into a drm_display_mode structure to return to
+ * the caller.
+ */
+void drm_crtc_convert_umode(struct drm_display_mode *out,
+                           struct drm_mode_modeinfo *in)
+{
+       out->clock = in->clock;
+       out->hdisplay = in->hdisplay;
+       out->hsync_start = in->hsync_start;
+       out->hsync_end = in->hsync_end;
+       out->htotal = in->htotal;
+       out->hskew = in->hskew;
+       out->vdisplay = in->vdisplay;
+       out->vsync_start = in->vsync_start;
+       out->vsync_end = in->vsync_end;
+       out->vtotal = in->vtotal;
+       out->vscan = in->vscan;
+       out->vrefresh = in->vrefresh;
+       out->flags = in->flags;
+       out->type = in->type;
+       strncpy(out->name, in->name, DRM_DISPLAY_MODE_LEN);
+       out->name[DRM_DISPLAY_MODE_LEN-1] = 0;
+}
+
+/**
+ * drm_mode_getresources - get graphics configuration
+ * @dev: DRM device
+ * @data: ioctl argument (struct drm_mode_card_res)
+ * @file_priv: DRM file private
+ *
+ * LOCKING:
+ * Takes mode config lock.
+ *
+ * Construct a set of configuration description structures and return
+ * them to the user, including CRTC, connector and framebuffer configuration.
+ *
+ * Called by the user via ioctl.
+ *
+ * RETURNS:
+ * Zero on success, errno on failure.
+ */
+int drm_mode_getresources(struct drm_device *dev, void *data,
+                         struct drm_file *file_priv)
+{
+       struct drm_mode_card_res *card_res = data;
+       struct list_head *lh;
+       struct drm_framebuffer *fb;
+       struct drm_connector *connector;
+       struct drm_crtc *crtc;
+       struct drm_encoder *encoder;
+       int ret = 0;
+       int connector_count = 0;
+       int crtc_count = 0;
+       int fb_count = 0;
+       int encoder_count = 0;
+       int copied = 0, i;
+       uint32_t __user *fb_id;
+       uint32_t __user *crtc_id;
+       uint32_t __user *connector_id;
+       uint32_t __user *encoder_id;
+       struct drm_mode_group *mode_group;
+
+       mutex_lock(&dev->mode_config.mutex);
+
+       /*
+        * For the non-control nodes we need to limit the list of resources
+        * by IDs in the group list for this node
+        */
+       list_for_each(lh, &file_priv->fbs)
+               fb_count++;
+
+       mode_group = &file_priv->master->minor->mode_group;
+       if (file_priv->master->minor->type == DRM_MINOR_CONTROL) {
+
+               list_for_each(lh, &dev->mode_config.crtc_list)
+                       crtc_count++;
+
+               list_for_each(lh, &dev->mode_config.connector_list)
+                       connector_count++;
+
+               list_for_each(lh, &dev->mode_config.encoder_list)
+                       encoder_count++;
+       } else {
+
+               crtc_count = mode_group->num_crtcs;
+               connector_count = mode_group->num_connectors;
+               encoder_count = mode_group->num_encoders;
+       }
+
+       card_res->max_height = dev->mode_config.max_height;
+       card_res->min_height = dev->mode_config.min_height;
+       card_res->max_width = dev->mode_config.max_width;
+       card_res->min_width = dev->mode_config.min_width;
+
+       /* handle this in 4 parts */
+       /* FBs */
+       if (card_res->count_fbs >= fb_count) {
+               copied = 0;
+               fb_id = (uint32_t __user *)(unsigned long)card_res->fb_id_ptr;
+               list_for_each_entry(fb, &file_priv->fbs, head) {
+                       if (put_user(fb->base.id, fb_id + copied)) {
+                               ret = -EFAULT;
+                               goto out;
+                       }
+                       copied++;
+               }
+       }
+       card_res->count_fbs = fb_count;
+
+       /* CRTCs */
+       if (card_res->count_crtcs >= crtc_count) {
+               copied = 0;
+               crtc_id = (uint32_t __user *)(unsigned long)card_res->crtc_id_ptr;
+               if (file_priv->master->minor->type == DRM_MINOR_CONTROL) {
+                       list_for_each_entry(crtc, &dev->mode_config.crtc_list,
+                                           head) {
+                               DRM_DEBUG("CRTC ID is %d\n", crtc->base.id);
+                               if (put_user(crtc->base.id, crtc_id + copied)) {
+                                       ret = -EFAULT;
+                                       goto out;
+                               }
+                               copied++;
+                       }
+               } else {
+                       for (i = 0; i < mode_group->num_crtcs; i++) {
+                               if (put_user(mode_group->id_list[i],
+                                            crtc_id + copied)) {
+                                       ret = -EFAULT;
+                                       goto out;
+                               }
+                               copied++;
+                       }
+               }
+       }
+       card_res->count_crtcs = crtc_count;
+
+       /* Encoders */
+       if (card_res->count_encoders >= encoder_count) {
+               copied = 0;
+               encoder_id = (uint32_t __user *)(unsigned long)card_res->encoder_id_ptr;
+               if (file_priv->master->minor->type == DRM_MINOR_CONTROL) {
+                       list_for_each_entry(encoder,
+                                           &dev->mode_config.encoder_list,
+                                           head) {
+                               DRM_DEBUG("ENCODER ID is %d\n",
+                                         encoder->base.id);
+                               if (put_user(encoder->base.id, encoder_id +
+                                            copied)) {
+                                       ret = -EFAULT;
+                                       goto out;
+                               }
+                               copied++;
+                       }
+               } else {
+                       for (i = mode_group->num_crtcs; i < mode_group->num_crtcs + mode_group->num_encoders; i++) {
+                               if (put_user(mode_group->id_list[i],
+                                            encoder_id + copied)) {
+                                       ret = -EFAULT;
+                                       goto out;
+                               }
+                               copied++;
+                       }
+
+               }
+       }
+       card_res->count_encoders = encoder_count;
+
+       /* Connectors */
+       if (card_res->count_connectors >= connector_count) {
+               copied = 0;
+               connector_id = (uint32_t __user *)(unsigned long)card_res->connector_id_ptr;
+               if (file_priv->master->minor->type == DRM_MINOR_CONTROL) {
+                       list_for_each_entry(connector,
+                                           &dev->mode_config.connector_list,
+                                           head) {
+                               DRM_DEBUG("CONNECTOR ID is %d\n",
+                                         connector->base.id);
+                               if (put_user(connector->base.id,
+                                            connector_id + copied)) {
+                                       ret = -EFAULT;
+                                       goto out;
+                               }
+                               copied++;
+                       }
+               } else {
+                       int start = mode_group->num_crtcs +
+                               mode_group->num_encoders;
+                       for (i = start; i < start + mode_group->num_connectors; i++) {
+                               if (put_user(mode_group->id_list[i],
+                                            connector_id + copied)) {
+                                       ret = -EFAULT;
+                                       goto out;
+                               }
+                               copied++;
+                       }
+               }
+       }
+       card_res->count_connectors = connector_count;
+
+       DRM_DEBUG("Counted %d %d %d\n", card_res->count_crtcs,
+                 card_res->count_connectors, card_res->count_encoders);
+
+out:
+       mutex_unlock(&dev->mode_config.mutex);
+       return ret;
+}
+
+/**
+ * drm_mode_getcrtc - get CRTC configuration
+ * @dev: DRM device
+ * @data: ioctl argument (struct drm_mode_crtc)
+ * @file_priv: DRM file private
+ *
+ * LOCKING:
+ * Takes mode config lock.
+ *
+ * Construct a CRTC configuration structure to return to the user.
+ *
+ * Called by the user via ioctl.
+ *
+ * RETURNS:
+ * Zero on success, errno on failure.
+ */
+int drm_mode_getcrtc(struct drm_device *dev,
+                    void *data, struct drm_file *file_priv)
+{
+       struct drm_mode_crtc *crtc_resp = data;
+       struct drm_crtc *crtc;
+       struct drm_mode_object *obj;
+       int ret = 0;
+
+       mutex_lock(&dev->mode_config.mutex);
+
+       obj = drm_mode_object_find(dev, crtc_resp->crtc_id,
+                                  DRM_MODE_OBJECT_CRTC);
+       if (!obj) {
+               ret = -EINVAL;
+               goto out;
+       }
+       crtc = obj_to_crtc(obj);
+
+       crtc_resp->x = crtc->x;
+       crtc_resp->y = crtc->y;
+       crtc_resp->gamma_size = crtc->gamma_size;
+       if (crtc->fb)
+               crtc_resp->fb_id = crtc->fb->base.id;
+       else
+               crtc_resp->fb_id = 0;
+
+       if (crtc->enabled) {
+
+               drm_crtc_convert_to_umode(&crtc_resp->mode, &crtc->mode);
+               crtc_resp->mode_valid = 1;
+
+       } else {
+               crtc_resp->mode_valid = 0;
+       }
+
+out:
+       mutex_unlock(&dev->mode_config.mutex);
+       return ret;
+}
+
+/**
+ * drm_mode_getconnector - get connector configuration
+ * @dev: DRM device
+ * @data: ioctl argument (struct drm_mode_get_connector)
+ * @file_priv: DRM file private
+ *
+ * LOCKING:
+ * Takes mode config lock.
+ *
+ * Construct a connector configuration structure to return to the user.
+ *
+ * Called by the user via ioctl.
+ *
+ * RETURNS:
+ * Zero on success, errno on failure.
+ */
+int drm_mode_getconnector(struct drm_device *dev, void *data,
+                         struct drm_file *file_priv)
+{
+       struct drm_mode_get_connector *out_resp = data;
+       struct drm_mode_object *obj;
+       struct drm_connector *connector;
+       struct drm_display_mode *mode;
+       int mode_count = 0;
+       int props_count = 0;
+       int encoders_count = 0;
+       int ret = 0;
+       int copied = 0;
+       int i;
+       struct drm_mode_modeinfo u_mode;
+       struct drm_mode_modeinfo __user *mode_ptr;
+       uint32_t __user *prop_ptr;
+       uint64_t __user *prop_values;
+       uint32_t __user *encoder_ptr;
+
+       memset(&u_mode, 0, sizeof(struct drm_mode_modeinfo));
+
+       DRM_DEBUG("connector id %d:\n", out_resp->connector_id);
+
+       mutex_lock(&dev->mode_config.mutex);
+
+       obj = drm_mode_object_find(dev, out_resp->connector_id,
+                                  DRM_MODE_OBJECT_CONNECTOR);
+       if (!obj) {
+               ret = -EINVAL;
+               goto out;
+       }
+       connector = obj_to_connector(obj);
+
+       for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
+               if (connector->property_ids[i] != 0) {
+                       props_count++;
+               }
+       }
+
+       for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
+               if (connector->encoder_ids[i] != 0) {
+                       encoders_count++;
+               }
+       }
+
+       if (out_resp->count_modes == 0) {
+               connector->funcs->fill_modes(connector,
+                                            dev->mode_config.max_width,
+                                            dev->mode_config.max_height);
+       }
+
+       /* delayed so we get modes regardless of pre-fill_modes state */
+       list_for_each_entry(mode, &connector->modes, head)
+               mode_count++;
+
+       out_resp->connector_id = connector->base.id;
+       out_resp->connector_type = connector->connector_type;
+       out_resp->connector_type_id = connector->connector_type_id;
+       out_resp->mm_width = connector->display_info.width_mm;
+       out_resp->mm_height = connector->display_info.height_mm;
+       out_resp->subpixel = connector->display_info.subpixel_order;
+       out_resp->connection = connector->status;
+       if (connector->encoder)
+               out_resp->encoder_id = connector->encoder->base.id;
+       else
+               out_resp->encoder_id = 0;
+
+       /*
+        * This ioctl is called twice, once to determine how much space is
+        * needed, and the 2nd time to fill it.
+        */
+       if ((out_resp->count_modes >= mode_count) && mode_count) {
+               copied = 0;
+               mode_ptr = (struct drm_mode_modeinfo *)(unsigned long)out_resp->modes_ptr;
+               list_for_each_entry(mode, &connector->modes, head) {
+                       drm_crtc_convert_to_umode(&u_mode, mode);
+                       if (copy_to_user(mode_ptr + copied,
+                                        &u_mode, sizeof(u_mode))) {
+                               ret = -EFAULT;
+                               goto out;
+                       }
+                       copied++;
+               }
+       }
+       out_resp->count_modes = mode_count;
+
+       if ((out_resp->count_props >= props_count) && props_count) {
+               copied = 0;
+               prop_ptr = (uint32_t *)(unsigned long)(out_resp->props_ptr);
+               prop_values = (uint64_t *)(unsigned long)(out_resp->prop_values_ptr);
+               for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
+                       if (connector->property_ids[i] != 0) {
+                               if (put_user(connector->property_ids[i],
+                                            prop_ptr + copied)) {
+                                       ret = -EFAULT;
+                                       goto out;
+                               }
+
+                               if (put_user(connector->property_values[i],
+                                            prop_values + copied)) {
+                                       ret = -EFAULT;
+                                       goto out;
+                               }
+                               copied++;
+                       }
+               }
+       }
+       out_resp->count_props = props_count;
+
+       if ((out_resp->count_encoders >= encoders_count) && encoders_count) {
+               copied = 0;
+               encoder_ptr = (uint32_t *)(unsigned long)(out_resp->encoders_ptr);
+               for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
+                       if (connector->encoder_ids[i] != 0) {
+                               if (put_user(connector->encoder_ids[i],
+                                            encoder_ptr + copied)) {
+                                       ret = -EFAULT;
+                                       goto out;
+                               }
+                               copied++;
+                       }
+               }
+       }
+       out_resp->count_encoders = encoders_count;
+
+out:
+       mutex_unlock(&dev->mode_config.mutex);
+       return ret;
+}
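The two-pass convention noted in the handler above is driven from userspace roughly as follows; this is a sketch assuming the matching drm_mode.h UAPI definitions and the DRM_IOCTL_MODE_GETCONNECTOR wrapper, with error handling trimmed and only the mode array fetched.

/* Userspace sketch: needs <stdint.h>, <stdlib.h>, <string.h>, <sys/ioctl.h>
 * and the DRM UAPI headers. */
static int foo_get_modes(int fd, uint32_t connector_id,
			 struct drm_mode_modeinfo **modes_out)
{
	struct drm_mode_get_connector conn;

	memset(&conn, 0, sizeof(conn));
	conn.connector_id = connector_id;
	if (ioctl(fd, DRM_IOCTL_MODE_GETCONNECTOR, &conn))	/* pass 1: counts */
		return -1;

	*modes_out = calloc(conn.count_modes, sizeof(**modes_out));
	if (!*modes_out)
		return -1;
	conn.modes_ptr = (uint64_t)(unsigned long)*modes_out;
	conn.count_props = 0;		/* not fetching props/encoders here */
	conn.count_encoders = 0;
	/* props_ptr, prop_values_ptr and encoders_ptr would be sized the same way */

	if (ioctl(fd, DRM_IOCTL_MODE_GETCONNECTOR, &conn))	/* pass 2: data */
		return -1;
	return conn.count_modes;
}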
+
+int drm_mode_getencoder(struct drm_device *dev, void *data,
+                       struct drm_file *file_priv)
+{
+       struct drm_mode_get_encoder *enc_resp = data;
+       struct drm_mode_object *obj;
+       struct drm_encoder *encoder;
+       int ret = 0;
+
+       mutex_lock(&dev->mode_config.mutex);
+       obj = drm_mode_object_find(dev, enc_resp->encoder_id,
+                                  DRM_MODE_OBJECT_ENCODER);
+       if (!obj) {
+               ret = -EINVAL;
+               goto out;
+       }
+       encoder = obj_to_encoder(obj);
+
+       if (encoder->crtc)
+               enc_resp->crtc_id = encoder->crtc->base.id;
+       else
+               enc_resp->crtc_id = 0;
+       enc_resp->encoder_type = encoder->encoder_type;
+       enc_resp->encoder_id = encoder->base.id;
+       enc_resp->possible_crtcs = encoder->possible_crtcs;
+       enc_resp->possible_clones = encoder->possible_clones;
+
+out:
+       mutex_unlock(&dev->mode_config.mutex);
+       return ret;
+}
+
+/**
+ * drm_mode_setcrtc - set CRTC configuration
+ * @dev: DRM device
+ * @data: ioctl argument (struct drm_mode_crtc)
+ * @file_priv: DRM file private
+ *
+ * LOCKING:
+ * Takes mode config lock.
+ *
+ * Build a new CRTC configuration based on user request.
+ *
+ * Called by the user via ioctl.
+ *
+ * RETURNS:
+ * Zero on success, errno on failure.
+ */
+int drm_mode_setcrtc(struct drm_device *dev, void *data,
+                    struct drm_file *file_priv)
+{
+       struct drm_mode_config *config = &dev->mode_config;
+       struct drm_mode_crtc *crtc_req = data;
+       struct drm_mode_object *obj;
+       struct drm_crtc *crtc, *crtcfb;
+       struct drm_connector **connector_set = NULL, *connector;
+       struct drm_framebuffer *fb = NULL;
+       struct drm_display_mode *mode = NULL;
+       struct drm_mode_set set;
+       uint32_t __user *set_connectors_ptr;
+       int ret = 0;
+       int i;
+
+       mutex_lock(&dev->mode_config.mutex);
+       obj = drm_mode_object_find(dev, crtc_req->crtc_id,
+                                  DRM_MODE_OBJECT_CRTC);
+       if (!obj) {
+               DRM_DEBUG("Unknown CRTC ID %d\n", crtc_req->crtc_id);
+               ret = -EINVAL;
+               goto out;
+       }
+       crtc = obj_to_crtc(obj);
+
+       if (crtc_req->mode_valid) {
+               /* If we have a mode we need a framebuffer. */
+               /* If we pass -1, set the mode with the currently bound fb */
+               if (crtc_req->fb_id == -1) {
+                       list_for_each_entry(crtcfb,
+                                           &dev->mode_config.crtc_list, head) {
+                               if (crtcfb == crtc) {
+                                       DRM_DEBUG("Using current fb for setmode\n");
+                                       fb = crtc->fb;
+                               }
+                       }
+               } else {
+                       obj = drm_mode_object_find(dev, crtc_req->fb_id,
+                                                  DRM_MODE_OBJECT_FB);
+                       if (!obj) {
+                               DRM_DEBUG("Unknown FB ID %d\n", crtc_req->fb_id);
+                               ret = -EINVAL;
+                               goto out;
+                       }
+                       fb = obj_to_fb(obj);
+               }
+
+               mode = drm_mode_create(dev);
+               if (!mode) {
+                       ret = -ENOMEM;
+                       goto out;
+               }
+               drm_crtc_convert_umode(mode, &crtc_req->mode);
+               drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
+       }
+
+       if (crtc_req->count_connectors == 0 && mode) {
+               DRM_DEBUG("Count connectors is 0 but mode set\n");
+               ret = -EINVAL;
+               goto out;
+       }
+
+       if (crtc_req->count_connectors > 0 && !mode && !fb) {
+               DRM_DEBUG("Count connectors is %d but no mode or fb set\n",
+                         crtc_req->count_connectors);
+               ret = -EINVAL;
+               goto out;
+       }
+
+       if (crtc_req->count_connectors > 0) {
+               u32 out_id;
+
+               /* Avoid unbounded kernel memory allocation */
+               if (crtc_req->count_connectors > config->num_connector) {
+                       ret = -EINVAL;
+                       goto out;
+               }
+
+               connector_set = kmalloc(crtc_req->count_connectors *
+                                       sizeof(struct drm_connector *),
+                                       GFP_KERNEL);
+               if (!connector_set) {
+                       ret = -ENOMEM;
+                       goto out;
+               }
+
+               for (i = 0; i < crtc_req->count_connectors; i++) {
+                       set_connectors_ptr = (uint32_t *)(unsigned long)crtc_req->set_connectors_ptr;
+                       if (get_user(out_id, &set_connectors_ptr[i])) {
+                               ret = -EFAULT;
+                               goto out;
+                       }
+
+                       obj = drm_mode_object_find(dev, out_id,
+                                                  DRM_MODE_OBJECT_CONNECTOR);
+                       if (!obj) {
+                               DRM_DEBUG("Connector id %d unknown\n", out_id);
+                               ret = -EINVAL;
+                               goto out;
+                       }
+                       connector = obj_to_connector(obj);
+
+                       connector_set[i] = connector;
+               }
+       }
+
+       set.crtc = crtc;
+       set.x = crtc_req->x;
+       set.y = crtc_req->y;
+       set.mode = mode;
+       set.connectors = connector_set;
+       set.num_connectors = crtc_req->count_connectors;
+       set.fb = fb;
+       ret = crtc->funcs->set_config(&set);
+
+out:
+       kfree(connector_set);
+       mutex_unlock(&dev->mode_config.mutex);
+       return ret;
+}
+
+int drm_mode_cursor_ioctl(struct drm_device *dev,
+                       void *data, struct drm_file *file_priv)
+{
+       struct drm_mode_cursor *req = data;
+       struct drm_mode_object *obj;
+       struct drm_crtc *crtc;
+       int ret = 0;
+
+       DRM_DEBUG("\n");
+
+       if (!req->flags) {
+               DRM_ERROR("no operation set\n");
+               return -EINVAL;
+       }
+
+       mutex_lock(&dev->mode_config.mutex);
+       obj = drm_mode_object_find(dev, req->crtc_id, DRM_MODE_OBJECT_CRTC);
+       if (!obj) {
+               DRM_DEBUG("Unknown CRTC ID %d\n", req->crtc_id);
+               ret = -EINVAL;
+               goto out;
+       }
+       crtc = obj_to_crtc(obj);
+
+       if (req->flags & DRM_MODE_CURSOR_BO) {
+               if (!crtc->funcs->cursor_set) {
+                       DRM_ERROR("crtc does not support cursor\n");
+                       ret = -ENXIO;
+                       goto out;
+               }
+               /* Turns off the cursor if handle is 0 */
+               ret = crtc->funcs->cursor_set(crtc, file_priv, req->handle,
+                                             req->width, req->height);
+       }
+
+       if (req->flags & DRM_MODE_CURSOR_MOVE) {
+               if (crtc->funcs->cursor_move) {
+                       ret = crtc->funcs->cursor_move(crtc, req->x, req->y);
+               } else {
+                       DRM_ERROR("crtc does not support cursor\n");
+                       ret = -EFAULT;
+                       goto out;
+               }
+       }
+out:
+       mutex_unlock(&dev->mode_config.mutex);
+       return ret;
+}
+
+/**
+ * drm_mode_addfb - add an FB to the graphics configuration
+ * @dev: DRM device
+ * @data: ioctl argument (struct drm_mode_fb_cmd)
+ * @file_priv: DRM file private
+ *
+ * LOCKING:
+ * Takes mode config lock.
+ *
+ * Create a new FB from the user request and add it to the file's FB list.
+ *
+ * Called by the user via ioctl.
+ *
+ * RETURNS:
+ * Zero on success, errno on failure.
+ */
+int drm_mode_addfb(struct drm_device *dev,
+                  void *data, struct drm_file *file_priv)
+{
+       struct drm_mode_fb_cmd *r = data;
+       struct drm_mode_config *config = &dev->mode_config;
+       struct drm_framebuffer *fb;
+       int ret = 0;
+
+       if ((config->min_width > r->width) || (r->width > config->max_width)) {
+               DRM_ERROR("mode new framebuffer width not within limits\n");
+               return -EINVAL;
+       }
+       if ((config->min_height > r->height) || (r->height > config->max_height)) {
+               DRM_ERROR("mode new framebuffer height not within limits\n");
+               return -EINVAL;
+       }
+
+       mutex_lock(&dev->mode_config.mutex);
+
+       /* TODO check buffer is sufficiently large */
+       /* TODO setup destructor callback */
+
+       fb = dev->mode_config.funcs->fb_create(dev, file_priv, r);
+       if (!fb) {
+               DRM_ERROR("could not create framebuffer\n");
+               ret = -EINVAL;
+               goto out;
+       }
+
+       r->fb_id = fb->base.id;
+       list_add(&fb->filp_head, &file_priv->fbs);
+
+out:
+       mutex_unlock(&dev->mode_config.mutex);
+       return ret;
+}
+
+/**
+ * drm_mode_rmfb - remove an FB from the configuration
+ * @dev: DRM device
+ * @data: ioctl argument (framebuffer ID)
+ * @file_priv: DRM file private
+ *
+ * LOCKING:
+ * Takes mode config lock.
+ *
+ * Remove the FB specified by the user.
+ *
+ * Called by the user via ioctl.
+ *
+ * RETURNS:
+ * Zero on success, errno on failure.
+ */
+int drm_mode_rmfb(struct drm_device *dev,
+                  void *data, struct drm_file *file_priv)
+{
+       struct drm_mode_object *obj;
+       struct drm_framebuffer *fb = NULL;
+       struct drm_framebuffer *fbl = NULL;
+       uint32_t *id = data;
+       int ret = 0;
+       int found = 0;
+
+       mutex_lock(&dev->mode_config.mutex);
+       obj = drm_mode_object_find(dev, *id, DRM_MODE_OBJECT_FB);
+       /* TODO check that we really get a framebuffer back. */
+       if (!obj) {
+               DRM_ERROR("mode invalid framebuffer id\n");
+               ret = -EINVAL;
+               goto out;
+       }
+       fb = obj_to_fb(obj);
+
+       list_for_each_entry(fbl, &file_priv->fbs, filp_head)
+               if (fb == fbl)
+                       found = 1;
+
+       if (!found) {
+               DRM_ERROR("tried to remove a fb that we didn't own\n");
+               ret = -EINVAL;
+               goto out;
+       }
+
+       /* TODO release all CRTCs connected to the framebuffer */
+       /* TODO unhook the destructor from the buffer object */
+
+       list_del(&fb->filp_head);
+       fb->funcs->destroy(fb);
+
+out:
+       mutex_unlock(&dev->mode_config.mutex);
+       return ret;
+}
+
+/**
+ * drm_mode_getfb - get FB info
+ * @dev: DRM device
+ * @data: ioctl argument (struct drm_mode_fb_cmd)
+ * @file_priv: DRM file private
+ *
+ * LOCKING:
+ * Takes mode config lock.
+ *
+ * Look up the FB given its ID and return info about it.
+ *
+ * Called by the user via ioctl.
+ *
+ * RETURNS:
+ * Zero on success, errno on failure.
+ */
+int drm_mode_getfb(struct drm_device *dev,
+                  void *data, struct drm_file *file_priv)
+{
+       struct drm_mode_fb_cmd *r = data;
+       struct drm_mode_object *obj;
+       struct drm_framebuffer *fb;
+       int ret = 0;
+
+       mutex_lock(&dev->mode_config.mutex);
+       obj = drm_mode_object_find(dev, r->fb_id, DRM_MODE_OBJECT_FB);
+       if (!obj) {
+               DRM_ERROR("invalid framebuffer id\n");
+               ret = -EINVAL;
+               goto out;
+       }
+       fb = obj_to_fb(obj);
+
+       r->height = fb->height;
+       r->width = fb->width;
+       r->depth = fb->depth;
+       r->bpp = fb->bits_per_pixel;
+       r->pitch = fb->pitch;
+       fb->funcs->create_handle(fb, file_priv, &r->handle);
+
+out:
+       mutex_unlock(&dev->mode_config.mutex);
+       return ret;
+}
+
+/**
+ * drm_fb_release - remove and free the FBs on this file
+ * @filp: file * being released
+ *
+ * LOCKING:
+ * Takes mode config lock.
+ *
+ * Destroy all the FBs associated with @filp.
+ *
+ * Called at DRM file release time, not via ioctl; there is no return value.
+ */
+void drm_fb_release(struct file *filp)
+{
+       struct drm_file *priv = filp->private_data;
+       struct drm_device *dev = priv->minor->dev;
+       struct drm_framebuffer *fb, *tfb;
+
+       mutex_lock(&dev->mode_config.mutex);
+       list_for_each_entry_safe(fb, tfb, &priv->fbs, filp_head) {
+               list_del(&fb->filp_head);
+               fb->funcs->destroy(fb);
+       }
+       mutex_unlock(&dev->mode_config.mutex);
+}
+
+/**
+ * drm_mode_attachmode - add a mode to the user mode list
+ * @dev: DRM device
+ * @connector: connector to add the mode to
+ * @mode: mode to add
+ *
+ * Add @mode to @connector's user mode list.
+ */
+static int drm_mode_attachmode(struct drm_device *dev,
+                              struct drm_connector *connector,
+                              struct drm_display_mode *mode)
+{
+       int ret = 0;
+
+       list_add_tail(&mode->head, &connector->user_modes);
+       return ret;
+}
+
+int drm_mode_attachmode_crtc(struct drm_device *dev, struct drm_crtc *crtc,
+                            struct drm_display_mode *mode)
+{
+       struct drm_connector *connector;
+       int ret = 0;
+       struct drm_display_mode *dup_mode;
+       int need_dup = 0;
+       list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+               if (!connector->encoder)
+                       break;
+               if (connector->encoder->crtc == crtc) {
+                       if (need_dup)
+                               dup_mode = drm_mode_duplicate(dev, mode);
+                       else
+                               dup_mode = mode;
+                       ret = drm_mode_attachmode(dev, connector, dup_mode);
+                       if (ret)
+                               return ret;
+                       need_dup = 1;
+               }
+       }
+       return 0;
+}
+EXPORT_SYMBOL(drm_mode_attachmode_crtc);
+
+static int drm_mode_detachmode(struct drm_device *dev,
+                              struct drm_connector *connector,
+                              struct drm_display_mode *mode)
+{
+       int found = 0;
+       int ret = 0;
+       struct drm_display_mode *match_mode, *t;
+
+       list_for_each_entry_safe(match_mode, t, &connector->user_modes, head) {
+               if (drm_mode_equal(match_mode, mode)) {
+                       list_del(&match_mode->head);
+                       drm_mode_destroy(dev, match_mode);
+                       found = 1;
+                       break;
+               }
+       }
+
+       if (!found)
+               ret = -EINVAL;
+
+       return ret;
+}
+
+int drm_mode_detachmode_crtc(struct drm_device *dev, struct drm_display_mode *mode)
+{
+       struct drm_connector *connector;
+
+       list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+               drm_mode_detachmode(dev, connector, mode);
+       }
+       return 0;
+}
+EXPORT_SYMBOL(drm_mode_detachmode_crtc);
+
+/**
+ * drm_mode_attachmode_ioctl - Attach a user mode to a connector
+ * @dev: DRM device
+ * @data: ioctl argument (struct drm_mode_mode_cmd)
+ * @file_priv: DRM file private
+ *
+ * This attaches a user specified mode to a connector.
+ * Called by the user via ioctl.
+ *
+ * RETURNS:
+ * Zero on success, errno on failure.
+ */
+int drm_mode_attachmode_ioctl(struct drm_device *dev,
+                             void *data, struct drm_file *file_priv)
+{
+       struct drm_mode_mode_cmd *mode_cmd = data;
+       struct drm_connector *connector;
+       struct drm_display_mode *mode;
+       struct drm_mode_object *obj;
+       struct drm_mode_modeinfo *umode = &mode_cmd->mode;
+       int ret = 0;
+
+       mutex_lock(&dev->mode_config.mutex);
+
+       obj = drm_mode_object_find(dev, mode_cmd->connector_id, DRM_MODE_OBJECT_CONNECTOR);
+       if (!obj) {
+               ret = -EINVAL;
+               goto out;
+       }
+       connector = obj_to_connector(obj);
+
+       mode = drm_mode_create(dev);
+       if (!mode) {
+               ret = -ENOMEM;
+               goto out;
+       }
+
+       drm_crtc_convert_umode(mode, umode);
+
+       ret = drm_mode_attachmode(dev, connector, mode);
+out:
+       mutex_unlock(&dev->mode_config.mutex);
+       return ret;
+}
+
+/**
+ * drm_mode_detachmode_ioctl - Detach a user specified mode from a connector
+ * @dev: DRM device
+ * @data: ioctl argument (struct drm_mode_mode_cmd)
+ * @file_priv: DRM file private
+ *
+ * Called by the user via ioctl.
+ *
+ * RETURNS:
+ * Zero on success, errno on failure.
+ */
+int drm_mode_detachmode_ioctl(struct drm_device *dev,
+                             void *data, struct drm_file *file_priv)
+{
+       struct drm_mode_object *obj;
+       struct drm_mode_mode_cmd *mode_cmd = data;
+       struct drm_connector *connector;
+       struct drm_display_mode mode;
+       struct drm_mode_modeinfo *umode = &mode_cmd->mode;
+       int ret = 0;
+
+       mutex_lock(&dev->mode_config.mutex);
+
+       obj = drm_mode_object_find(dev, mode_cmd->connector_id, DRM_MODE_OBJECT_CONNECTOR);
+       if (!obj) {
+               ret = -EINVAL;
+               goto out;
+       }
+       connector = obj_to_connector(obj);
+
+       drm_crtc_convert_umode(&mode, umode);
+       ret = drm_mode_detachmode(dev, connector, &mode);
+out:
+       mutex_unlock(&dev->mode_config.mutex);
+       return ret;
+}
+
+struct drm_property *drm_property_create(struct drm_device *dev, int flags,
+                                        const char *name, int num_values)
+{
+       struct drm_property *property = NULL;
+
+       property = kzalloc(sizeof(struct drm_property), GFP_KERNEL);
+       if (!property)
+               return NULL;
+
+       if (num_values) {
+               property->values = kzalloc(sizeof(uint64_t)*num_values, GFP_KERNEL);
+               if (!property->values)
+                       goto fail;
+       }
+
+       drm_mode_object_get(dev, &property->base, DRM_MODE_OBJECT_PROPERTY);
+       property->flags = flags;
+       property->num_values = num_values;
+       INIT_LIST_HEAD(&property->enum_blob_list);
+
+       if (name)
+               strncpy(property->name, name, DRM_PROP_NAME_LEN);
+
+       list_add_tail(&property->head, &dev->mode_config.property_list);
+       return property;
+fail:
+       kfree(property);
+       return NULL;
+}
+EXPORT_SYMBOL(drm_property_create);
+
+int drm_property_add_enum(struct drm_property *property, int index,
+                         uint64_t value, const char *name)
+{
+       struct drm_property_enum *prop_enum;
+
+       if (!(property->flags & DRM_MODE_PROP_ENUM))
+               return -EINVAL;
+
+       if (!list_empty(&property->enum_blob_list)) {
+               list_for_each_entry(prop_enum, &property->enum_blob_list, head) {
+                       if (prop_enum->value == value) {
+                               strncpy(prop_enum->name, name, DRM_PROP_NAME_LEN);
+                               prop_enum->name[DRM_PROP_NAME_LEN-1] = '\0';
+                               return 0;
+                       }
+               }
+       }
+
+       prop_enum = kzalloc(sizeof(struct drm_property_enum), GFP_KERNEL);
+       if (!prop_enum)
+               return -ENOMEM;
+
+       strncpy(prop_enum->name, name, DRM_PROP_NAME_LEN);
+       prop_enum->name[DRM_PROP_NAME_LEN-1] = '\0';
+       prop_enum->value = value;
+
+       property->values[index] = value;
+       list_add_tail(&prop_enum->head, &property->enum_blob_list);
+       return 0;
+}
+EXPORT_SYMBOL(drm_property_add_enum);
+
+void drm_property_destroy(struct drm_device *dev, struct drm_property *property)
+{
+       struct drm_property_enum *prop_enum, *pt;
+
+       list_for_each_entry_safe(prop_enum, pt, &property->enum_blob_list, head) {
+               list_del(&prop_enum->head);
+               kfree(prop_enum);
+       }
+
+       if (property->num_values)
+               kfree(property->values);
+       drm_mode_object_put(dev, &property->base);
+       list_del(&property->head);
+       kfree(property);
+}
+EXPORT_SYMBOL(drm_property_destroy);
+
+int drm_connector_attach_property(struct drm_connector *connector,
+                              struct drm_property *property, uint64_t init_val)
+{
+       int i;
+
+       for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
+               if (connector->property_ids[i] == 0) {
+                       connector->property_ids[i] = property->base.id;
+                       connector->property_values[i] = init_val;
+                       break;
+               }
+       }
+
+       if (i == DRM_CONNECTOR_MAX_PROPERTY)
+               return -EINVAL;
+       return 0;
+}
+EXPORT_SYMBOL(drm_connector_attach_property);
+
+int drm_connector_property_set_value(struct drm_connector *connector,
+                                 struct drm_property *property, uint64_t value)
+{
+       int i;
+
+       for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
+               if (connector->property_ids[i] == property->base.id) {
+                       connector->property_values[i] = value;
+                       break;
+               }
+       }
+
+       if (i == DRM_CONNECTOR_MAX_PROPERTY)
+               return -EINVAL;
+       return 0;
+}
+EXPORT_SYMBOL(drm_connector_property_set_value);
+
+int drm_connector_property_get_value(struct drm_connector *connector,
+                                 struct drm_property *property, uint64_t *val)
+{
+       int i;
+
+       for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
+               if (connector->property_ids[i] == property->base.id) {
+                       *val = connector->property_values[i];
+                       break;
+               }
+       }
+
+       if (i == DRM_CONNECTOR_MAX_PROPERTY)
+               return -EINVAL;
+       return 0;
+}
+EXPORT_SYMBOL(drm_connector_property_get_value);
+
+int drm_mode_getproperty_ioctl(struct drm_device *dev,
+                              void *data, struct drm_file *file_priv)
+{
+       struct drm_mode_object *obj;
+       struct drm_mode_get_property *out_resp = data;
+       struct drm_property *property;
+       int enum_count = 0;
+       int blob_count = 0;
+       int value_count = 0;
+       int ret = 0, i;
+       int copied;
+       struct drm_property_enum *prop_enum;
+       struct drm_mode_property_enum __user *enum_ptr;
+       struct drm_property_blob *prop_blob;
+       uint32_t __user *blob_id_ptr;
+       uint64_t __user *values_ptr;
+       uint32_t __user *blob_length_ptr;
+
+       mutex_lock(&dev->mode_config.mutex);
+       obj = drm_mode_object_find(dev, out_resp->prop_id, DRM_MODE_OBJECT_PROPERTY);
+       if (!obj) {
+               ret = -EINVAL;
+               goto done;
+       }
+       property = obj_to_property(obj);
+
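+       /*
+        * Two-pass protocol: the counts are always written back, but the
+        * value/enum/blob arrays are only copied out when the caller supplied
+        * buffers large enough (count_values / count_enum_blobs), so userspace
+        * can call once to learn the sizes and again to fetch the data.
+        */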
+       if (property->flags & DRM_MODE_PROP_ENUM) {
+               list_for_each_entry(prop_enum, &property->enum_blob_list, head)
+                       enum_count++;
+       } else if (property->flags & DRM_MODE_PROP_BLOB) {
+               list_for_each_entry(prop_blob, &property->enum_blob_list, head)
+                       blob_count++;
+       }
+
+       value_count = property->num_values;
+
+       strncpy(out_resp->name, property->name, DRM_PROP_NAME_LEN);
+       out_resp->name[DRM_PROP_NAME_LEN-1] = 0;
+       out_resp->flags = property->flags;
+
+       if ((out_resp->count_values >= value_count) && value_count) {
+               values_ptr = (uint64_t __user *)(unsigned long)out_resp->values_ptr;
+               for (i = 0; i < value_count; i++) {
+                       if (copy_to_user(values_ptr + i, &property->values[i], sizeof(uint64_t))) {
+                               ret = -EFAULT;
+                               goto done;
+                       }
+               }
+       }
+       out_resp->count_values = value_count;
+
+       if (property->flags & DRM_MODE_PROP_ENUM) {
+               if ((out_resp->count_enum_blobs >= enum_count) && enum_count) {
+                       copied = 0;
+                       enum_ptr = (struct drm_mode_property_enum __user *)(unsigned long)out_resp->enum_blob_ptr;
+                       list_for_each_entry(prop_enum, &property->enum_blob_list, head) {
+
+                               if (copy_to_user(&enum_ptr[copied].value, &prop_enum->value, sizeof(uint64_t))) {
+                                       ret = -EFAULT;
+                                       goto done;
+                               }
+
+                               if (copy_to_user(&enum_ptr[copied].name,
+                                                &prop_enum->name, DRM_PROP_NAME_LEN)) {
+                                       ret = -EFAULT;
+                                       goto done;
+                               }
+                               copied++;
+                       }
+               }
+               out_resp->count_enum_blobs = enum_count;
+       }
+
+       if (property->flags & DRM_MODE_PROP_BLOB) {
+               if ((out_resp->count_enum_blobs >= blob_count) && blob_count) {
+                       copied = 0;
+                       blob_id_ptr = (uint32_t __user *)(unsigned long)out_resp->enum_blob_ptr;
+                       blob_length_ptr = (uint32_t __user *)(unsigned long)out_resp->values_ptr;
+
+                       list_for_each_entry(prop_blob, &property->enum_blob_list, head) {
+                               if (put_user(prop_blob->base.id, blob_id_ptr + copied)) {
+                                       ret = -EFAULT;
+                                       goto done;
+                               }
+
+                               if (put_user(prop_blob->length, blob_length_ptr + copied)) {
+                                       ret = -EFAULT;
+                                       goto done;
+                               }
+
+                               copied++;
+                       }
+               }
+               out_resp->count_enum_blobs = blob_count;
+       }
+done:
+       mutex_unlock(&dev->mode_config.mutex);
+       return ret;
+}
+
+static struct drm_property_blob *drm_property_create_blob(struct drm_device *dev, int length,
+                                                         void *data)
+{
+       struct drm_property_blob *blob;
+
+       if (!length || !data)
+               return NULL;
+
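+       /* Header and payload live in one allocation; data points just past the struct. */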
+       blob = kzalloc(sizeof(struct drm_property_blob)+length, GFP_KERNEL);
+       if (!blob)
+               return NULL;
+
+       blob->data = (void *)((char *)blob + sizeof(struct drm_property_blob));
+       blob->length = length;
+
+       memcpy(blob->data, data, length);
+
+       drm_mode_object_get(dev, &blob->base, DRM_MODE_OBJECT_BLOB);
+
+       list_add_tail(&blob->head, &dev->mode_config.property_blob_list);
+       return blob;
+}
+
+static void drm_property_destroy_blob(struct drm_device *dev,
+                              struct drm_property_blob *blob)
+{
+       drm_mode_object_put(dev, &blob->base);
+       list_del(&blob->head);
+       kfree(blob);
+}
+
+int drm_mode_getblob_ioctl(struct drm_device *dev,
+                          void *data, struct drm_file *file_priv)
+{
+       struct drm_mode_object *obj;
+       struct drm_mode_get_blob *out_resp = data;
+       struct drm_property_blob *blob;
+       int ret = 0;
+       void __user *blob_ptr;
+
+       mutex_lock(&dev->mode_config.mutex);
+       obj = drm_mode_object_find(dev, out_resp->blob_id, DRM_MODE_OBJECT_BLOB);
+       if (!obj) {
+               ret = -EINVAL;
+               goto done;
+       }
+       blob = obj_to_blob(obj);
+
+       if (out_resp->length == blob->length) {
+               blob_ptr = (void __user *)(unsigned long)out_resp->data;
+               if (copy_to_user(blob_ptr, blob->data, blob->length)){
+                       ret = -EFAULT;
+                       goto done;
+               }
+       }
+       out_resp->length = blob->length;
+
+done:
+       mutex_unlock(&dev->mode_config.mutex);
+       return ret;
+}
+
+int drm_mode_connector_update_edid_property(struct drm_connector *connector,
+                                           struct edid *edid)
+{
+       struct drm_device *dev = connector->dev;
+       int ret = 0;
+
+       if (connector->edid_blob_ptr)
+               drm_property_destroy_blob(dev, connector->edid_blob_ptr);
+
+       /* If there is no EDID, clear the blob pointer and the property value. */
+       if (!edid) {
+               connector->edid_blob_ptr = NULL;
+               ret = drm_connector_property_set_value(connector, dev->mode_config.edid_property, 0);
+               return ret;
+       }
+
+       connector->edid_blob_ptr = drm_property_create_blob(connector->dev, 128, edid);
+       if (!connector->edid_blob_ptr)
+               return -EINVAL;
+
+       ret = drm_connector_property_set_value(connector,
+                                              dev->mode_config.edid_property,
+                                              connector->edid_blob_ptr->base.id);
+
+       return ret;
+}
+EXPORT_SYMBOL(drm_mode_connector_update_edid_property);
+
+int drm_mode_connector_property_set_ioctl(struct drm_device *dev,
+                                      void *data, struct drm_file *file_priv)
+{
+       struct drm_mode_connector_set_property *out_resp = data;
+       struct drm_mode_object *obj;
+       struct drm_property *property;
+       struct drm_connector *connector;
+       int ret = -EINVAL;
+       int i;
+
+       mutex_lock(&dev->mode_config.mutex);
+
+       obj = drm_mode_object_find(dev, out_resp->connector_id, DRM_MODE_OBJECT_CONNECTOR);
+       if (!obj) {
+               goto out;
+       }
+       connector = obj_to_connector(obj);
+
+       for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
+               if (connector->property_ids[i] == out_resp->prop_id)
+                       break;
+       }
+
+       if (i == DRM_CONNECTOR_MAX_PROPERTY) {
+               goto out;
+       }
+
+       obj = drm_mode_object_find(dev, out_resp->prop_id, DRM_MODE_OBJECT_PROPERTY);
+       if (!obj) {
+               goto out;
+       }
+       property = obj_to_property(obj);
+
+       if (property->flags & DRM_MODE_PROP_IMMUTABLE)
+               goto out;
+
+       if (property->flags & DRM_MODE_PROP_RANGE) {
+               if (out_resp->value < property->values[0])
+                       goto out;
+
+               if (out_resp->value > property->values[1])
+                       goto out;
+       } else {
+               int found = 0;
+               for (i = 0; i < property->num_values; i++) {
+                       if (property->values[i] == out_resp->value) {
+                               found = 1;
+                               break;
+                       }
+               }
+               if (!found) {
+                       goto out;
+               }
+       }
+
+       if (connector->funcs->set_property)
+               ret = connector->funcs->set_property(connector, property, out_resp->value);
+
+       /* store the property value if successful */
+       if (!ret)
+               drm_connector_property_set_value(connector, property, out_resp->value);
+out:
+       mutex_unlock(&dev->mode_config.mutex);
+       return ret;
+}
+
+int drm_mode_connector_attach_encoder(struct drm_connector *connector,
+                                     struct drm_encoder *encoder)
+{
+       int i;
+
+       for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
+               if (connector->encoder_ids[i] == 0) {
+                       connector->encoder_ids[i] = encoder->base.id;
+                       return 0;
+               }
+       }
+       return -ENOMEM;
+}
+EXPORT_SYMBOL(drm_mode_connector_attach_encoder);
+
+void drm_mode_connector_detach_encoder(struct drm_connector *connector,
+                                   struct drm_encoder *encoder)
+{
+       int i;
+       for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
+               if (connector->encoder_ids[i] == encoder->base.id) {
+                       connector->encoder_ids[i] = 0;
+                       if (connector->encoder == encoder)
+                               connector->encoder = NULL;
+                       break;
+               }
+       }
+}
+EXPORT_SYMBOL(drm_mode_connector_detach_encoder);
+
+bool drm_mode_crtc_set_gamma_size(struct drm_crtc *crtc,
+                                 int gamma_size)
+{
+       crtc->gamma_size = gamma_size;
+
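+       /* One contiguous table: gamma_size red entries, then green, then blue (16 bits each). */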
+       crtc->gamma_store = kzalloc(gamma_size * sizeof(uint16_t) * 3, GFP_KERNEL);
+       if (!crtc->gamma_store) {
+               crtc->gamma_size = 0;
+               return false;
+       }
+
+       return true;
+}
+EXPORT_SYMBOL(drm_mode_crtc_set_gamma_size);
+
+int drm_mode_gamma_set_ioctl(struct drm_device *dev,
+                            void *data, struct drm_file *file_priv)
+{
+       struct drm_mode_crtc_lut *crtc_lut = data;
+       struct drm_mode_object *obj;
+       struct drm_crtc *crtc;
+       void *r_base, *g_base, *b_base;
+       int size;
+       int ret = 0;
+
+       mutex_lock(&dev->mode_config.mutex);
+       obj = drm_mode_object_find(dev, crtc_lut->crtc_id, DRM_MODE_OBJECT_CRTC);
+       if (!obj) {
+               ret = -EINVAL;
+               goto out;
+       }
+       crtc = obj_to_crtc(obj);
+
+       /* memcpy into gamma store */
+       if (crtc_lut->gamma_size != crtc->gamma_size) {
+               ret = -EINVAL;
+               goto out;
+       }
+
+       size = crtc_lut->gamma_size * (sizeof(uint16_t));
+       r_base = crtc->gamma_store;
+       if (copy_from_user(r_base, (void __user *)(unsigned long)crtc_lut->red, size)) {
+               ret = -EFAULT;
+               goto out;
+       }
+
+       g_base = r_base + size;
+       if (copy_from_user(g_base, (void __user *)(unsigned long)crtc_lut->green, size)) {
+               ret = -EFAULT;
+               goto out;
+       }
+
+       b_base = g_base + size;
+       if (copy_from_user(b_base, (void __user *)(unsigned long)crtc_lut->blue, size)) {
+               ret = -EFAULT;
+               goto out;
+       }
+
+       crtc->funcs->gamma_set(crtc, r_base, g_base, b_base, crtc->gamma_size);
+
+out:
+       mutex_unlock(&dev->mode_config.mutex);
+       return ret;
+
+}
+
+int drm_mode_gamma_get_ioctl(struct drm_device *dev,
+                            void *data, struct drm_file *file_priv)
+{
+       struct drm_mode_crtc_lut *crtc_lut = data;
+       struct drm_mode_object *obj;
+       struct drm_crtc *crtc;
+       void *r_base, *g_base, *b_base;
+       int size;
+       int ret = 0;
+
+       mutex_lock(&dev->mode_config.mutex);
+       obj = drm_mode_object_find(dev, crtc_lut->crtc_id, DRM_MODE_OBJECT_CRTC);
+       if (!obj) {
+               ret = -EINVAL;
+               goto out;
+       }
+       crtc = obj_to_crtc(obj);
+
+       /* memcpy into gamma store */
+       if (crtc_lut->gamma_size != crtc->gamma_size) {
+               ret = -EINVAL;
+               goto out;
+       }
+
+       size = crtc_lut->gamma_size * (sizeof(uint16_t));
+       r_base = crtc->gamma_store;
+       if (copy_to_user((void __user *)(unsigned long)crtc_lut->red, r_base, size)) {
+               ret = -EFAULT;
+               goto out;
+       }
+
+       g_base = r_base + size;
+       if (copy_to_user((void __user *)(unsigned long)crtc_lut->green, g_base, size)) {
+               ret = -EFAULT;
+               goto out;
+       }
+
+       b_base = g_base + size;
+       if (copy_to_user((void __user *)(unsigned long)crtc_lut->blue, b_base, size)) {
+               ret = -EFAULT;
+               goto out;
+       }
+out:
+       mutex_unlock(&dev->mode_config.mutex);
+       return ret;
+}
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
new file mode 100644 (file)
index 0000000..d8a982b
--- /dev/null
@@ -0,0 +1,826 @@
+/*
+ * Copyright (c) 2006-2008 Intel Corporation
+ * Copyright (c) 2007 Dave Airlie <airlied@linux.ie>
+ *
+ * DRM core CRTC related functions
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission.  The copyright holders make no representations
+ * about the suitability of this software for any purpose.  It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ *
+ * Authors:
+ *      Keith Packard
+ *     Eric Anholt <eric@anholt.net>
+ *      Dave Airlie <airlied@linux.ie>
+ *      Jesse Barnes <jesse.barnes@intel.com>
+ */
+
+#include "drmP.h"
+#include "drm_crtc.h"
+#include "drm_crtc_helper.h"
+
+/*
+ * Detailed mode info for 800x600@60Hz
+ */
+static struct drm_display_mode std_mode[] = {
+       { DRM_MODE("800x600", DRM_MODE_TYPE_DEFAULT, 40000, 800, 840,
+                  968, 1056, 0, 600, 601, 605, 628, 0,
+                  DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+};
+
+/**
+ * drm_helper_probe_single_connector_modes - get complete set of display modes
+ * @connector: connector to probe
+ * @maxX: max width for modes
+ * @maxY: max height for modes
+ *
+ * LOCKING:
+ * Caller must hold mode config lock.
+ *
+ * Using @connector's helper callbacks, detect its current status and probe
+ * for modes.  Modes will first be added to the connector's probed_modes
+ * list, then culled (based on validity and the @maxX, @maxY parameters) and
+ * put into the normal modes list.
+ *
+ * Intended to be used either at bootup time or when major configuration
+ * changes have occurred.
+ *
+ * FIXME: take into account monitor limits
+ */
+void drm_helper_probe_single_connector_modes(struct drm_connector *connector,
+                                            uint32_t maxX, uint32_t maxY)
+{
+       struct drm_device *dev = connector->dev;
+       struct drm_display_mode *mode, *t;
+       struct drm_connector_helper_funcs *connector_funcs =
+               connector->helper_private;
+       int ret;
+
+       DRM_DEBUG("%s\n", drm_get_connector_name(connector));
+       /* set all modes to the unverified state */
+       list_for_each_entry_safe(mode, t, &connector->modes, head)
+               mode->status = MODE_UNVERIFIED;
+
+       connector->status = connector->funcs->detect(connector);
+
+       if (connector->status == connector_status_disconnected) {
+               DRM_DEBUG("%s is disconnected\n",
+                         drm_get_connector_name(connector));
+               /* TODO set EDID to NULL */
+               return;
+       }
+
+       ret = (*connector_funcs->get_modes)(connector);
+
+       if (ret) {
+               drm_mode_connector_list_update(connector);
+       }
+
+       if (maxX && maxY)
+               drm_mode_validate_size(dev, &connector->modes, maxX,
+                                      maxY, 0);
+       list_for_each_entry_safe(mode, t, &connector->modes, head) {
+               if (mode->status == MODE_OK)
+                       mode->status = connector_funcs->mode_valid(connector,
+                                                                  mode);
+       }
+
+
+       drm_mode_prune_invalid(dev, &connector->modes, true);
+
+       if (list_empty(&connector->modes)) {
+               struct drm_display_mode *stdmode;
+
+               DRM_DEBUG("No valid modes on %s\n",
+                         drm_get_connector_name(connector));
+
+               /* Should we do this here?
+                * When no valid EDID modes are available we used to bail
+                * out here; now we add the standard 800x600@60Hz mode from
+                * std_mode[] above and carry on.
+                */
+               stdmode = drm_mode_duplicate(dev, &std_mode[0]);
+               drm_mode_probed_add(connector, stdmode);
+               drm_mode_list_concat(&connector->probed_modes,
+                                    &connector->modes);
+
+               DRM_DEBUG("Adding standard 640x480 @ 60Hz to %s\n",
+                         drm_get_connector_name(connector));
+       }
+
+       drm_mode_sort(&connector->modes);
+
+       DRM_DEBUG("Probed modes for %s\n", drm_get_connector_name(connector));
+       list_for_each_entry_safe(mode, t, &connector->modes, head) {
+               mode->vrefresh = drm_mode_vrefresh(mode);
+
+               drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
+               drm_mode_debug_printmodeline(mode);
+       }
+}
+EXPORT_SYMBOL(drm_helper_probe_single_connector_modes);
+
+void drm_helper_probe_connector_modes(struct drm_device *dev, uint32_t maxX,
+                                     uint32_t maxY)
+{
+       struct drm_connector *connector;
+
+       list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+               drm_helper_probe_single_connector_modes(connector, maxX, maxY);
+       }
+}
+EXPORT_SYMBOL(drm_helper_probe_connector_modes);
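+
+/*
+ * Illustrative call (an assumption about driver usage, mirroring what
+ * drm_helper_plugged_event() below does): a modesetting driver would
+ * typically run
+ *
+ *     drm_helper_probe_connector_modes(dev, dev->mode_config.max_width,
+ *                                      dev->mode_config.max_height);
+ *
+ * once its connectors are registered.
+ */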
+
+
+/**
+ * drm_helper_crtc_in_use - check if a given CRTC is in a mode_config
+ * @crtc: CRTC to check
+ *
+ * LOCKING:
+ * Caller must hold mode config lock.
+ *
+ * Walk @crtc's DRM device's mode_config and see if it's in use.
+ *
+ * RETURNS:
+ * True if @crtc is part of the mode_config, false otherwise.
+ */
+bool drm_helper_crtc_in_use(struct drm_crtc *crtc)
+{
+       struct drm_encoder *encoder;
+       struct drm_device *dev = crtc->dev;
+       /* FIXME: Locking around list access? */
+       list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
+               if (encoder->crtc == crtc)
+                       return true;
+       return false;
+}
+EXPORT_SYMBOL(drm_helper_crtc_in_use);
+
+/**
+ * drm_helper_disable_unused_functions - disable unused objects
+ * @dev: DRM device
+ *
+ * LOCKING:
+ * Caller must hold mode config lock.
+ *
+ * If a connector or CRTC isn't part of @dev's mode_config, it can be disabled
+ * by calling its dpms function, which should power it off.
+ */
+void drm_helper_disable_unused_functions(struct drm_device *dev)
+{
+       struct drm_encoder *encoder;
+       struct drm_encoder_helper_funcs *encoder_funcs;
+       struct drm_crtc *crtc;
+
+       list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+               encoder_funcs = encoder->helper_private;
+               if (!encoder->crtc)
+                       (*encoder_funcs->dpms)(encoder, DRM_MODE_DPMS_OFF);
+       }
+
+       list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+               struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+               crtc->enabled = drm_helper_crtc_in_use(crtc);
+               if (!crtc->enabled) {
+                       crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
+                       crtc->fb = NULL;
+               }
+       }
+}
+EXPORT_SYMBOL(drm_helper_disable_unused_functions);
+
+static struct drm_display_mode *drm_has_preferred_mode(struct drm_connector *connector, int width, int height)
+{
+       struct drm_display_mode *mode;
+
+       list_for_each_entry(mode, &connector->modes, head) {
+               if (drm_mode_width(mode) > width ||
+                   drm_mode_height(mode) > height)
+                       continue;
+               if (mode->type & DRM_MODE_TYPE_PREFERRED)
+                       return mode;
+       }
+       return NULL;
+}
+
+static bool drm_connector_enabled(struct drm_connector *connector, bool strict)
+{
+       bool enable;
+
+       if (strict) {
+               enable = connector->status == connector_status_connected;
+       } else {
+               enable = connector->status != connector_status_disconnected;
+       }
+       return enable;
+}
+
+static void drm_enable_connectors(struct drm_device *dev, bool *enabled)
+{
+       bool any_enabled = false;
+       struct drm_connector *connector;
+       int i = 0;
+
+       list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+               enabled[i] = drm_connector_enabled(connector, true);
+               any_enabled |= enabled[i];
+               i++;
+       }
+
+       if (any_enabled)
+               return;
+
+       i = 0;
+       list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+               enabled[i] = drm_connector_enabled(connector, false);
+               i++;
+       }
+}
+
+static bool drm_target_preferred(struct drm_device *dev,
+                                struct drm_display_mode **modes,
+                                bool *enabled, int width, int height)
+{
+       struct drm_connector *connector;
+       int i = 0;
+
+       list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+
+               if (enabled[i] == false) {
+                       i++;
+                       continue;
+               }
+
+               modes[i] = drm_has_preferred_mode(connector, width, height);
+               if (!modes[i]) {
+                       list_for_each_entry(modes[i], &connector->modes, head)
+                               break;
+               }
+               i++;
+       }
+       return true;
+}
+
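+/*
+ * Recursively pick a CRTC for connector n and for every connector after it.
+ * Each candidate assignment is scored (connected connectors and connectors
+ * with a preferred mode count extra) and the best-scoring combination is
+ * copied into best_crtcs.  CRTCs already claimed by an earlier connector are
+ * skipped, so cloning is not attempted here.
+ */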
+static int drm_pick_crtcs(struct drm_device *dev,
+                         struct drm_crtc **best_crtcs,
+                         struct drm_display_mode **modes,
+                         int n, int width, int height)
+{
+       int c, o;
+       struct drm_connector *connector;
+       struct drm_connector_helper_funcs *connector_funcs;
+       struct drm_encoder *encoder;
+       struct drm_crtc *best_crtc;
+       int my_score, best_score, score;
+       struct drm_crtc **crtcs, *crtc;
+
+       if (n == dev->mode_config.num_connector)
+               return 0;
+       c = 0;
+       list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+               if (c == n)
+                       break;
+               c++;
+       }
+
+       best_crtcs[n] = NULL;
+       best_crtc = NULL;
+       best_score = drm_pick_crtcs(dev, best_crtcs, modes, n+1, width, height);
+       if (modes[n] == NULL)
+               return best_score;
+
+       crtcs = kmalloc(dev->mode_config.num_connector *
+                       sizeof(struct drm_crtc *), GFP_KERNEL);
+       if (!crtcs)
+               return best_score;
+
+       my_score = 1;
+       if (connector->status == connector_status_connected)
+               my_score++;
+       if (drm_has_preferred_mode(connector, width, height))
+               my_score++;
+
+       connector_funcs = connector->helper_private;
+       encoder = connector_funcs->best_encoder(connector);
+       if (!encoder)
+               goto out;
+
+       connector->encoder = encoder;
+
+       /* select a crtc for this connector and then attempt to configure
+          remaining connectors */
+       c = 0;
+       list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+
+               if ((connector->encoder->possible_crtcs & (1 << c)) == 0) {
+                       c++;
+                       continue;
+               }
+
+               for (o = 0; o < n; o++)
+                       if (best_crtcs[o] == crtc)
+                               break;
+
+               if (o < n) {
+                       /* ignore cloning for now */
+                       c++;
+                       continue;
+               }
+
+               crtcs[n] = crtc;
+               memcpy(crtcs, best_crtcs, n * sizeof(struct drm_crtc *));
+               score = my_score + drm_pick_crtcs(dev, crtcs, modes, n + 1,
+                                                 width, height);
+               if (score > best_score) {
+                       best_crtc = crtc;
+                       best_score = score;
+                       memcpy(best_crtcs, crtcs,
+                              dev->mode_config.num_connector *
+                              sizeof(struct drm_crtc *));
+               }
+               c++;
+       }
+out:
+       kfree(crtcs);
+       return best_score;
+}
+
+static void drm_setup_crtcs(struct drm_device *dev)
+{
+       struct drm_crtc **crtcs;
+       struct drm_display_mode **modes;
+       struct drm_encoder *encoder;
+       struct drm_connector *connector;
+       bool *enabled;
+       int width, height;
+       int i, ret;
+
+       width = dev->mode_config.max_width;
+       height = dev->mode_config.max_height;
+
+       /* clean out all the encoder/crtc combos */
+       list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+               encoder->crtc = NULL;
+       }
+
+       crtcs = kcalloc(dev->mode_config.num_connector,
+                       sizeof(struct drm_crtc *), GFP_KERNEL);
+       modes = kcalloc(dev->mode_config.num_connector,
+                       sizeof(struct drm_display_mode *), GFP_KERNEL);
+       enabled = kcalloc(dev->mode_config.num_connector,
+                         sizeof(bool), GFP_KERNEL);
+
+       drm_enable_connectors(dev, enabled);
+
+       ret = drm_target_preferred(dev, modes, enabled, width, height);
+       if (!ret)
+               DRM_ERROR("Unable to find initial modes\n");
+
+       drm_pick_crtcs(dev, crtcs, modes, 0, width, height);
+
+       i = 0;
+       list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+               struct drm_display_mode *mode = modes[i];
+               struct drm_crtc *crtc = crtcs[i];
+
+               if (connector->encoder == NULL) {
+                       i++;
+                       continue;
+               }
+
+               if (mode && crtc) {
+                       crtc->desired_mode = mode;
+                       connector->encoder->crtc = crtc;
+               } else
+                       connector->encoder->crtc = NULL;
+               i++;
+       }
+
+       kfree(crtcs);
+       kfree(modes);
+       kfree(enabled);
+}
+/**
+ * drm_crtc_helper_set_mode - set a mode on a CRTC
+ * @crtc: CRTC to program
+ * @mode: mode to use
+ * @x: horizontal offset of the scanout within the framebuffer
+ * @y: vertical offset of the scanout within the framebuffer
+ * @old_fb: previous framebuffer, passed through to the mode set hooks
+ *
+ * LOCKING:
+ * Caller must hold mode config lock.
+ *
+ * Try to set @mode on @crtc.  Give @crtc and its associated connectors a chance
+ * to fixup or reject the mode prior to trying to set it.
+ *
+ * RETURNS:
+ * True if the mode was set successfully, or false otherwise.
+ */
+bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
+                             struct drm_display_mode *mode,
+                             int x, int y,
+                             struct drm_framebuffer *old_fb)
+{
+       struct drm_device *dev = crtc->dev;
+       struct drm_display_mode *adjusted_mode, saved_mode;
+       struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+       struct drm_encoder_helper_funcs *encoder_funcs;
+       int saved_x, saved_y;
+       struct drm_encoder *encoder;
+       bool ret = true;
+
+       adjusted_mode = drm_mode_duplicate(dev, mode);
+
+       crtc->enabled = drm_helper_crtc_in_use(crtc);
+
+       if (!crtc->enabled)
+               return true;
+
+       saved_mode = crtc->mode;
+       saved_x = crtc->x;
+       saved_y = crtc->y;
+
+       /* Update crtc values up front so the driver can rely on them for mode
+        * setting.
+        */
+       crtc->mode = *mode;
+       crtc->x = x;
+       crtc->y = y;
+
+       if (drm_mode_equal(&saved_mode, &crtc->mode)) {
+               if (saved_x != crtc->x || saved_y != crtc->y) {
+                       crtc_funcs->mode_set_base(crtc, crtc->x, crtc->y,
+                                                 old_fb);
+                       goto done;
+               }
+       }
+
+       /* Pass our mode to the connectors and the CRTC to give them a chance to
+        * adjust it according to limitations or connector properties, and also
+        * a chance to reject the mode entirely.
+        */
+       list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+
+               if (encoder->crtc != crtc)
+                       continue;
+               encoder_funcs = encoder->helper_private;
+               if (!(ret = encoder_funcs->mode_fixup(encoder, mode,
+                                                     adjusted_mode))) {
+                       goto done;
+               }
+       }
+
+       if (!(ret = crtc_funcs->mode_fixup(crtc, mode, adjusted_mode))) {
+               goto done;
+       }
+
+       /* Prepare the encoders and CRTCs before setting the mode. */
+       list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+
+               if (encoder->crtc != crtc)
+                       continue;
+               encoder_funcs = encoder->helper_private;
+               /* Disable the encoders as the first thing we do. */
+               encoder_funcs->prepare(encoder);
+       }
+
+       crtc_funcs->prepare(crtc);
+
+       /* Set up the DPLL and any encoders state that needs to adjust or depend
+        * on the DPLL.
+        */
+       crtc_funcs->mode_set(crtc, mode, adjusted_mode, x, y, old_fb);
+
+       list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+
+               if (encoder->crtc != crtc)
+                       continue;
+
+               DRM_INFO("%s: set mode %s %x\n", drm_get_encoder_name(encoder),
+                        mode->name, mode->base.id);
+               encoder_funcs = encoder->helper_private;
+               encoder_funcs->mode_set(encoder, mode, adjusted_mode);
+       }
+
+       /* Now enable the clocks, plane, pipe, and connectors that we set up. */
+       crtc_funcs->commit(crtc);
+
+       list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+
+               if (encoder->crtc != crtc)
+                       continue;
+
+               encoder_funcs = encoder->helper_private;
+               encoder_funcs->commit(encoder);
+
+       }
+
+       /* XXX free adjustedmode */
+       drm_mode_destroy(dev, adjusted_mode);
+       /* FIXME: add subpixel order */
+done:
+       if (!ret) {
+               crtc->mode = saved_mode;
+               crtc->x = saved_x;
+               crtc->y = saved_y;
+       }
+
+       return ret;
+}
+EXPORT_SYMBOL(drm_crtc_helper_set_mode);
+
+
+/**
+ * drm_crtc_helper_set_config - set a new config from userspace
+ * @set: mode set configuration: CRTC, mode, framebuffer, scanout offsets and
+ *       the connectors to drive with this CRTC
+ *
+ * LOCKING:
+ * Caller must hold mode config lock.
+ *
+ * Set up the new configuration, provided by the user in @set, and enable it.
+ *
+ * RETURNS:
+ * Zero on success, negative errno on failure.
+ */
+int drm_crtc_helper_set_config(struct drm_mode_set *set)
+{
+       struct drm_device *dev;
+       struct drm_crtc **save_crtcs, *new_crtc;
+       struct drm_encoder **save_encoders, *new_encoder;
+       struct drm_framebuffer *old_fb;
+       bool save_enabled;
+       bool changed = false;
+       bool flip_or_move = false;
+       struct drm_connector *connector;
+       int count = 0, ro, fail = 0;
+       struct drm_crtc_helper_funcs *crtc_funcs;
+       int ret = 0;
+
+       DRM_DEBUG("\n");
+
+       if (!set)
+               return -EINVAL;
+
+       if (!set->crtc)
+               return -EINVAL;
+
+       if (!set->crtc->helper_private)
+               return -EINVAL;
+
+       crtc_funcs = set->crtc->helper_private;
+
+       DRM_DEBUG("crtc: %p %d fb: %p connectors: %p num_connectors: %d (x, y) (%i, %i)\n",
+                 set->crtc, set->crtc->base.id, set->fb, set->connectors,
+                 (int)set->num_connectors, set->x, set->y);
+
+       dev = set->crtc->dev;
+
+       /* save previous config */
+       save_enabled = set->crtc->enabled;
+
+       /* this is meant to be num_connector not num_crtc */
+       save_crtcs = kzalloc(dev->mode_config.num_connector *
+                            sizeof(struct drm_crtc *), GFP_KERNEL);
+       if (!save_crtcs)
+               return -ENOMEM;
+
+       save_encoders = kzalloc(dev->mode_config.num_connector *
+                               sizeof(struct drm_encoder *), GFP_KERNEL);
+       if (!save_encoders) {
+               kfree(save_crtcs);
+               return -ENOMEM;
+       }
+
+       /* We should be able to check here if the fb has the same properties
+        * and then just flip_or_move it */
+       if (set->crtc->fb != set->fb) {
+               /* if we have no fb then its a change not a flip */
+               if (set->crtc->fb == NULL)
+                       changed = true;
+               else
+                       flip_or_move = true;
+       }
+
+       if (set->x != set->crtc->x || set->y != set->crtc->y)
+               flip_or_move = true;
+
+       if (set->mode && !drm_mode_equal(set->mode, &set->crtc->mode)) {
+               DRM_DEBUG("modes are different\n");
+               drm_mode_debug_printmodeline(&set->crtc->mode);
+               drm_mode_debug_printmodeline(set->mode);
+               changed = true;
+       }
+
+       /* a) traverse passed in connector list and get encoders for them */
+       count = 0;
+       list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+               struct drm_connector_helper_funcs *connector_funcs =
+                       connector->helper_private;
+               save_encoders[count++] = connector->encoder;
+               new_encoder = connector->encoder;
+               for (ro = 0; ro < set->num_connectors; ro++) {
+                       if (set->connectors[ro] == connector) {
+                               new_encoder = connector_funcs->best_encoder(connector);
+                               /* if we can't get an encoder for a connector
+                                  we are setting now - then fail */
+                               if (new_encoder == NULL)
+                                       /* don't break so the fail path works correctly */
+                                       fail = 1;
+                               break;
+                       }
+               }
+
+               if (new_encoder != connector->encoder) {
+                       changed = true;
+                       connector->encoder = new_encoder;
+               }
+       }
+
+       if (fail) {
+               ret = -EINVAL;
+               goto fail_no_encoder;
+       }
+
+       count = 0;
+       list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+               if (!connector->encoder)
+                       continue;
+
+               save_crtcs[count++] = connector->encoder->crtc;
+
+               if (connector->encoder->crtc == set->crtc)
+                       new_crtc = NULL;
+               else
+                       new_crtc = connector->encoder->crtc;
+
+               for (ro = 0; ro < set->num_connectors; ro++) {
+                       if (set->connectors[ro] == connector)
+                               new_crtc = set->crtc;
+               }
+               if (new_crtc != connector->encoder->crtc) {
+                       changed = true;
+                       connector->encoder->crtc = new_crtc;
+               }
+       }
+
+       /* mode_set_base is not a required function */
+       if (flip_or_move && !crtc_funcs->mode_set_base)
+               changed = true;
+
+       if (changed) {
+               old_fb = set->crtc->fb;
+               set->crtc->fb = set->fb;
+               set->crtc->enabled = (set->mode != NULL);
+               if (set->mode != NULL) {
+                       DRM_DEBUG("attempting to set mode from userspace\n");
+                       drm_mode_debug_printmodeline(set->mode);
+                       if (!drm_crtc_helper_set_mode(set->crtc, set->mode,
+                                                     set->x, set->y,
+                                                     old_fb)) {
+                               ret = -EINVAL;
+                               goto fail_set_mode;
+                       }
+                       /* TODO are these needed? */
+                       set->crtc->desired_x = set->x;
+                       set->crtc->desired_y = set->y;
+                       set->crtc->desired_mode = set->mode;
+               }
+               drm_helper_disable_unused_functions(dev);
+       } else if (flip_or_move) {
+               old_fb = set->crtc->fb;
+               if (set->crtc->fb != set->fb)
+                       set->crtc->fb = set->fb;
+               crtc_funcs->mode_set_base(set->crtc, set->x, set->y, old_fb);
+       }
+
+       kfree(save_encoders);
+       kfree(save_crtcs);
+       return 0;
+
+fail_set_mode:
+       set->crtc->enabled = save_enabled;
+       count = 0;
+       list_for_each_entry(connector, &dev->mode_config.connector_list, head)
+               connector->encoder->crtc = save_crtcs[count++];
+fail_no_encoder:
+       kfree(save_crtcs);
+       count = 0;
+       list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+               connector->encoder = save_encoders[count++];
+       }
+       kfree(save_encoders);
+       return ret;
+}
+EXPORT_SYMBOL(drm_crtc_helper_set_config);
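+
+/*
+ * Illustrative wiring (an assumption about driver code, not shown in this
+ * patch): a driver is expected to point its CRTC funcs at this helper so the
+ * SETCRTC ioctl lands here, roughly:
+ *
+ *     static const struct drm_crtc_funcs foo_crtc_funcs = {
+ *             .set_config = drm_crtc_helper_set_config,
+ *             .gamma_set = foo_crtc_gamma_set,
+ *     };
+ */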
+
+bool drm_helper_plugged_event(struct drm_device *dev)
+{
+       DRM_DEBUG("\n");
+
+       drm_helper_probe_connector_modes(dev, dev->mode_config.max_width,
+                                        dev->mode_config.max_height);
+
+       drm_setup_crtcs(dev);
+
+       /* alert the driver fb layer */
+       dev->mode_config.funcs->fb_changed(dev);
+
+       /* FIXME: send hotplug event */
+       return true;
+}
+/**
+ * drm_helper_initial_config - setup a sane initial connector configuration
+ * @dev: DRM device
+ * @can_grow: this configuration is growable
+ *
+ * LOCKING:
+ * Called at init time, must take mode config lock.
+ *
+ * Scan the CRTCs and connectors and try to put together an initial setup.
+ * At the moment, this is a cloned configuration across all heads with
+ * a new framebuffer object as the backing store.
+ *
+ * RETURNS:
+ * Zero if everything went ok, nonzero otherwise.
+ */
+bool drm_helper_initial_config(struct drm_device *dev, bool can_grow)
+{
+       int ret = false;
+
+       drm_helper_plugged_event(dev);
+       return ret;
+}
+EXPORT_SYMBOL(drm_helper_initial_config);
+
+/**
+ * drm_helper_hotplug_stage_two - second stage of a hotplug event
+ * @dev: DRM device
+ *
+ * LOCKING:
+ * Caller must hold mode config lock, function might grab struct lock.
+ *
+ * Stage two of a hotplug.
+ *
+ * RETURNS:
+ * Zero on success, errno on failure.
+ */
+int drm_helper_hotplug_stage_two(struct drm_device *dev)
+{
+       drm_helper_plugged_event(dev);
+
+       return 0;
+}
+EXPORT_SYMBOL(drm_helper_hotplug_stage_two);
+
+int drm_helper_mode_fill_fb_struct(struct drm_framebuffer *fb,
+                                  struct drm_mode_fb_cmd *mode_cmd)
+{
+       fb->width = mode_cmd->width;
+       fb->height = mode_cmd->height;
+       fb->pitch = mode_cmd->pitch;
+       fb->bits_per_pixel = mode_cmd->bpp;
+       fb->depth = mode_cmd->depth;
+
+       return 0;
+}
+EXPORT_SYMBOL(drm_helper_mode_fill_fb_struct);
+
+int drm_helper_resume_force_mode(struct drm_device *dev)
+{
+       struct drm_crtc *crtc;
+       int ret;
+
+       list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+
+               if (!crtc->enabled)
+                       continue;
+
+               ret = drm_crtc_helper_set_mode(crtc, &crtc->mode,
+                                              crtc->x, crtc->y, crtc->fb);
+
+               if (ret == false)
+                       DRM_ERROR("failed to set mode on crtc %p\n", crtc);
+       }
+       return 0;
+}
+EXPORT_SYMBOL(drm_helper_resume_force_mode);
index 996097acb5e76b3c3d7698c0885f563ad9248522..febb517ee679255a7e3ea694f8b68225118b95f0 100644 (file)
@@ -74,6 +74,9 @@ static struct drm_ioctl_desc drm_ioctls[] = {
        DRM_IOCTL_DEF(DRM_IOCTL_SET_SAREA_CTX, drm_setsareactx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_IOCTL_GET_SAREA_CTX, drm_getsareactx, DRM_AUTH),
 
+       DRM_IOCTL_DEF(DRM_IOCTL_SET_MASTER, drm_setmaster_ioctl, DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF(DRM_IOCTL_DROP_MASTER, drm_dropmaster_ioctl, DRM_ROOT_ONLY),
+
        DRM_IOCTL_DEF(DRM_IOCTL_ADD_CTX, drm_addctx, DRM_AUTH|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_IOCTL_RM_CTX, drm_rmctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_IOCTL_MOD_CTX, drm_modctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
@@ -123,6 +126,23 @@ static struct drm_ioctl_desc drm_ioctls[] = {
        DRM_IOCTL_DEF(DRM_IOCTL_GEM_CLOSE, drm_gem_close_ioctl, 0),
        DRM_IOCTL_DEF(DRM_IOCTL_GEM_FLINK, drm_gem_flink_ioctl, DRM_AUTH),
        DRM_IOCTL_DEF(DRM_IOCTL_GEM_OPEN, drm_gem_open_ioctl, DRM_AUTH),
+
+       DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETRESOURCES, drm_mode_getresources, DRM_MASTER|DRM_CONTROL_ALLOW),
+       DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCRTC, drm_mode_getcrtc, DRM_MASTER|DRM_CONTROL_ALLOW),
+       DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETCRTC, drm_mode_setcrtc, DRM_MASTER|DRM_CONTROL_ALLOW),
+       DRM_IOCTL_DEF(DRM_IOCTL_MODE_CURSOR, drm_mode_cursor_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW),
+       DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETGAMMA, drm_mode_gamma_get_ioctl, DRM_MASTER),
+       DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETGAMMA, drm_mode_gamma_set_ioctl, DRM_MASTER),
+       DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETENCODER, drm_mode_getencoder, DRM_MASTER|DRM_CONTROL_ALLOW),
+       DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCONNECTOR, drm_mode_getconnector, DRM_MASTER|DRM_CONTROL_ALLOW),
+       DRM_IOCTL_DEF(DRM_IOCTL_MODE_ATTACHMODE, drm_mode_attachmode_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW),
+       DRM_IOCTL_DEF(DRM_IOCTL_MODE_DETACHMODE, drm_mode_detachmode_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW),
+       DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPERTY, drm_mode_getproperty_ioctl, DRM_MASTER | DRM_CONTROL_ALLOW),
+       DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPROPERTY, drm_mode_connector_property_set_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW),
+       DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPBLOB, drm_mode_getblob_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW),
+       DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETFB, drm_mode_getfb, DRM_MASTER|DRM_CONTROL_ALLOW),
+       DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB, drm_mode_addfb, DRM_MASTER|DRM_CONTROL_ALLOW),
+       DRM_IOCTL_DEF(DRM_IOCTL_MODE_RMFB, drm_mode_rmfb, DRM_MASTER|DRM_CONTROL_ALLOW),
 };
 
 #define DRM_CORE_IOCTL_COUNT   ARRAY_SIZE( drm_ioctls )
@@ -138,8 +158,6 @@ static struct drm_ioctl_desc drm_ioctls[] = {
  */
 int drm_lastclose(struct drm_device * dev)
 {
-       struct drm_magic_entry *pt, *next;
-       struct drm_map_list *r_list, *list_t;
        struct drm_vma_entry *vma, *vma_temp;
        int i;
 
@@ -149,13 +167,7 @@ int drm_lastclose(struct drm_device * dev)
                dev->driver->lastclose(dev);
        DRM_DEBUG("driver lastclose completed\n");
 
-       if (dev->unique) {
-               drm_free(dev->unique, strlen(dev->unique) + 1, DRM_MEM_DRIVER);
-               dev->unique = NULL;
-               dev->unique_len = 0;
-       }
-
-       if (dev->irq_enabled)
+       if (dev->irq_enabled && !drm_core_check_feature(dev, DRIVER_MODESET))
                drm_irq_uninstall(dev);
 
        mutex_lock(&dev->struct_mutex);
@@ -164,18 +176,9 @@ int drm_lastclose(struct drm_device * dev)
        drm_drawable_free_all(dev);
        del_timer(&dev->timer);
 
-       /* Clear pid list */
-       if (dev->magicfree.next) {
-               list_for_each_entry_safe(pt, next, &dev->magicfree, head) {
-                       list_del(&pt->head);
-                       drm_ht_remove_item(&dev->magiclist, &pt->hash_item);
-                       drm_free(pt, sizeof(*pt), DRM_MEM_MAGIC);
-               }
-               drm_ht_remove(&dev->magiclist);
-       }
-
        /* Clear AGP information */
-       if (drm_core_has_AGP(dev) && dev->agp) {
+       if (drm_core_has_AGP(dev) && dev->agp &&
+           !drm_core_check_feature(dev, DRIVER_MODESET)) {
                struct drm_agp_mem *entry, *tempe;
 
                /* Remove AGP resources, but leave dev->agp
@@ -194,7 +197,8 @@ int drm_lastclose(struct drm_device * dev)
                dev->agp->acquired = 0;
                dev->agp->enabled = 0;
        }
-       if (drm_core_check_feature(dev, DRIVER_SG) && dev->sg) {
+       if (drm_core_check_feature(dev, DRIVER_SG) && dev->sg &&
+           !drm_core_check_feature(dev, DRIVER_MODESET)) {
                drm_sg_cleanup(dev->sg);
                dev->sg = NULL;
        }
@@ -205,13 +209,6 @@ int drm_lastclose(struct drm_device * dev)
                drm_free(vma, sizeof(*vma), DRM_MEM_VMAS);
        }
 
-       list_for_each_entry_safe(r_list, list_t, &dev->maplist, head) {
-               if (!(r_list->map->flags & _DRM_DRIVER)) {
-                       drm_rmmap_locked(dev, r_list->map);
-                       r_list = NULL;
-               }
-       }
-
        if (drm_core_check_feature(dev, DRIVER_DMA_QUEUE) && dev->queuelist) {
                for (i = 0; i < dev->queue_count; i++) {
                        if (dev->queuelist[i]) {
@@ -228,14 +225,11 @@ int drm_lastclose(struct drm_device * dev)
        }
        dev->queue_count = 0;
 
-       if (drm_core_check_feature(dev, DRIVER_HAVE_DMA))
+       if (drm_core_check_feature(dev, DRIVER_HAVE_DMA) &&
+           !drm_core_check_feature(dev, DRIVER_MODESET))
                drm_dma_takedown(dev);
 
-       if (dev->lock.hw_lock) {
-               dev->sigdata.lock = dev->lock.hw_lock = NULL;   /* SHM removed */
-               dev->lock.file_priv = NULL;
-               wake_up_interruptible(&dev->lock.lock_queue);
-       }
+       dev->dev_mapping = NULL;
        mutex_unlock(&dev->struct_mutex);
 
        DRM_DEBUG("lastclose completed\n");
@@ -263,6 +257,8 @@ int drm_init(struct drm_driver *driver)
 
        DRM_DEBUG("\n");
 
+       INIT_LIST_HEAD(&driver->device_list);
+
        for (i = 0; driver->pci_driver.id_table[i].vendor != 0; i++) {
                pid = (struct pci_device_id *)&driver->pci_driver.id_table[i];
 
@@ -329,35 +325,24 @@ static void drm_cleanup(struct drm_device * dev)
        drm_ht_remove(&dev->map_hash);
        drm_ctxbitmap_cleanup(dev);
 
+       if (drm_core_check_feature(dev, DRIVER_MODESET))
+               drm_put_minor(&dev->control);
+
+       if (dev->driver->driver_features & DRIVER_GEM)
+               drm_gem_destroy(dev);
+
        drm_put_minor(&dev->primary);
        if (drm_put_dev(dev))
                DRM_ERROR("Cannot unload module\n");
 }
 
-static int drm_minors_cleanup(int id, void *ptr, void *data)
-{
-       struct drm_minor *minor = ptr;
-       struct drm_device *dev;
-       struct drm_driver *driver = data;
-
-       dev = minor->dev;
-       if (minor->dev->driver != driver)
-               return 0;
-
-       if (minor->type != DRM_MINOR_LEGACY)
-               return 0;
-
-       if (dev)
-               pci_dev_put(dev->pdev);
-       drm_cleanup(dev);
-       return 1;
-}
-
 void drm_exit(struct drm_driver *driver)
 {
+       struct drm_device *dev, *tmp;
        DRM_DEBUG("\n");
 
-       idr_for_each(&drm_minors_idr, &drm_minors_cleanup, driver);
+       list_for_each_entry_safe(dev, tmp, &driver->device_list, driver_item)
+               drm_cleanup(dev);
 
        DRM_INFO("Module unloaded\n");
 }
@@ -503,7 +488,7 @@ int drm_ioctl(struct inode *inode, struct file *filp,
                retcode = -EINVAL;
        } else if (((ioctl->flags & DRM_ROOT_ONLY) && !capable(CAP_SYS_ADMIN)) ||
                   ((ioctl->flags & DRM_AUTH) && !file_priv->authenticated) ||
-                  ((ioctl->flags & DRM_MASTER) && !file_priv->master)) {
+                  ((ioctl->flags & DRM_MASTER) && !file_priv->is_master)) {
                retcode = -EACCES;
        } else {
                if (cmd & (IOC_IN | IOC_OUT)) {
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
new file mode 100644 (file)
index 0000000..0fbb0da
--- /dev/null
@@ -0,0 +1,732 @@
+/*
+ * Copyright (c) 2006 Luc Verhaegen (quirks list)
+ * Copyright (c) 2007-2008 Intel Corporation
+ *   Jesse Barnes <jesse.barnes@intel.com>
+ *
+ * DDC probing routines (drm_ddc_read & drm_do_probe_ddc_edid) originally from
+ * FB layer.
+ *   Copyright (C) 2006 Dennis Munsie <dmunsie@cecropia.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sub license,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+#include <linux/kernel.h>
+#include <linux/i2c.h>
+#include <linux/i2c-algo-bit.h>
+#include "drmP.h"
+#include "drm_edid.h"
+
+/*
+ * TODO:
+ *   - support EDID 1.4 (incl. CE blocks)
+ */
+
+/*
+ * EDID blocks out in the wild have a variety of bugs, try to collect
+ * them here (note that userspace may work around broken monitors first,
+ * but fixes should make their way here so that the kernel "just works"
+ * on as many displays as possible).
+ */
+
+/* First detailed mode wrong, use largest 60Hz mode */
+#define EDID_QUIRK_PREFER_LARGE_60             (1 << 0)
+/* Reported 135MHz pixel clock is too high, needs adjustment */
+#define EDID_QUIRK_135_CLOCK_TOO_HIGH          (1 << 1)
+/* Prefer the largest mode at 75 Hz */
+#define EDID_QUIRK_PREFER_LARGE_75             (1 << 2)
+/* Detail timing is in cm not mm */
+#define EDID_QUIRK_DETAILED_IN_CM              (1 << 3)
+/* Detailed timing descriptors have bogus size values, so just take the
+ * maximum size and use that.
+ */
+#define EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE   (1 << 4)
+/* Monitor forgot to set the first detailed is preferred bit. */
+#define EDID_QUIRK_FIRST_DETAILED_PREFERRED    (1 << 5)
+/* use +hsync +vsync for detailed mode */
+#define EDID_QUIRK_DETAILED_SYNC_PP            (1 << 6)
+
+static struct edid_quirk {
+       char *vendor;
+       int product_id;
+       u32 quirks;
+} edid_quirk_list[] = {
+       /* Acer AL1706 */
+       { "ACR", 44358, EDID_QUIRK_PREFER_LARGE_60 },
+       /* Acer F51 */
+       { "API", 0x7602, EDID_QUIRK_PREFER_LARGE_60 },
+       /* Unknown Acer */
+       { "ACR", 2423, EDID_QUIRK_FIRST_DETAILED_PREFERRED },
+
+       /* Belinea 10 15 55 */
+       { "MAX", 1516, EDID_QUIRK_PREFER_LARGE_60 },
+       { "MAX", 0x77e, EDID_QUIRK_PREFER_LARGE_60 },
+
+       /* Envision Peripherals, Inc. EN-7100e */
+       { "EPI", 59264, EDID_QUIRK_135_CLOCK_TOO_HIGH },
+
+       /* Funai Electronics PM36B */
+       { "FCM", 13600, EDID_QUIRK_PREFER_LARGE_75 |
+         EDID_QUIRK_DETAILED_IN_CM },
+
+       /* LG Philips LCD LP154W01-A5 */
+       { "LPL", 0, EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE },
+       { "LPL", 0x2a00, EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE },
+
+       /* Philips 107p5 CRT */
+       { "PHL", 57364, EDID_QUIRK_FIRST_DETAILED_PREFERRED },
+
+       /* Proview AY765C */
+       { "PTS", 765, EDID_QUIRK_FIRST_DETAILED_PREFERRED },
+
+       /* Samsung SyncMaster 205BW.  Note: irony */
+       { "SAM", 541, EDID_QUIRK_DETAILED_SYNC_PP },
+       /* Samsung SyncMaster 22[5-6]BW */
+       { "SAM", 596, EDID_QUIRK_PREFER_LARGE_60 },
+       { "SAM", 638, EDID_QUIRK_PREFER_LARGE_60 },
+};
+
+
+/* Valid EDID header has these bytes */
+static u8 edid_header[] = { 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 };
+
+/**
+ * edid_is_valid - sanity check EDID data
+ * @edid: EDID data
+ *
+ * Sanity check the EDID block by looking at the header, the version number
+ * and the checksum.  Return 0 if the EDID doesn't check out, or 1 if it's
+ * valid.
+ */
+static bool edid_is_valid(struct edid *edid)
+{
+       int i;
+       u8 csum = 0;
+       u8 *raw_edid = (u8 *)edid;
+
+       if (memcmp(edid->header, edid_header, sizeof(edid_header)))
+               goto bad;
+       if (edid->version != 1) {
+               DRM_ERROR("EDID has major version %d, instead of 1\n", edid->version);
+               goto bad;
+       }
+       if (edid->revision <= 0 || edid->revision > 3) {
+               DRM_ERROR("EDID has minor version %d, which is not between 0-3\n", edid->revision);
+               goto bad;
+       }
+
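+       /* All EDID_LENGTH bytes, including the checksum byte, must sum to 0 mod 256. */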
+       for (i = 0; i < EDID_LENGTH; i++)
+               csum += raw_edid[i];
+       if (csum) {
+               DRM_ERROR("EDID checksum is invalid, remainder is %d\n", csum);
+               goto bad;
+       }
+
+       return 1;
+
+bad:
+       if (raw_edid) {
+               DRM_ERROR("Raw EDID:\n");
+               print_hex_dump_bytes(KERN_ERR, DUMP_PREFIX_NONE, raw_edid, EDID_LENGTH);
+               printk("\n");
+       }
+       return 0;
+}
+
+/**
+ * edid_vendor - match a string against EDID's obfuscated vendor field
+ * @edid: EDID to match
+ * @vendor: vendor string
+ *
+ * Returns true if @vendor is in @edid, false otherwise
+ */
+static bool edid_vendor(struct edid *edid, char *vendor)
+{
+       char edid_vendor[3];
+
+       edid_vendor[0] = ((edid->mfg_id[0] & 0x7c) >> 2) + '@';
+       edid_vendor[1] = (((edid->mfg_id[0] & 0x3) << 3) |
+                         ((edid->mfg_id[1] & 0xe0) >> 5)) + '@';
+       edid_vendor[2] = (edid->mfg_id[2] & 0x1f) + '@';
+
+       return !strncmp(edid_vendor, vendor, 3);
+}
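+
+/*
+ * Worked example of the decode above: the manufacturer ID packs three
+ * letters ('A' = 1 ... 'Z' = 26) into 15 bits.  For "ACR" (1, 3, 18) the
+ * two mfg_id bytes are 0x04 0x72: (0x04 & 0x7c) >> 2 = 1 -> 'A',
+ * ((0x04 & 0x3) << 3) | ((0x72 & 0xe0) >> 5) = 3 -> 'C', and
+ * 0x72 & 0x1f = 18 -> 'R'.
+ */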
+
+/**
+ * edid_get_quirks - return quirk flags for a given EDID
+ * @edid: EDID to process
+ *
+ * This tells subsequent routines what fixes they need to apply.
+ */
+static u32 edid_get_quirks(struct edid *edid)
+{
+       struct edid_quirk *quirk;
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(edid_quirk_list); i++) {
+               quirk = &edid_quirk_list[i];
+
+               if (edid_vendor(edid, quirk->vendor) &&
+                   (EDID_PRODUCT_ID(edid) == quirk->product_id))
+                       return quirk->quirks;
+       }
+
+       return 0;
+}
+
+#define MODE_SIZE(m) ((m)->hdisplay * (m)->vdisplay)
+#define MODE_REFRESH_DIFF(m, r) (abs((m)->vrefresh - (r)))
+
+
+/**
+ * edid_fixup_preferred - set preferred modes based on quirk list
+ * @connector: has mode list to fix up
+ * @quirks: quirks list
+ *
+ * Walk the mode list for @connector, clearing the preferred status
+ * on existing modes and setting it anew for the right mode ala @quirks.
+ */
+static void edid_fixup_preferred(struct drm_connector *connector,
+                                u32 quirks)
+{
+       struct drm_display_mode *t, *cur_mode, *preferred_mode;
+       int target_refresh = 0;
+
+       if (list_empty(&connector->probed_modes))
+               return;
+
+       if (quirks & EDID_QUIRK_PREFER_LARGE_60)
+               target_refresh = 60;
+       if (quirks & EDID_QUIRK_PREFER_LARGE_75)
+               target_refresh = 75;
+
+       preferred_mode = list_first_entry(&connector->probed_modes,
+                                         struct drm_display_mode, head);
+
+       list_for_each_entry_safe(cur_mode, t, &connector->probed_modes, head) {
+               cur_mode->type &= ~DRM_MODE_TYPE_PREFERRED;
+
+               if (cur_mode == preferred_mode)
+                       continue;
+
+               /* Largest mode is preferred */
+               if (MODE_SIZE(cur_mode) > MODE_SIZE(preferred_mode))
+                       preferred_mode = cur_mode;
+
+               /* At a given size, try to get closest to target refresh */
+               if ((MODE_SIZE(cur_mode) == MODE_SIZE(preferred_mode)) &&
+                   MODE_REFRESH_DIFF(cur_mode, target_refresh) <
+                   MODE_REFRESH_DIFF(preferred_mode, target_refresh)) {
+                       preferred_mode = cur_mode;
+               }
+       }
+
+       preferred_mode->type |= DRM_MODE_TYPE_PREFERRED;
+}
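+
+/*
+ * Example of the selection above: with EDID_QUIRK_PREFER_LARGE_60 set and a
+ * probed list of 1280x1024@60, 1920x1200@75 and 1920x1200@60 (vrefresh
+ * already filled in), the largest area wins first and then the refresh
+ * closest to 60Hz, so 1920x1200@60 ends up with DRM_MODE_TYPE_PREFERRED.
+ */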
+
+/**
+ * drm_mode_std - convert standard mode info (width, height, refresh) into mode
+ * @dev: DRM device to create the new mode on
+ * @t: standard timing params
+ *
+ * Take the standard timing params (in this case width, aspect, and refresh)
+ * and convert them into a real mode using CVT.
+ *
+ * Punts for now, but should eventually use the FB layer's CVT based mode
+ * generation code.
+ */
+struct drm_display_mode *drm_mode_std(struct drm_device *dev,
+                                     struct std_timing *t)
+{
+       struct drm_display_mode *mode;
+       int hsize = t->hsize * 8 + 248, vsize;
+
+       mode = drm_mode_create(dev);
+       if (!mode)
+               return NULL;
+
+       if (t->aspect_ratio == 0)
+               vsize = (hsize * 10) / 16;
+       else if (t->aspect_ratio == 1)
+               vsize = (hsize * 3) / 4;
+       else if (t->aspect_ratio == 2)
+               vsize = (hsize * 4) / 5;
+       else
+               vsize = (hsize * 9) / 16;
+
+       drm_mode_set_name(mode);
+
+       return mode;
+}
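+
+/*
+ * Decode example for the math above: t->hsize == 0x61 gives
+ * 0x61 * 8 + 248 = 1024 horizontal pixels, and aspect_ratio == 1 (4:3)
+ * gives 1024 * 3 / 4 = 768 lines, i.e. the entry describes 1024x768;
+ * the actual timings still need to come from CVT as noted above.
+ */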
+
+/**
+ * drm_mode_detailed - create a new mode from an EDID detailed timing section
+ * @dev: DRM device (needed to create new mode)
+ * @edid: EDID block
+ * @timing: EDID detailed timing info
+ * @quirks: quirks to apply
+ *
+ * An EDID detailed timing block contains enough info for us to create and
+ * return a new struct drm_display_mode.
+ */
+static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev,
+                                                 struct edid *edid,
+                                                 struct detailed_timing *timing,
+                                                 u32 quirks)
+{
+       struct drm_display_mode *mode;
+       struct detailed_pixel_timing *pt = &timing->data.pixel_data;
+
+       if (pt->stereo) {
+               printk(KERN_WARNING "stereo mode not supported\n");
+               return NULL;
+       }
+       if (!pt->separate_sync) {
+               printk(KERN_WARNING "integrated sync not supported\n");
+               return NULL;
+       }
+
+       mode = drm_mode_create(dev);
+       if (!mode)
+               return NULL;
+
+       mode->type = DRM_MODE_TYPE_DRIVER;
+
+       if (quirks & EDID_QUIRK_135_CLOCK_TOO_HIGH)
+               timing->pixel_clock = 1088;
+
+       mode->clock = timing->pixel_clock * 10;
+
+       mode->hdisplay = (pt->hactive_hi << 8) | pt->hactive_lo;
+       mode->hsync_start = mode->hdisplay + ((pt->hsync_offset_hi << 8) |
+                                             pt->hsync_offset_lo);
+       mode->hsync_end = mode->hsync_start +
+               ((pt->hsync_pulse_width_hi << 8) |
+                pt->hsync_pulse_width_lo);
+       mode->htotal = mode->hdisplay + ((pt->hblank_hi << 8) | pt->hblank_lo);
+
+       mode->vdisplay = (pt->vactive_hi << 8) | pt->vactive_lo;
+       mode->vsync_start = mode->vdisplay + ((pt->vsync_offset_hi << 8) |
+                                             pt->vsync_offset_lo);
+       mode->vsync_end = mode->vsync_start +
+               ((pt->vsync_pulse_width_hi << 8) |
+                pt->vsync_pulse_width_lo);
+       mode->vtotal = mode->vdisplay + ((pt->vblank_hi << 8) | pt->vblank_lo);
+
+       drm_mode_set_name(mode);
+
+       if (pt->interlaced)
+               mode->flags |= DRM_MODE_FLAG_INTERLACE;
+
+       if (quirks & EDID_QUIRK_DETAILED_SYNC_PP) {
+               pt->hsync_positive = 1;
+               pt->vsync_positive = 1;
+       }
+
+       mode->flags |= pt->hsync_positive ? DRM_MODE_FLAG_PHSYNC : DRM_MODE_FLAG_NHSYNC;
+       mode->flags |= pt->vsync_positive ? DRM_MODE_FLAG_PVSYNC : DRM_MODE_FLAG_NVSYNC;
+
+       mode->width_mm = pt->width_mm_lo | (pt->width_mm_hi << 8);
+       mode->height_mm = pt->height_mm_lo | (pt->height_mm_hi << 8);
+
+       if (quirks & EDID_QUIRK_DETAILED_IN_CM) {
+               mode->width_mm *= 10;
+               mode->height_mm *= 10;
+       }
+
+       if (quirks & EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE) {
+               mode->width_mm = edid->width_cm * 10;
+               mode->height_mm = edid->height_cm * 10;
+       }
+
+       return mode;
+}
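+
+/*
+ * Decode example for the field reassembly above: a detailed timing with
+ * pixel_clock == 6500 (10kHz units), hactive_hi == 0x4, hactive_lo == 0x00,
+ * hblank_hi == 0x1 and hblank_lo == 0x40 yields mode->clock = 65000 kHz,
+ * mode->hdisplay = 1024 and mode->htotal = 1024 + 320 = 1344, i.e. the
+ * usual 1024x768@60 horizontal timing.
+ */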
+
+/*
+ * Detailed mode definitions used for the EDID "established modes" bitmap.
+ */
+static struct drm_display_mode edid_est_modes[] = {
+       { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 40000, 800, 840,
+                  968, 1056, 0, 600, 601, 605, 628, 0,
+                  DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 800x600@60Hz */
+       { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 36000, 800, 824,
+                  896, 1024, 0, 600, 601, 603,  625, 0,
+                  DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 800x600@56Hz */
+       { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 31500, 640, 656,
+                  720, 840, 0, 480, 481, 484, 500, 0,
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@75Hz */
+       { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 31500, 640, 664,
+                  704,  832, 0, 480, 489, 491, 520, 0,
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@72Hz */
+       { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 30240, 640, 704,
+                  768,  864, 0, 480, 483, 486, 525, 0,
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@67Hz */
+       { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25200, 640, 656,
+                  752, 800, 0, 480, 490, 492, 525, 0,
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@60Hz */
+       { DRM_MODE("720x400", DRM_MODE_TYPE_DRIVER, 35500, 720, 738,
+                  846, 900, 0, 400, 421, 423,  449, 0,
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 720x400@88Hz */
+       { DRM_MODE("720x400", DRM_MODE_TYPE_DRIVER, 28320, 720, 738,
+                  846,  900, 0, 400, 412, 414, 449, 0,
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 720x400@70Hz */
+       { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 135000, 1280, 1296,
+                  1440, 1688, 0, 1024, 1025, 1028, 1066, 0,
+                  DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1280x1024@75Hz */
+       { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 78800, 1024, 1040,
+                  1136, 1312, 0,  768, 769, 772, 800, 0,
+                  DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1024x768@75Hz */
+       { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 75000, 1024, 1048,
+                  1184, 1328, 0,  768, 771, 777, 806, 0,
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 1024x768@70Hz */
+       { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048,
+                  1184, 1344, 0,  768, 771, 777, 806, 0,
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 1024x768@60Hz */
+       { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER,44900, 1024, 1032,
+                  1208, 1264, 0, 768, 768, 776, 817, 0,
+                  DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_INTERLACE) }, /* 1024x768@43Hz */
+       { DRM_MODE("832x624", DRM_MODE_TYPE_DRIVER, 57284, 832, 864,
+                  928, 1152, 0, 624, 625, 628, 667, 0,
+                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 832x624@75Hz */
+       { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 49500, 800, 816,
+                  896, 1056, 0, 600, 601, 604,  625, 0,
+                  DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 800x600@75Hz */
+       { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 50000, 800, 856,
+                  976, 1040, 0, 600, 637, 643, 666, 0,
+                  DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 800x600@72Hz */
+       { DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152, 1216,
+                  1344, 1600, 0,  864, 865, 868, 900, 0,
+                  DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1152x864@75Hz */
+};
+
+#define EDID_EST_TIMINGS 16
+#define EDID_STD_TIMINGS 8
+#define EDID_DETAILED_TIMINGS 4
+
+/**
+ * add_established_modes - get est. modes from EDID and add them
+ * @connector: connector to add the modes to
+ * @edid: EDID block to scan
+ *
+ * Each EDID block contains a bitmap of the supported "established modes" list
+ * (defined above).  Tease them out and add them to @connector's probed mode
+ * list.
+ */
+static int add_established_modes(struct drm_connector *connector, struct edid *edid)
+{
+       struct drm_device *dev = connector->dev;
+       unsigned long est_bits = edid->established_timings.t1 |
+               (edid->established_timings.t2 << 8) |
+               ((edid->established_timings.mfg_rsvd & 0x80) << 9);
+       int i, modes = 0;
+
+       for (i = 0; i <= EDID_EST_TIMINGS; i++)
+               if (est_bits & (1<<i)) {
+                       struct drm_display_mode *newmode;
+                       newmode = drm_mode_duplicate(dev, &edid_est_modes[i]);
+                       if (newmode) {
+                               drm_mode_probed_add(connector, newmode);
+                               modes++;
+                       }
+               }
+
+       return modes;
+}
+
+/**
+ * add_standard_modes - get std. modes from EDID and add them
+ * @connector: connector to add the modes to
+ * @edid: EDID block to scan
+ *
+ * Standard modes can be calculated using the CVT standard.  Grab them from
+ * @edid, calculate them, and add them to the list.
+ */
+static int add_standard_modes(struct drm_connector *connector, struct edid *edid)
+{
+       struct drm_device *dev = connector->dev;
+       int i, modes = 0;
+
+       for (i = 0; i < EDID_STD_TIMINGS; i++) {
+               struct std_timing *t = &edid->standard_timings[i];
+               struct drm_display_mode *newmode;
+
+               /* If std timings bytes are 1, 1 it's empty */
+               if (t->hsize == 1 && (t->aspect_ratio | t->vfreq) == 1)
+                       continue;
+
+               newmode = drm_mode_std(dev, &edid->standard_timings[i]);
+               if (newmode) {
+                       drm_mode_probed_add(connector, newmode);
+                       modes++;
+               }
+       }
+
+       return modes;
+}
+
+/**
+ * add_detailed_info - get detailed mode info from EDID data
+ * @connector: attached connector
+ * @edid: EDID block to scan
+ * @quirks: quirks to apply
+ *
+ * Some of the detailed timing sections may contain mode information.  Grab
+ * it and add it to the list.
+ */
+static int add_detailed_info(struct drm_connector *connector,
+                            struct edid *edid, u32 quirks)
+{
+       struct drm_device *dev = connector->dev;
+       int i, j, modes = 0;
+
+       for (i = 0; i < EDID_DETAILED_TIMINGS; i++) {
+               struct detailed_timing *timing = &edid->detailed_timings[i];
+               struct detailed_non_pixel *data = &timing->data.other_data;
+               struct drm_display_mode *newmode;
+
+               /* EDID up to and including 1.2 may put monitor info here */
+               if (edid->version == 1 && edid->revision < 3)
+                       continue;
+
+               /* Detailed mode timing */
+               if (timing->pixel_clock) {
+                       newmode = drm_mode_detailed(dev, edid, timing, quirks);
+                       if (!newmode)
+                               continue;
+
+                       /* First detailed mode is preferred */
+                       if (i == 0 && edid->preferred_timing)
+                               newmode->type |= DRM_MODE_TYPE_PREFERRED;
+                       drm_mode_probed_add(connector, newmode);
+
+                       modes++;
+                       continue;
+               }
+
+               /* Other timing or info */
+               switch (data->type) {
+               case EDID_DETAIL_MONITOR_SERIAL:
+                       break;
+               case EDID_DETAIL_MONITOR_STRING:
+                       break;
+               case EDID_DETAIL_MONITOR_RANGE:
+                       /* Get monitor range data */
+                       break;
+               case EDID_DETAIL_MONITOR_NAME:
+                       break;
+               case EDID_DETAIL_MONITOR_CPDATA:
+                       break;
+               case EDID_DETAIL_STD_MODES:
+                       /* Five modes per detailed section */
+                       for (j = 0; j < 5; j++) {
+                               struct std_timing *std;
+                               struct drm_display_mode *newmode;
+
+                               std = &data->data.timings[j];
+                               newmode = drm_mode_std(dev, std);
+                               if (newmode) {
+                                       drm_mode_probed_add(connector, newmode);
+                                       modes++;
+                               }
+                       }
+                       break;
+               default:
+                       break;
+               }
+       }
+
+       return modes;
+}
+
+#define DDC_ADDR 0x50
+
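+/*
+ * Fetch a single 128-byte EDID base block over DDC: one combined i2c
+ * transaction writes the zero start offset to the DDC_ADDR slave and then
+ * reads EDID_LENGTH bytes back.  The returned buffer is kmalloc'ed and is
+ * the caller's to free; NULL means the allocation or the transfer failed.
+ */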
+unsigned char *drm_do_probe_ddc_edid(struct i2c_adapter *adapter)
+{
+       unsigned char start = 0x0;
+       unsigned char *buf = kmalloc(EDID_LENGTH, GFP_KERNEL);
+       struct i2c_msg msgs[] = {
+               {
+                       .addr   = DDC_ADDR,
+                       .flags  = 0,
+                       .len    = 1,
+                       .buf    = &start,
+               }, {
+                       .addr   = DDC_ADDR,
+                       .flags  = I2C_M_RD,
+                       .len    = EDID_LENGTH,
+                       .buf    = buf,
+               }
+       };
+
+       if (!buf) {
+               dev_warn(&adapter->dev, "unable to allocate memory for EDID "
+                        "block.\n");
+               return NULL;
+       }
+
+       if (i2c_transfer(adapter, msgs, 2) == 2)
+               return buf;
+
+       dev_info(&adapter->dev, "unable to read EDID block.\n");
+       kfree(buf);
+       return NULL;
+}
+EXPORT_SYMBOL(drm_do_probe_ddc_edid);
+
+static unsigned char *drm_ddc_read(struct i2c_adapter *adapter)
+{
+       struct i2c_algo_bit_data *algo_data = adapter->algo_data;
+       unsigned char *edid = NULL;
+       int i, j;
+
+       algo_data->setscl(algo_data->data, 1);
+
+       for (i = 0; i < 1; i++) {
+               /* For some old monitors we need the
+                * following process to initialize/stop DDC
+                */
+               algo_data->setsda(algo_data->data, 1);
+               msleep(13);
+
+               algo_data->setscl(algo_data->data, 1);
+               for (j = 0; j < 5; j++) {
+                       msleep(10);
+                       if (algo_data->getscl(algo_data->data))
+                               break;
+               }
+               if (j == 5)
+                       continue;
+
+               algo_data->setsda(algo_data->data, 0);
+               msleep(15);
+               algo_data->setscl(algo_data->data, 0);
+               msleep(15);
+               algo_data->setsda(algo_data->data, 1);
+               msleep(15);
+
+               /* Do the real work */
+               edid = drm_do_probe_ddc_edid(adapter);
+               algo_data->setsda(algo_data->data, 0);
+               algo_data->setscl(algo_data->data, 0);
+               msleep(15);
+
+               algo_data->setscl(algo_data->data, 1);
+               for (j = 0; j < 10; j++) {
+                       msleep(10);
+                       if (algo_data->getscl(algo_data->data))
+                               break;
+               }
+
+               algo_data->setsda(algo_data->data, 1);
+               msleep(15);
+               algo_data->setscl(algo_data->data, 0);
+               algo_data->setsda(algo_data->data, 0);
+               if (edid)
+                       break;
+       }
+       /* Release the DDC lines when done or the Apple Cinema HD display
+        * will switch off
+        */
+       algo_data->setsda(algo_data->data, 1);
+       algo_data->setscl(algo_data->data, 1);
+
+       return edid;
+}
+
+/**
+ * drm_get_edid - get EDID data, if available
+ * @connector: connector we're probing
+ * @adapter: i2c adapter to use for DDC
+ *
+ * Poke the given connector's i2c channel to grab EDID data if possible.
+ *
+ * Return edid data or NULL if we couldn't find any.
+ */
+struct edid *drm_get_edid(struct drm_connector *connector,
+                         struct i2c_adapter *adapter)
+{
+       struct edid *edid;
+
+       edid = (struct edid *)drm_ddc_read(adapter);
+       if (!edid) {
+               dev_warn(&connector->dev->pdev->dev, "%s: no EDID data\n",
+                        drm_get_connector_name(connector));
+               return NULL;
+       }
+       if (!edid_is_valid(edid)) {
+               dev_warn(&connector->dev->pdev->dev, "%s: EDID invalid.\n",
+                        drm_get_connector_name(connector));
+               kfree(edid);
+               return NULL;
+       }
+
+       connector->display_info.raw_edid = (char *)edid;
+
+       return edid;
+}
+EXPORT_SYMBOL(drm_get_edid);
+
+/**
+ * drm_add_edid_modes - add modes from EDID data, if available
+ * @connector: connector we're probing
+ * @edid: edid data
+ *
+ * Add the specified modes to the connector's mode list.
+ *
+ * Return number of modes added or 0 if we couldn't find any.
+ */
+int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid)
+{
+       int num_modes = 0;
+       u32 quirks;
+
+       if (edid == NULL) {
+               return 0;
+       }
+       if (!edid_is_valid(edid)) {
+               dev_warn(&connector->dev->pdev->dev, "%s: EDID invalid.\n",
+                        drm_get_connector_name(connector));
+               return 0;
+       }
+
+       quirks = edid_get_quirks(edid);
+
+       num_modes += add_established_modes(connector, edid);
+       num_modes += add_standard_modes(connector, edid);
+       num_modes += add_detailed_info(connector, edid, quirks);
+
+       if (quirks & (EDID_QUIRK_PREFER_LARGE_60 | EDID_QUIRK_PREFER_LARGE_75))
+               edid_fixup_preferred(connector, quirks);
+
+       connector->display_info.serration_vsync = edid->serration_vsync;
+       connector->display_info.sync_on_green = edid->sync_on_green;
+       connector->display_info.composite_sync = edid->composite_sync;
+       connector->display_info.separate_syncs = edid->separate_syncs;
+       connector->display_info.blank_to_black = edid->blank_to_black;
+       connector->display_info.video_level = edid->video_level;
+       connector->display_info.digital = edid->digital;
+       connector->display_info.width_mm = edid->width_cm * 10;
+       connector->display_info.height_mm = edid->height_cm * 10;
+       connector->display_info.gamma = edid->gamma;
+       connector->display_info.gtf_supported = edid->default_gtf;
+       connector->display_info.standard_color = edid->standard_color;
+       connector->display_info.display_type = edid->display_type;
+       connector->display_info.active_off_supported = edid->pm_active_off;
+       connector->display_info.suspend_supported = edid->pm_suspend;
+       connector->display_info.standby_supported = edid->pm_standby;
+
+       return num_modes;
+}
+EXPORT_SYMBOL(drm_add_edid_modes);
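+
+/*
+ * Typical use from a driver's ->get_modes() hook (sketch only; the helper
+ * names below are illustrative, not part of this file):
+ *
+ *      static int example_get_modes(struct drm_connector *connector)
+ *      {
+ *              struct i2c_adapter *ddc = example_ddc_adapter(connector);
+ *              struct edid *edid = drm_get_edid(connector, ddc);
+ *
+ *              if (!edid)
+ *                      return 0;
+ *              return drm_add_edid_modes(connector, edid);
+ *      }
+ */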
index 78eeed5caaff1ed68ed01cb2cd27a2ed156caa49..3733e36d135ed2700e221fb6609a0b8809c481c5 100644 (file)
@@ -35,7 +35,6 @@
  */
 
 #include "drmP.h"
-#include "drm_sarea.h"
 #include <linux/poll.h>
 #include <linux/smp_lock.h>
 
@@ -44,10 +43,8 @@ static int drm_open_helper(struct inode *inode, struct file *filp,
 
 static int drm_setup(struct drm_device * dev)
 {
-       drm_local_map_t *map;
        int i;
        int ret;
-       u32 sareapage;
 
        if (dev->driver->firstopen) {
                ret = dev->driver->firstopen(dev);
@@ -55,20 +52,14 @@ static int drm_setup(struct drm_device * dev)
                        return ret;
        }
 
-       dev->magicfree.next = NULL;
-
-       /* prebuild the SAREA */
-       sareapage = max_t(unsigned, SAREA_MAX, PAGE_SIZE);
-       i = drm_addmap(dev, 0, sareapage, _DRM_SHM, _DRM_CONTAINS_LOCK, &map);
-       if (i != 0)
-               return i;
-
        atomic_set(&dev->ioctl_count, 0);
        atomic_set(&dev->vma_count, 0);
-       dev->buf_use = 0;
-       atomic_set(&dev->buf_alloc, 0);
 
-       if (drm_core_check_feature(dev, DRIVER_HAVE_DMA)) {
+       if (drm_core_check_feature(dev, DRIVER_HAVE_DMA) &&
+           !drm_core_check_feature(dev, DRIVER_MODESET)) {
+               dev->buf_use = 0;
+               atomic_set(&dev->buf_alloc, 0);
+
                i = drm_dma_setup(dev);
                if (i < 0)
                        return i;
@@ -77,16 +68,12 @@ static int drm_setup(struct drm_device * dev)
        for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
                atomic_set(&dev->counts[i], 0);
 
-       drm_ht_create(&dev->magiclist, DRM_MAGIC_HASH_ORDER);
-       INIT_LIST_HEAD(&dev->magicfree);
-
        dev->sigdata.lock = NULL;
-       init_waitqueue_head(&dev->lock.lock_queue);
+
        dev->queue_count = 0;
        dev->queue_reserved = 0;
        dev->queue_slots = 0;
        dev->queuelist = NULL;
-       dev->irq_enabled = 0;
        dev->context_flag = 0;
        dev->interrupt_flag = 0;
        dev->dma_flag = 0;
@@ -147,10 +134,20 @@ int drm_open(struct inode *inode, struct file *filp)
                spin_lock(&dev->count_lock);
                if (!dev->open_count++) {
                        spin_unlock(&dev->count_lock);
-                       return drm_setup(dev);
+                       retcode = drm_setup(dev);
+                       goto out;
                }
                spin_unlock(&dev->count_lock);
        }
+out:
+       mutex_lock(&dev->struct_mutex);
+       if (minor->type == DRM_MINOR_LEGACY) {
+               BUG_ON((dev->dev_mapping != NULL) &&
+                       (dev->dev_mapping != inode->i_mapping));
+               if (dev->dev_mapping == NULL)
+                       dev->dev_mapping = inode->i_mapping;
+       }
+       mutex_unlock(&dev->struct_mutex);
 
        return retcode;
 }
@@ -255,6 +252,7 @@ static int drm_open_helper(struct inode *inode, struct file *filp,
        priv->lock_count = 0;
 
        INIT_LIST_HEAD(&priv->lhead);
+       INIT_LIST_HEAD(&priv->fbs);
 
        if (dev->driver->driver_features & DRIVER_GEM)
                drm_gem_open(dev, priv);
@@ -265,10 +263,42 @@ static int drm_open_helper(struct inode *inode, struct file *filp,
                        goto out_free;
        }
 
+
+       /* if there is no current master, make this fd the master */
        mutex_lock(&dev->struct_mutex);
-       if (list_empty(&dev->filelist))
-               priv->master = 1;
+       if (!priv->minor->master) {
+               /* create a new master */
+               priv->minor->master = drm_master_create(priv->minor);
+               if (!priv->minor->master) {
+                       ret = -ENOMEM;
+                       goto out_free;
+               }
 
+               priv->is_master = 1;
+               /* take another reference for the copy in the local file priv */
+               priv->master = drm_master_get(priv->minor->master);
+
+               priv->authenticated = 1;
+
+               mutex_unlock(&dev->struct_mutex);
+               if (dev->driver->master_create) {
+                       ret = dev->driver->master_create(dev, priv->master);
+                       if (ret) {
+                               mutex_lock(&dev->struct_mutex);
+                               /* drop both references if this fails */
+                               drm_master_put(&priv->minor->master);
+                               drm_master_put(&priv->master);
+                               mutex_unlock(&dev->struct_mutex);
+                               goto out_free;
+                       }
+               }
+       } else {
+               /* get a reference to the master */
+               priv->master = drm_master_get(priv->minor->master);
+               mutex_unlock(&dev->struct_mutex);
+       }
+
+       mutex_lock(&dev->struct_mutex);
        list_add(&priv->lhead, &dev->filelist);
        mutex_unlock(&dev->struct_mutex);
 
@@ -314,6 +344,74 @@ int drm_fasync(int fd, struct file *filp, int on)
 }
 EXPORT_SYMBOL(drm_fasync);
 
+/*
+ * Reclaim locked buffers; note that this may be a bad idea if the current
+ * context doesn't have the hw lock...
+ */
+static void drm_reclaim_locked_buffers(struct drm_device *dev, struct file *f)
+{
+       struct drm_file *file_priv = f->private_data;
+
+       if (drm_i_have_hw_lock(dev, file_priv)) {
+               dev->driver->reclaim_buffers_locked(dev, file_priv);
+       } else {
+               unsigned long _end = jiffies + 3 * DRM_HZ;
+               int locked = 0;
+
+               drm_idlelock_take(&file_priv->master->lock);
+
+               /*
+                * Wait for a while.
+                */
+               do {
+                       spin_lock_bh(&file_priv->master->lock.spinlock);
+                       locked = file_priv->master->lock.idle_has_lock;
+                       spin_unlock_bh(&file_priv->master->lock.spinlock);
+                       if (locked)
+                               break;
+                       schedule();
+               } while (!time_after_eq(jiffies, _end));
+
+               if (!locked) {
+                       DRM_ERROR("reclaim_buffers_locked() deadlock. Please rework this\n"
+                                 "\tdriver to use reclaim_buffers_idlelocked() instead.\n"
+                                 "\tI will go on reclaiming the buffers anyway.\n");
+               }
+
+               dev->driver->reclaim_buffers_locked(dev, file_priv);
+               drm_idlelock_release(&file_priv->master->lock);
+       }
+}
+
+static void drm_master_release(struct drm_device *dev, struct file *filp)
+{
+       struct drm_file *file_priv = filp->private_data;
+
+       if (dev->driver->reclaim_buffers_locked &&
+           file_priv->master->lock.hw_lock)
+               drm_reclaim_locked_buffers(dev, filp);
+
+       if (dev->driver->reclaim_buffers_idlelocked &&
+           file_priv->master->lock.hw_lock) {
+               drm_idlelock_take(&file_priv->master->lock);
+               dev->driver->reclaim_buffers_idlelocked(dev, file_priv);
+               drm_idlelock_release(&file_priv->master->lock);
+       }
+
+
+       if (drm_i_have_hw_lock(dev, file_priv)) {
+               DRM_DEBUG("File %p released, freeing lock for context %d\n",
+                         filp, _DRM_LOCKING_CONTEXT(file_priv->master->lock.hw_lock->lock));
+               drm_lock_free(&file_priv->master->lock,
+                             _DRM_LOCKING_CONTEXT(file_priv->master->lock.hw_lock->lock));
+       }
+
+       if (drm_core_check_feature(dev, DRIVER_HAVE_DMA) &&
+           !dev->driver->reclaim_buffers_locked) {
+               dev->driver->reclaim_buffers(dev, file_priv);
+       }
+}
+
 /**
  * Release file.
  *
@@ -348,60 +446,9 @@ int drm_release(struct inode *inode, struct file *filp)
                  (long)old_encode_dev(file_priv->minor->device),
                  dev->open_count);
 
-       if (dev->driver->reclaim_buffers_locked && dev->lock.hw_lock) {
-               if (drm_i_have_hw_lock(dev, file_priv)) {
-                       dev->driver->reclaim_buffers_locked(dev, file_priv);
-               } else {
-                       unsigned long endtime = jiffies + 3 * DRM_HZ;
-                       int locked = 0;
-
-                       drm_idlelock_take(&dev->lock);
-
-                       /*
-                        * Wait for a while.
-                        */
-
-                       do{
-                               spin_lock_bh(&dev->lock.spinlock);
-                               locked = dev->lock.idle_has_lock;
-                               spin_unlock_bh(&dev->lock.spinlock);
-                               if (locked)
-                                       break;
-                               schedule();
-                       } while (!time_after_eq(jiffies, endtime));
-
-                       if (!locked) {
-                               DRM_ERROR("reclaim_buffers_locked() deadlock. Please rework this\n"
-                                         "\tdriver to use reclaim_buffers_idlelocked() instead.\n"
-                                         "\tI will go on reclaiming the buffers anyway.\n");
-                       }
-
-                       dev->driver->reclaim_buffers_locked(dev, file_priv);
-                       drm_idlelock_release(&dev->lock);
-               }
-       }
-
-       if (dev->driver->reclaim_buffers_idlelocked && dev->lock.hw_lock) {
-
-               drm_idlelock_take(&dev->lock);
-               dev->driver->reclaim_buffers_idlelocked(dev, file_priv);
-               drm_idlelock_release(&dev->lock);
-
-       }
-
-       if (drm_i_have_hw_lock(dev, file_priv)) {
-               DRM_DEBUG("File %p released, freeing lock for context %d\n",
-                         filp, _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
-
-               drm_lock_free(&dev->lock,
-                             _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
-       }
-
-
-       if (drm_core_check_feature(dev, DRIVER_HAVE_DMA) &&
-           !dev->driver->reclaim_buffers_locked) {
-               dev->driver->reclaim_buffers(dev, file_priv);
-       }
+       /* if the master has gone away we can't do anything with the lock */
+       if (file_priv->minor->master)
+               drm_master_release(dev, filp);
 
        if (dev->driver->driver_features & DRIVER_GEM)
                drm_gem_release(dev, file_priv);
@@ -428,12 +475,24 @@ int drm_release(struct inode *inode, struct file *filp)
        mutex_unlock(&dev->ctxlist_mutex);
 
        mutex_lock(&dev->struct_mutex);
-       if (file_priv->remove_auth_on_close == 1) {
+
+       if (file_priv->is_master) {
                struct drm_file *temp;
+               list_for_each_entry(temp, &dev->filelist, lhead) {
+                       if ((temp->master == file_priv->master) &&
+                           (temp != file_priv))
+                               temp->authenticated = 0;
+               }
 
-               list_for_each_entry(temp, &dev->filelist, lhead)
-                       temp->authenticated = 0;
+               if (file_priv->minor->master == file_priv->master) {
+                       /* drop the reference held by the minor */
+                       drm_master_put(&file_priv->minor->master);
+               }
        }
+
+       /* drop the reference held by the file priv */
+       drm_master_put(&file_priv->master);
+       file_priv->is_master = 0;
        list_del(&file_priv->lhead);
        mutex_unlock(&dev->struct_mutex);
 
@@ -448,9 +507,9 @@ int drm_release(struct inode *inode, struct file *filp)
        atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
        spin_lock(&dev->count_lock);
        if (!--dev->open_count) {
-               if (atomic_read(&dev->ioctl_count) || dev->blocked) {
-                       DRM_ERROR("Device busy: %d %d\n",
-                                 atomic_read(&dev->ioctl_count), dev->blocked);
+               if (atomic_read(&dev->ioctl_count)) {
+                       DRM_ERROR("Device busy: %d\n",
+                                 atomic_read(&dev->ioctl_count));
                        spin_unlock(&dev->count_lock);
                        unlock_kernel();
                        return -EBUSY;
index ccd1afdede0249f5109e614732d85e67bbfd8b4b..9da5814528749a82b4ef12a1b13134d34635557b 100644 (file)
  * up at a later date, and as our interface with shmfs for memory allocation.
  */
 
+/*
+ * We make up offsets for buffer objects so we can recognize them at
+ * mmap time.
+ */
+#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFFUL >> PAGE_SHIFT) + 1)
+#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFFUL >> PAGE_SHIFT) * 16)
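+/*
+ * With 4 KiB pages (PAGE_SHIFT == 12) the fake offsets start at page offset
+ * 0x100000, i.e. the 4 GiB byte mark, beyond anything a 32-bit legacy map
+ * offset can produce, so drm_gem_mmap() below can tell GEM objects and
+ * legacy maps apart by vm_pgoff alone.
+ */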
+
 /**
  * Initialize the GEM device fields
  */
@@ -71,6 +78,8 @@
 int
 drm_gem_init(struct drm_device *dev)
 {
+       struct drm_gem_mm *mm;
+
        spin_lock_init(&dev->object_name_lock);
        idr_init(&dev->object_name_idr);
        atomic_set(&dev->object_count, 0);
@@ -79,9 +88,41 @@ drm_gem_init(struct drm_device *dev)
        atomic_set(&dev->pin_memory, 0);
        atomic_set(&dev->gtt_count, 0);
        atomic_set(&dev->gtt_memory, 0);
+
+       mm = drm_calloc(1, sizeof(struct drm_gem_mm), DRM_MEM_MM);
+       if (!mm) {
+               DRM_ERROR("out of memory\n");
+               return -ENOMEM;
+       }
+
+       dev->mm_private = mm;
+
+       if (drm_ht_create(&mm->offset_hash, 19)) {
+               drm_free(mm, sizeof(struct drm_gem_mm), DRM_MEM_MM);
+               return -ENOMEM;
+       }
+
+       if (drm_mm_init(&mm->offset_manager, DRM_FILE_PAGE_OFFSET_START,
+                       DRM_FILE_PAGE_OFFSET_SIZE)) {
+               drm_free(mm, sizeof(struct drm_gem_mm), DRM_MEM_MM);
+               drm_ht_remove(&mm->offset_hash);
+               return -ENOMEM;
+       }
+
        return 0;
 }
 
+void
+drm_gem_destroy(struct drm_device *dev)
+{
+       struct drm_gem_mm *mm = dev->mm_private;
+
+       drm_mm_takedown(&mm->offset_manager);
+       drm_ht_remove(&mm->offset_hash);
+       drm_free(mm, sizeof(struct drm_gem_mm), DRM_MEM_MM);
+       dev->mm_private = NULL;
+}
+
 /**
  * Allocate a GEM object of the specified size with shmfs backing store
  */
@@ -419,3 +460,73 @@ drm_gem_object_handle_free(struct kref *kref)
 }
 EXPORT_SYMBOL(drm_gem_object_handle_free);
 
+/**
+ * drm_gem_mmap - memory map routine for GEM objects
+ * @filp: DRM file pointer
+ * @vma: VMA for the area to be mapped
+ *
+ * If a driver supports GEM object mapping, mmap calls on the DRM file
+ * descriptor will end up here.
+ *
+ * If we find the object based on the offset passed in (vma->vm_pgoff will
+ * contain the fake offset we created when the GTT map ioctl was called on
+ * the object), we set up the driver fault handler so that any accesses
+ * to the object can be trapped, to perform migration, GTT binding, surface
+ * register allocation, or performance monitoring.
+ */
+int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+       struct drm_file *priv = filp->private_data;
+       struct drm_device *dev = priv->minor->dev;
+       struct drm_gem_mm *mm = dev->mm_private;
+       struct drm_map *map = NULL;
+       struct drm_gem_object *obj;
+       struct drm_hash_item *hash;
+       unsigned long prot;
+       int ret = 0;
+
+       mutex_lock(&dev->struct_mutex);
+
+       if (drm_ht_find_item(&mm->offset_hash, vma->vm_pgoff, &hash)) {
+               mutex_unlock(&dev->struct_mutex);
+               return drm_mmap(filp, vma);
+       }
+
+       map = drm_hash_entry(hash, struct drm_map_list, hash)->map;
+       if (!map ||
+           ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN))) {
+               ret =  -EPERM;
+               goto out_unlock;
+       }
+
+       /* Check for valid size. */
+       if (map->size < vma->vm_end - vma->vm_start) {
+               ret = -EINVAL;
+               goto out_unlock;
+       }
+
+       obj = map->handle;
+       if (!obj->dev->driver->gem_vm_ops) {
+               ret = -EINVAL;
+               goto out_unlock;
+       }
+
+       vma->vm_flags |= VM_RESERVED | VM_IO | VM_PFNMAP | VM_DONTEXPAND;
+       vma->vm_ops = obj->dev->driver->gem_vm_ops;
+       vma->vm_private_data = map->handle;
+       /* FIXME: use pgprot_writecombine when available */
+       prot = pgprot_val(vma->vm_page_prot);
+#ifdef CONFIG_X86
+       prot |= _PAGE_CACHE_WC;
+#endif
+       vma->vm_page_prot = __pgprot(prot);
+
+       vma->vm_file = filp;    /* Needed for drm_vm_open() */
+       drm_vm_open_locked(vma);
+
+out_unlock:
+       mutex_unlock(&dev->struct_mutex);
+
+       return ret;
+}
+EXPORT_SYMBOL(drm_gem_mmap);
index 33160673a7b7846406400226665ae68c409e5e73..af539f7d87dd2639bafe0c1576e069d4c971da1e 100644 (file)
@@ -127,6 +127,7 @@ int drm_ht_insert_item(struct drm_open_hash *ht, struct drm_hash_item *item)
        }
        return 0;
 }
+EXPORT_SYMBOL(drm_ht_insert_item);
 
 /*
  * Just insert an item and return any "bits" bit key that hasn't been
@@ -188,6 +189,7 @@ int drm_ht_remove_item(struct drm_open_hash *ht, struct drm_hash_item *item)
        ht->fill--;
        return 0;
 }
+EXPORT_SYMBOL(drm_ht_remove_item);
 
 void drm_ht_remove(struct drm_open_hash *ht)
 {
index 16829fb3089d8adbfadda119d427e65bcaa8157c..1fad76289e665d0cfbc6f7026f6f568ff6809a53 100644 (file)
@@ -53,12 +53,13 @@ int drm_getunique(struct drm_device *dev, void *data,
                  struct drm_file *file_priv)
 {
        struct drm_unique *u = data;
+       struct drm_master *master = file_priv->master;
 
-       if (u->unique_len >= dev->unique_len) {
-               if (copy_to_user(u->unique, dev->unique, dev->unique_len))
+       if (u->unique_len >= master->unique_len) {
+               if (copy_to_user(u->unique, master->unique, master->unique_len))
                        return -EFAULT;
        }
-       u->unique_len = dev->unique_len;
+       u->unique_len = master->unique_len;
 
        return 0;
 }
@@ -81,36 +82,38 @@ int drm_setunique(struct drm_device *dev, void *data,
                  struct drm_file *file_priv)
 {
        struct drm_unique *u = data;
+       struct drm_master *master = file_priv->master;
        int domain, bus, slot, func, ret;
 
-       if (dev->unique_len || dev->unique)
+       if (master->unique_len || master->unique)
                return -EBUSY;
 
        if (!u->unique_len || u->unique_len > 1024)
                return -EINVAL;
 
-       dev->unique_len = u->unique_len;
-       dev->unique = drm_alloc(u->unique_len + 1, DRM_MEM_DRIVER);
-       if (!dev->unique)
+       master->unique_len = u->unique_len;
+       master->unique_size = u->unique_len + 1;
+       master->unique = drm_alloc(master->unique_size, DRM_MEM_DRIVER);
+       if (!master->unique)
                return -ENOMEM;
-       if (copy_from_user(dev->unique, u->unique, dev->unique_len))
+       if (copy_from_user(master->unique, u->unique, master->unique_len))
                return -EFAULT;
 
-       dev->unique[dev->unique_len] = '\0';
+       master->unique[master->unique_len] = '\0';
 
        dev->devname =
            drm_alloc(strlen(dev->driver->pci_driver.name) +
-                     strlen(dev->unique) + 2, DRM_MEM_DRIVER);
+                     strlen(master->unique) + 2, DRM_MEM_DRIVER);
        if (!dev->devname)
                return -ENOMEM;
 
        sprintf(dev->devname, "%s@%s", dev->driver->pci_driver.name,
-               dev->unique);
+               master->unique);
 
        /* Return error if the busid submitted doesn't match the device's actual
         * busid.
         */
-       ret = sscanf(dev->unique, "PCI:%d:%d:%d", &bus, &slot, &func);
+       ret = sscanf(master->unique, "PCI:%d:%d:%d", &bus, &slot, &func);
        if (ret != 3)
                return -EINVAL;
        domain = bus >> 8;
@@ -125,34 +128,38 @@ int drm_setunique(struct drm_device *dev, void *data,
        return 0;
 }
 
-static int drm_set_busid(struct drm_device * dev)
+static int drm_set_busid(struct drm_device *dev, struct drm_file *file_priv)
 {
+       struct drm_master *master = file_priv->master;
        int len;
 
-       if (dev->unique != NULL)
-               return 0;
+       if (master->unique != NULL)
+               return -EBUSY;
 
-       dev->unique_len = 40;
-       dev->unique = drm_alloc(dev->unique_len + 1, DRM_MEM_DRIVER);
-       if (dev->unique == NULL)
+       master->unique_len = 40;
+       master->unique_size = master->unique_len;
+       master->unique = drm_alloc(master->unique_size, DRM_MEM_DRIVER);
+       if (master->unique == NULL)
                return -ENOMEM;
 
-       len = snprintf(dev->unique, dev->unique_len, "pci:%04x:%02x:%02x.%d",
-                      drm_get_pci_domain(dev), dev->pdev->bus->number,
+       len = snprintf(master->unique, master->unique_len, "pci:%04x:%02x:%02x.%d",
+                      drm_get_pci_domain(dev),
+                      dev->pdev->bus->number,
                       PCI_SLOT(dev->pdev->devfn),
                       PCI_FUNC(dev->pdev->devfn));
-
-       if (len > dev->unique_len)
-               DRM_ERROR("Unique buffer overflowed\n");
+       if (len >= master->unique_len)
+               DRM_ERROR("buffer overflow\n");
+       else
+               master->unique_len = len;
 
        dev->devname =
-           drm_alloc(strlen(dev->driver->pci_driver.name) + dev->unique_len +
+           drm_alloc(strlen(dev->driver->pci_driver.name) + master->unique_len +
                      2, DRM_MEM_DRIVER);
        if (dev->devname == NULL)
                return -ENOMEM;
 
        sprintf(dev->devname, "%s@%s", dev->driver->pci_driver.name,
-               dev->unique);
+               master->unique);
 
        return 0;
 }
@@ -276,7 +283,7 @@ int drm_getstats(struct drm_device *dev, void *data,
        for (i = 0; i < dev->counters; i++) {
                if (dev->types[i] == _DRM_STAT_LOCK)
                        stats->data[i].value =
-                           (dev->lock.hw_lock ? dev->lock.hw_lock->lock : 0);
+                           (file_priv->master->lock.hw_lock ? file_priv->master->lock.hw_lock->lock : 0);
                else
                        stats->data[i].value = atomic_read(&dev->counts[i]);
                stats->data[i].type = dev->types[i];
@@ -318,7 +325,7 @@ int drm_setversion(struct drm_device *dev, void *data, struct drm_file *file_pri
                        /*
                         * Version 1.1 includes tying of DRM to specific device
                         */
-                       drm_set_busid(dev);
+                       drm_set_busid(dev, file_priv);
                }
        }
 
index 1e787f894b3cf38f6c46eeda39c73cb8a49ad1c5..724e505873cf19e77c6710968a1a8ba6888cedd2 100644 (file)
@@ -116,6 +116,9 @@ void drm_vblank_cleanup(struct drm_device *dev)
                 dev->num_crtcs, DRM_MEM_DRIVER);
        drm_free(dev->last_vblank, sizeof(*dev->last_vblank) * dev->num_crtcs,
                 DRM_MEM_DRIVER);
+       drm_free(dev->last_vblank_wait,
+                sizeof(*dev->last_vblank_wait) * dev->num_crtcs,
+                DRM_MEM_DRIVER);
        drm_free(dev->vblank_inmodeset, sizeof(*dev->vblank_inmodeset) *
                 dev->num_crtcs, DRM_MEM_DRIVER);
 
@@ -161,6 +164,11 @@ int drm_vblank_init(struct drm_device *dev, int num_crtcs)
        if (!dev->last_vblank)
                goto err;
 
+       dev->last_vblank_wait = drm_calloc(num_crtcs, sizeof(u32),
+                                          DRM_MEM_DRIVER);
+       if (!dev->last_vblank_wait)
+               goto err;
+
        dev->vblank_inmodeset = drm_calloc(num_crtcs, sizeof(int),
                                         DRM_MEM_DRIVER);
        if (!dev->vblank_inmodeset)
@@ -305,6 +313,8 @@ int drm_control(struct drm_device *dev, void *data,
        case DRM_INST_HANDLER:
                if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
                        return 0;
+               if (drm_core_check_feature(dev, DRIVER_MODESET))
+                       return 0;
                if (dev->if_version < DRM_IF_VERSION(1, 2) &&
                    ctl->irq != dev->pdev->irq)
                        return -EINVAL;
@@ -312,6 +322,8 @@ int drm_control(struct drm_device *dev, void *data,
        case DRM_UNINST_HANDLER:
                if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
                        return 0;
+               if (drm_core_check_feature(dev, DRIVER_MODESET))
+                       return 0;
                return drm_irq_uninstall(dev);
        default:
                return -EINVAL;
@@ -426,6 +438,45 @@ void drm_vblank_put(struct drm_device *dev, int crtc)
 }
 EXPORT_SYMBOL(drm_vblank_put);
 
+/**
+ * drm_vblank_pre_modeset - account for vblanks across mode sets
+ * @dev: DRM device
+ * @crtc: CRTC in question
+ *
+ * Account for vblank events across mode setting events, which will likely
+ * reset the hardware frame counter.
+ */
+void drm_vblank_pre_modeset(struct drm_device *dev, int crtc)
+{
+       /*
+        * To avoid all the problems that might happen if interrupts
+        * were enabled/disabled around or between these calls, we just
+        * have the kernel take a reference on the CRTC (just once though
+        * to avoid corrupting the count if multiple, mismatch calls occur),
+        * so that interrupts remain enabled in the interim.
+        */
+       if (!dev->vblank_inmodeset[crtc]) {
+               dev->vblank_inmodeset[crtc] = 1;
+               drm_vblank_get(dev, crtc);
+       }
+}
+EXPORT_SYMBOL(drm_vblank_pre_modeset);
+
+void drm_vblank_post_modeset(struct drm_device *dev, int crtc)
+{
+       unsigned long irqflags;
+
+       if (dev->vblank_inmodeset[crtc]) {
+               spin_lock_irqsave(&dev->vbl_lock, irqflags);
+               dev->vblank_disable_allowed = 1;
+               dev->vblank_inmodeset[crtc] = 0;
+               spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
+               drm_vblank_put(dev, crtc);
+       }
+}
+EXPORT_SYMBOL(drm_vblank_post_modeset);
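+
+/*
+ * Drivers whose mode setting resets the hardware frame counter are expected
+ * to bracket that reprogramming with drm_vblank_pre_modeset() and
+ * drm_vblank_post_modeset() for the affected CRTC; the legacy
+ * _DRM_PRE_MODESET/_DRM_POST_MODESET ioctl handled in drm_modeset_ctl()
+ * below simply forwards to these helpers.
+ */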
+
 /**
  * drm_modeset_ctl - handle vblank event counter changes across mode switch
  * @DRM_IOCTL_ARGS: standard ioctl arguments
@@ -441,7 +492,6 @@ int drm_modeset_ctl(struct drm_device *dev, void *data,
                    struct drm_file *file_priv)
 {
        struct drm_modeset_ctl *modeset = data;
-       unsigned long irqflags;
        int crtc, ret = 0;
 
        /* If drm_vblank_init() hasn't been called yet, just no-op */
@@ -454,28 +504,12 @@ int drm_modeset_ctl(struct drm_device *dev, void *data,
                goto out;
        }
 
-       /*
-        * To avoid all the problems that might happen if interrupts
-        * were enabled/disabled around or between these calls, we just
-        * have the kernel take a reference on the CRTC (just once though
-        * to avoid corrupting the count if multiple, mismatch calls occur),
-        * so that interrupts remain enabled in the interim.
-        */
        switch (modeset->cmd) {
        case _DRM_PRE_MODESET:
-               if (!dev->vblank_inmodeset[crtc]) {
-                       dev->vblank_inmodeset[crtc] = 1;
-                       drm_vblank_get(dev, crtc);
-               }
+               drm_vblank_pre_modeset(dev, crtc);
                break;
        case _DRM_POST_MODESET:
-               if (dev->vblank_inmodeset[crtc]) {
-                       spin_lock_irqsave(&dev->vbl_lock, irqflags);
-                       dev->vblank_disable_allowed = 1;
-                       dev->vblank_inmodeset[crtc] = 0;
-                       spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
-                       drm_vblank_put(dev, crtc);
-               }
+               drm_vblank_post_modeset(dev, crtc);
                break;
        default:
                ret = -EINVAL;
@@ -616,6 +650,7 @@ int drm_wait_vblank(struct drm_device *dev, void *data,
        } else {
                DRM_DEBUG("waiting on vblank count %d, crtc %d\n",
                          vblwait->request.sequence, crtc);
+               dev->last_vblank_wait[crtc] = vblwait->request.sequence;
                DRM_WAIT_ON(ret, dev->vbl_queue[crtc], 3 * DRM_HZ,
                            ((drm_vblank_count(dev, crtc)
                              - vblwait->request.sequence) <= (1 << 23)));
index 1cfa72031f8f6291f18fce595c0c7e179cea07d5..46e7b28f0707397d545948dc21bbcc272f16eb6f 100644 (file)
@@ -52,6 +52,7 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
 {
        DECLARE_WAITQUEUE(entry, current);
        struct drm_lock *lock = data;
+       struct drm_master *master = file_priv->master;
        int ret = 0;
 
        ++file_priv->lock_count;
@@ -64,26 +65,27 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
 
        DRM_DEBUG("%d (pid %d) requests lock (0x%08x), flags = 0x%08x\n",
                  lock->context, task_pid_nr(current),
-                 dev->lock.hw_lock->lock, lock->flags);
+                 master->lock.hw_lock->lock, lock->flags);
 
        if (drm_core_check_feature(dev, DRIVER_DMA_QUEUE))
                if (lock->context < 0)
                        return -EINVAL;
 
-       add_wait_queue(&dev->lock.lock_queue, &entry);
-       spin_lock_bh(&dev->lock.spinlock);
-       dev->lock.user_waiters++;
-       spin_unlock_bh(&dev->lock.spinlock);
+       add_wait_queue(&master->lock.lock_queue, &entry);
+       spin_lock_bh(&master->lock.spinlock);
+       master->lock.user_waiters++;
+       spin_unlock_bh(&master->lock.spinlock);
+
        for (;;) {
                __set_current_state(TASK_INTERRUPTIBLE);
-               if (!dev->lock.hw_lock) {
+               if (!master->lock.hw_lock) {
                        /* Device has been unregistered */
                        ret = -EINTR;
                        break;
                }
-               if (drm_lock_take(&dev->lock, lock->context)) {
-                       dev->lock.file_priv = file_priv;
-                       dev->lock.lock_time = jiffies;
+               if (drm_lock_take(&master->lock, lock->context)) {
+                       master->lock.file_priv = file_priv;
+                       master->lock.lock_time = jiffies;
                        atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
                        break;  /* Got lock */
                }
@@ -95,11 +97,11 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
                        break;
                }
        }
-       spin_lock_bh(&dev->lock.spinlock);
-       dev->lock.user_waiters--;
-       spin_unlock_bh(&dev->lock.spinlock);
+       spin_lock_bh(&master->lock.spinlock);
+       master->lock.user_waiters--;
+       spin_unlock_bh(&master->lock.spinlock);
        __set_current_state(TASK_RUNNING);
-       remove_wait_queue(&dev->lock.lock_queue, &entry);
+       remove_wait_queue(&master->lock.lock_queue, &entry);
 
        DRM_DEBUG("%d %s\n", lock->context,
                  ret ? "interrupted" : "has lock");
@@ -108,14 +110,14 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
        /* don't set the block all signals on the master process for now 
         * really probably not the correct answer but lets us debug xkb
         * xserver for now */
-       if (!file_priv->master) {
+       if (!file_priv->is_master) {
                sigemptyset(&dev->sigmask);
                sigaddset(&dev->sigmask, SIGSTOP);
                sigaddset(&dev->sigmask, SIGTSTP);
                sigaddset(&dev->sigmask, SIGTTIN);
                sigaddset(&dev->sigmask, SIGTTOU);
                dev->sigdata.context = lock->context;
-               dev->sigdata.lock = dev->lock.hw_lock;
+               dev->sigdata.lock = master->lock.hw_lock;
                block_all_signals(drm_notifier, &dev->sigdata, &dev->sigmask);
        }
 
@@ -154,6 +156,7 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
 int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv)
 {
        struct drm_lock *lock = data;
+       struct drm_master *master = file_priv->master;
 
        if (lock->context == DRM_KERNEL_CONTEXT) {
                DRM_ERROR("Process %d using kernel context %d\n",
@@ -169,7 +172,7 @@ int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv)
        if (dev->driver->kernel_context_switch_unlock)
                dev->driver->kernel_context_switch_unlock(dev);
        else {
-               if (drm_lock_free(&dev->lock,lock->context)) {
+               if (drm_lock_free(&master->lock, lock->context)) {
                        /* FIXME: Should really bail out here. */
                }
        }
@@ -379,9 +382,10 @@ EXPORT_SYMBOL(drm_idlelock_release);
 
 int drm_i_have_hw_lock(struct drm_device *dev, struct drm_file *file_priv)
 {
-       return (file_priv->lock_count && dev->lock.hw_lock &&
-               _DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock) &&
-               dev->lock.file_priv == file_priv);
+       struct drm_master *master = file_priv->master;
+       return (file_priv->lock_count && master->lock.hw_lock &&
+               _DRM_LOCK_IS_HELD(master->lock.hw_lock->lock) &&
+               master->lock.file_priv == file_priv);
 }
 
 EXPORT_SYMBOL(drm_i_have_hw_lock);
index 217ad7dc70765ef174ad159c8e769fa10392c531..367c590ffbba2d83b5f516d76cb6efacdd1baff1 100644 (file)
@@ -296,3 +296,4 @@ void drm_mm_takedown(struct drm_mm * mm)
 
        drm_free(entry, sizeof(*entry), DRM_MEM_MM);
 }
+EXPORT_SYMBOL(drm_mm_takedown);
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
new file mode 100644 (file)
index 0000000..c9b80fd
--- /dev/null
@@ -0,0 +1,576 @@
+/*
+ * The list_sort function is (presumably) licensed under the GPL (see the
+ * top level "COPYING" file for details).
+ *
+ * The remainder of this file is:
+ *
+ * Copyright © 1997-2003 by The XFree86 Project, Inc.
+ * Copyright © 2007 Dave Airlie
+ * Copyright © 2007-2008 Intel Corporation
+ *   Jesse Barnes <jesse.barnes@intel.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Except as contained in this notice, the name of the copyright holder(s)
+ * and author(s) shall not be used in advertising or otherwise to promote
+ * the sale, use or other dealings in this Software without prior written
+ * authorization from the copyright holder(s) and author(s).
+ */
+
+#include <linux/list.h>
+#include "drmP.h"
+#include "drm.h"
+#include "drm_crtc.h"
+
+/**
+ * drm_mode_debug_printmodeline - debug print a mode
+ * @mode: mode to print
+ *
+ * LOCKING:
+ * None.
+ *
+ * Describe @mode using DRM_DEBUG.
+ */
+void drm_mode_debug_printmodeline(struct drm_display_mode *mode)
+{
+       DRM_DEBUG("Modeline %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x\n",
+                 mode->base.id, mode->name, mode->vrefresh, mode->clock,
+                 mode->hdisplay, mode->hsync_start,
+                 mode->hsync_end, mode->htotal,
+                 mode->vdisplay, mode->vsync_start,
+                 mode->vsync_end, mode->vtotal, mode->type, mode->flags);
+}
+EXPORT_SYMBOL(drm_mode_debug_printmodeline);
+
+/**
+ * drm_mode_set_name - set the name on a mode
+ * @mode: name will be set in this mode
+ *
+ * LOCKING:
+ * None.
+ *
+ * Set the name of @mode to a standard format.
+ */
+void drm_mode_set_name(struct drm_display_mode *mode)
+{
+       snprintf(mode->name, DRM_DISPLAY_MODE_LEN, "%dx%d", mode->hdisplay,
+                mode->vdisplay);
+}
+EXPORT_SYMBOL(drm_mode_set_name);
+
+/**
+ * drm_mode_list_concat - move modes from one list to another
+ * @head: source list
+ * @new: dst list
+ *
+ * LOCKING:
+ * Caller must ensure both lists are locked.
+ *
+ * Move all the modes from @head to @new.
+ */
+void drm_mode_list_concat(struct list_head *head, struct list_head *new)
+{
+
+       struct list_head *entry, *tmp;
+
+       list_for_each_safe(entry, tmp, head) {
+               list_move_tail(entry, new);
+       }
+}
+EXPORT_SYMBOL(drm_mode_list_concat);
+
+/**
+ * drm_mode_width - get the width of a mode
+ * @mode: mode
+ *
+ * LOCKING:
+ * None.
+ *
+ * Return @mode's width (hdisplay) value.
+ *
+ * FIXME: is this needed?
+ *
+ * RETURNS:
+ * @mode->hdisplay
+ */
+int drm_mode_width(struct drm_display_mode *mode)
+{
+       return mode->hdisplay;
+
+}
+EXPORT_SYMBOL(drm_mode_width);
+
+/**
+ * drm_mode_height - get the height of a mode
+ * @mode: mode
+ *
+ * LOCKING:
+ * None.
+ *
+ * Return @mode's height (vdisplay) value.
+ *
+ * FIXME: is this needed?
+ *
+ * RETURNS:
+ * @mode->vdisplay
+ */
+int drm_mode_height(struct drm_display_mode *mode)
+{
+       return mode->vdisplay;
+}
+EXPORT_SYMBOL(drm_mode_height);
+
+/**
+ * drm_mode_vrefresh - get the vrefresh of a mode
+ * @mode: mode
+ *
+ * LOCKING:
+ * None.
+ *
+ * Return @mode's vrefresh rate or calculate it if necessary.
+ *
+ * FIXME: why is this needed?  shouldn't vrefresh be set already?
+ *
+ * RETURNS:
+ * Vertical refresh rate of @mode, multiplied by 1000 for precision.
+ */
+int drm_mode_vrefresh(struct drm_display_mode *mode)
+{
+       int refresh = 0;
+       unsigned int calc_val;
+
+       if (mode->vrefresh > 0)
+               refresh = mode->vrefresh;
+       else if (mode->htotal > 0 && mode->vtotal > 0) {
+               /* work out vrefresh; the value will be x1000 */
+               calc_val = (mode->clock * 1000);
+
+               calc_val /= mode->htotal;
+               calc_val *= 1000;
+               calc_val /= mode->vtotal;
+
+               refresh = calc_val;
+               if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+                       refresh *= 2;
+               if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+                       refresh /= 2;
+               if (mode->vscan > 1)
+                       refresh /= mode->vscan;
+       }
+       return refresh;
+}
+EXPORT_SYMBOL(drm_mode_vrefresh);
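
As a rough sanity check of the integer arithmetic above (not part of the patch): feeding it the classic 640x480 VESA timings, a 25175 kHz clock with an 800x525 total raster, should land near 59940, i.e. 59.94 Hz scaled by 1000. A minimal standalone version of the same steps:

    #include <stdio.h>

    int main(void)
    {
            unsigned int clock = 25175;         /* kHz, like drm_display_mode::clock */
            unsigned int htotal = 800, vtotal = 525;
            unsigned int calc_val;

            calc_val = clock * 1000;            /* kHz -> Hz */
            calc_val /= htotal;
            calc_val *= 1000;                   /* keep three decimal places */
            calc_val /= vtotal;

            printf("vrefresh x 1000 = %u\n", calc_val);   /* prints 59939 */
            return 0;
    }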
+
+/**
+ * drm_mode_set_crtcinfo - set CRTC modesetting parameters
+ * @p: mode
+ * @adjust_flags: adjustment flags (e.g. CRTC_INTERLACE_HALVE_V)
+ *
+ * LOCKING:
+ * None.
+ *
+ * Setup the CRTC modesetting parameters for @p, adjusting if necessary.
+ */
+void drm_mode_set_crtcinfo(struct drm_display_mode *p, int adjust_flags)
+{
+       if ((p == NULL) || ((p->type & DRM_MODE_TYPE_CRTC_C) == DRM_MODE_TYPE_BUILTIN))
+               return;
+
+       p->crtc_hdisplay = p->hdisplay;
+       p->crtc_hsync_start = p->hsync_start;
+       p->crtc_hsync_end = p->hsync_end;
+       p->crtc_htotal = p->htotal;
+       p->crtc_hskew = p->hskew;
+       p->crtc_vdisplay = p->vdisplay;
+       p->crtc_vsync_start = p->vsync_start;
+       p->crtc_vsync_end = p->vsync_end;
+       p->crtc_vtotal = p->vtotal;
+
+       if (p->flags & DRM_MODE_FLAG_INTERLACE) {
+               if (adjust_flags & CRTC_INTERLACE_HALVE_V) {
+                       p->crtc_vdisplay /= 2;
+                       p->crtc_vsync_start /= 2;
+                       p->crtc_vsync_end /= 2;
+                       p->crtc_vtotal /= 2;
+               }
+
+               p->crtc_vtotal |= 1;
+       }
+
+       if (p->flags & DRM_MODE_FLAG_DBLSCAN) {
+               p->crtc_vdisplay *= 2;
+               p->crtc_vsync_start *= 2;
+               p->crtc_vsync_end *= 2;
+               p->crtc_vtotal *= 2;
+       }
+
+       if (p->vscan > 1) {
+               p->crtc_vdisplay *= p->vscan;
+               p->crtc_vsync_start *= p->vscan;
+               p->crtc_vsync_end *= p->vscan;
+               p->crtc_vtotal *= p->vscan;
+       }
+
+       p->crtc_vblank_start = min(p->crtc_vsync_start, p->crtc_vdisplay);
+       p->crtc_vblank_end = max(p->crtc_vsync_end, p->crtc_vtotal);
+       p->crtc_hblank_start = min(p->crtc_hsync_start, p->crtc_hdisplay);
+       p->crtc_hblank_end = max(p->crtc_hsync_end, p->crtc_htotal);
+
+       p->crtc_hadjusted = false;
+       p->crtc_vadjusted = false;
+}
+EXPORT_SYMBOL(drm_mode_set_crtcinfo);
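
The interlace branch above is the subtle part: when the mode has DRM_MODE_FLAG_INTERLACE and the caller passes CRTC_INTERLACE_HALVE_V, every vertical CRTC timing is halved and crtc_vtotal is then forced odd. A tiny standalone illustration with made-up numbers (nothing here is taken from the patch):

    #include <stdio.h>

    int main(void)
    {
            unsigned int vdisplay = 600, vsync_start = 601,
                         vsync_end = 605, vtotal = 628;

            vdisplay /= 2;          /* 300 */
            vsync_start /= 2;       /* 300 */
            vsync_end /= 2;         /* 302 */
            vtotal /= 2;            /* 314 */
            vtotal |= 1;            /* 315: keep the per-field total odd */

            printf("%u %u %u %u\n", vdisplay, vsync_start, vsync_end, vtotal);
            return 0;
    }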
+
+
+/**
+ * drm_mode_duplicate - allocate and duplicate an existing mode
+ * @dev: DRM device
+ * @mode: mode to duplicate
+ *
+ * LOCKING:
+ * None.
+ *
+ * Just allocate a new mode, copy the existing mode into it, and return
+ * a pointer to it.  Used to create new instances of established modes.
+ */
+struct drm_display_mode *drm_mode_duplicate(struct drm_device *dev,
+                                           struct drm_display_mode *mode)
+{
+       struct drm_display_mode *nmode;
+       int new_id;
+
+       nmode = drm_mode_create(dev);
+       if (!nmode)
+               return NULL;
+
+       new_id = nmode->base.id;
+       *nmode = *mode;
+       nmode->base.id = new_id;
+       INIT_LIST_HEAD(&nmode->head);
+       return nmode;
+}
+EXPORT_SYMBOL(drm_mode_duplicate);
+
+/**
+ * drm_mode_equal - test modes for equality
+ * @mode1: first mode
+ * @mode2: second mode
+ *
+ * LOCKING:
+ * None.
+ *
+ * Check to see if @mode1 and @mode2 are equivalent.
+ *
+ * RETURNS:
+ * True if the modes are equal, false otherwise.
+ */
+bool drm_mode_equal(struct drm_display_mode *mode1, struct drm_display_mode *mode2)
+{
+       /* do clock check; convert to PICOS so fb modes get matched
+        * the same */
+       if (mode1->clock && mode2->clock) {
+               if (KHZ2PICOS(mode1->clock) != KHZ2PICOS(mode2->clock))
+                       return false;
+       } else if (mode1->clock != mode2->clock)
+               return false;
+
+       if (mode1->hdisplay == mode2->hdisplay &&
+           mode1->hsync_start == mode2->hsync_start &&
+           mode1->hsync_end == mode2->hsync_end &&
+           mode1->htotal == mode2->htotal &&
+           mode1->hskew == mode2->hskew &&
+           mode1->vdisplay == mode2->vdisplay &&
+           mode1->vsync_start == mode2->vsync_start &&
+           mode1->vsync_end == mode2->vsync_end &&
+           mode1->vtotal == mode2->vtotal &&
+           mode1->vscan == mode2->vscan &&
+           mode1->flags == mode2->flags)
+               return true;
+
+       return false;
+}
+EXPORT_SYMBOL(drm_mode_equal);
+
+/**
+ * drm_mode_validate_size - make sure modes adhere to size constraints
+ * @dev: DRM device
+ * @mode_list: list of modes to check
+ * @maxX: maximum width
+ * @maxY: maximum height
+ * @maxPitch: max pitch
+ *
+ * LOCKING:
+ * Caller must hold a lock protecting @mode_list.
+ *
+ * The DRM device (@dev) has size and pitch limits.  Here we validate the
+ * modes we probed for @dev against those limits and set their status as
+ * necessary.
+ */
+void drm_mode_validate_size(struct drm_device *dev,
+                           struct list_head *mode_list,
+                           int maxX, int maxY, int maxPitch)
+{
+       struct drm_display_mode *mode;
+
+       list_for_each_entry(mode, mode_list, head) {
+               if (maxPitch > 0 && mode->hdisplay > maxPitch)
+                       mode->status = MODE_BAD_WIDTH;
+
+               if (maxX > 0 && mode->hdisplay > maxX)
+                       mode->status = MODE_VIRTUAL_X;
+
+               if (maxY > 0 && mode->vdisplay > maxY)
+                       mode->status = MODE_VIRTUAL_Y;
+       }
+}
+EXPORT_SYMBOL(drm_mode_validate_size);
+
+/**
+ * drm_mode_validate_clocks - validate modes against clock limits
+ * @dev: DRM device
+ * @mode_list: list of modes to check
+ * @min: minimum clock rate array
+ * @max: maximum clock rate array
+ * @n_ranges: number of clock ranges (size of arrays)
+ *
+ * LOCKING:
+ * Caller must hold a lock protecting @mode_list.
+ *
+ * Some code may need to check a mode list against the clock limits of the
+ * device in question.  This function walks the mode list, testing to make
+ * sure each mode falls within a given range (defined by @min and @max
+ * arrays) and sets @mode->status as needed.
+ */
+void drm_mode_validate_clocks(struct drm_device *dev,
+                             struct list_head *mode_list,
+                             int *min, int *max, int n_ranges)
+{
+       struct drm_display_mode *mode;
+       int i;
+
+       list_for_each_entry(mode, mode_list, head) {
+               bool good = false;
+               for (i = 0; i < n_ranges; i++) {
+                       if (mode->clock >= min[i] && mode->clock <= max[i]) {
+                               good = true;
+                               break;
+                       }
+               }
+               if (!good)
+                       mode->status = MODE_CLOCK_RANGE;
+       }
+}
+EXPORT_SYMBOL(drm_mode_validate_clocks);
+
+/**
+ * drm_mode_prune_invalid - remove invalid modes from mode list
+ * @dev: DRM device
+ * @mode_list: list of modes to check
+ * @verbose: be verbose about it
+ *
+ * LOCKING:
+ * Caller must hold a lock protecting @mode_list.
+ *
+ * Once mode list generation is complete, a caller can use this routine to
+ * remove invalid modes from a mode list.  If any of the modes have a
+ * status other than %MODE_OK, they are removed from @mode_list and freed.
+ */
+void drm_mode_prune_invalid(struct drm_device *dev,
+                           struct list_head *mode_list, bool verbose)
+{
+       struct drm_display_mode *mode, *t;
+
+       list_for_each_entry_safe(mode, t, mode_list, head) {
+               if (mode->status != MODE_OK) {
+                       list_del(&mode->head);
+                       if (verbose) {
+                               drm_mode_debug_printmodeline(mode);
+                               DRM_DEBUG("Not using %s mode %d\n", mode->name, mode->status);
+                       }
+                       drm_mode_destroy(dev, mode);
+               }
+       }
+}
+EXPORT_SYMBOL(drm_mode_prune_invalid);
+
+/**
+ * drm_mode_compare - compare modes for favorability
+ * @lh_a: list_head for first mode
+ * @lh_b: list_head for second mode
+ *
+ * LOCKING:
+ * None.
+ *
+ * Compare two modes, given by @lh_a and @lh_b, returning a value indicating
+ * which is better.
+ *
+ * RETURNS:
+ * Negative if @lh_a is better than @lh_b, zero if they're equivalent, or
+ * positive if @lh_b is better than @lh_a.
+ */
+static int drm_mode_compare(struct list_head *lh_a, struct list_head *lh_b)
+{
+       struct drm_display_mode *a = list_entry(lh_a, struct drm_display_mode, head);
+       struct drm_display_mode *b = list_entry(lh_b, struct drm_display_mode, head);
+       int diff;
+
+       diff = ((b->type & DRM_MODE_TYPE_PREFERRED) != 0) -
+               ((a->type & DRM_MODE_TYPE_PREFERRED) != 0);
+       if (diff)
+               return diff;
+       diff = b->hdisplay * b->vdisplay - a->hdisplay * a->vdisplay;
+       if (diff)
+               return diff;
+       diff = b->clock - a->clock;
+       return diff;
+}
+
+/* FIXME: why don't we have a generic list sort function? */
+/* list sort from Mark J Roberts (mjr@znex.org) */
+void list_sort(struct list_head *head,
+              int (*cmp)(struct list_head *a, struct list_head *b))
+{
+       struct list_head *p, *q, *e, *list, *tail, *oldhead;
+       int insize, nmerges, psize, qsize, i;
+
+       list = head->next;
+       list_del(head);
+       insize = 1;
+       for (;;) {
+               p = oldhead = list;
+               list = tail = NULL;
+               nmerges = 0;
+
+               while (p) {
+                       nmerges++;
+                       q = p;
+                       psize = 0;
+                       for (i = 0; i < insize; i++) {
+                               psize++;
+                               q = q->next == oldhead ? NULL : q->next;
+                               if (!q)
+                                       break;
+                       }
+
+                       qsize = insize;
+                       while (psize > 0 || (qsize > 0 && q)) {
+                               if (!psize) {
+                                       e = q;
+                                       q = q->next;
+                                       qsize--;
+                                       if (q == oldhead)
+                                               q = NULL;
+                               } else if (!qsize || !q) {
+                                       e = p;
+                                       p = p->next;
+                                       psize--;
+                                       if (p == oldhead)
+                                               p = NULL;
+                               } else if (cmp(p, q) <= 0) {
+                                       e = p;
+                                       p = p->next;
+                                       psize--;
+                                       if (p == oldhead)
+                                               p = NULL;
+                               } else {
+                                       e = q;
+                                       q = q->next;
+                                       qsize--;
+                                       if (q == oldhead)
+                                               q = NULL;
+                               }
+                               if (tail)
+                                       tail->next = e;
+                               else
+                                       list = e;
+                               e->prev = tail;
+                               tail = e;
+                       }
+                       p = q;
+               }
+
+               tail->next = list;
+               list->prev = tail;
+
+               if (nmerges <= 1)
+                       break;
+
+               insize *= 2;
+       }
+
+       head->next = list;
+       head->prev = list->prev;
+       list->prev->next = head;
+       list->prev = head;
+}
+
+/**
+ * drm_mode_sort - sort mode list
+ * @mode_list: list to sort
+ *
+ * LOCKING:
+ * Caller must hold a lock protecting @mode_list.
+ *
+ * Sort @mode_list by favorability, putting good modes first.
+ */
+void drm_mode_sort(struct list_head *mode_list)
+{
+       list_sort(mode_list, drm_mode_compare);
+}
+EXPORT_SYMBOL(drm_mode_sort);
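
drm_mode_compare() orders modes by three keys: preferred modes first, then larger resolution area, then higher pixel clock. The ordering itself, independent of the list_sort() machinery, can be shown with a plain qsort() in userspace; the struct and the PREFERRED bit below are stand-ins rather than the real DRM definitions:

    #include <stdio.h>
    #include <stdlib.h>

    #define TYPE_PREFERRED (1 << 3)     /* stand-in for DRM_MODE_TYPE_PREFERRED */

    struct fake_mode {
            const char *name;
            unsigned int type;
            int hdisplay, vdisplay, clock;
    };

    /* Same three comparison keys as drm_mode_compare() above. */
    static int cmp(const void *pa, const void *pb)
    {
            const struct fake_mode *a = pa, *b = pb;
            int diff;

            diff = ((b->type & TYPE_PREFERRED) != 0) -
                   ((a->type & TYPE_PREFERRED) != 0);
            if (diff)
                    return diff;
            diff = b->hdisplay * b->vdisplay - a->hdisplay * a->vdisplay;
            if (diff)
                    return diff;
            return b->clock - a->clock;
    }

    int main(void)
    {
            struct fake_mode modes[] = {
                    { "1024x768",  0,              1024, 768,  65000 },
                    { "1280x1024", TYPE_PREFERRED, 1280, 1024, 108000 },
                    { "800x600",   0,              800,  600,  40000 },
            };
            int i;

            qsort(modes, 3, sizeof(modes[0]), cmp);
            for (i = 0; i < 3; i++)     /* prints 1280x1024, 1024x768, 800x600 */
                    printf("%s\n", modes[i].name);
            return 0;
    }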
+
+/**
+ * drm_mode_connector_list_update - update the mode list for the connector
+ * @connector: the connector to update
+ *
+ * LOCKING:
+ * Caller must hold a lock protecting @mode_list.
+ *
+ * This moves the modes from the @connector probed_modes list
+ * to the actual mode list. It compares the probed mode against the current
+ * list and only adds different modes. Any mode left unverified after this
+ * point will be removed by drm_mode_prune_invalid().
+ */
+void drm_mode_connector_list_update(struct drm_connector *connector)
+{
+       struct drm_display_mode *mode;
+       struct drm_display_mode *pmode, *pt;
+       int found_it;
+
+       list_for_each_entry_safe(pmode, pt, &connector->probed_modes,
+                                head) {
+               found_it = 0;
+               /* go through current modes checking for the new probed mode */
+               list_for_each_entry(mode, &connector->modes, head) {
+                       if (drm_mode_equal(pmode, mode)) {
+                               found_it = 1;
+                               /* if equal delete the probed mode */
+                               mode->status = pmode->status;
+                               list_del(&pmode->head);
+                               drm_mode_destroy(connector->dev, pmode);
+                               break;
+                       }
+               }
+
+               if (!found_it) {
+                       list_move_tail(&pmode->head, &connector->modes);
+               }
+       }
+}
+EXPORT_SYMBOL(drm_mode_connector_list_update);
index ae73b7f7249ad36059dde96219fe2fb33022b082..8df849f66830fbd50bc371fb13abf04360afa914 100644 (file)
@@ -49,6 +49,8 @@ static int drm_queues_info(char *buf, char **start, off_t offset,
                           int request, int *eof, void *data);
 static int drm_bufs_info(char *buf, char **start, off_t offset,
                         int request, int *eof, void *data);
+static int drm_vblank_info(char *buf, char **start, off_t offset,
+                          int request, int *eof, void *data);
 static int drm_gem_name_info(char *buf, char **start, off_t offset,
                             int request, int *eof, void *data);
 static int drm_gem_object_info(char *buf, char **start, off_t offset,
@@ -72,6 +74,7 @@ static struct drm_proc_list {
        {"clients", drm_clients_info, 0},
        {"queues", drm_queues_info, 0},
        {"bufs", drm_bufs_info, 0},
+       {"vblank", drm_vblank_info, 0},
        {"gem_names", drm_gem_name_info, DRIVER_GEM},
        {"gem_objects", drm_gem_object_info, DRIVER_GEM},
 #if DRM_DEBUG_CODE
@@ -195,6 +198,7 @@ static int drm_name_info(char *buf, char **start, off_t offset, int request,
                         int *eof, void *data)
 {
        struct drm_minor *minor = (struct drm_minor *) data;
+       struct drm_master *master = minor->master;
        struct drm_device *dev = minor->dev;
        int len = 0;
 
@@ -203,13 +207,16 @@ static int drm_name_info(char *buf, char **start, off_t offset, int request,
                return 0;
        }
 
+       if (!master)
+               return 0;
+
        *start = &buf[offset];
        *eof = 0;
 
-       if (dev->unique) {
+       if (master->unique) {
                DRM_PROC_PRINT("%s %s %s\n",
                               dev->driver->pci_driver.name,
-                              pci_name(dev->pdev), dev->unique);
+                              pci_name(dev->pdev), master->unique);
        } else {
                DRM_PROC_PRINT("%s %s\n", dev->driver->pci_driver.name,
                               pci_name(dev->pdev));
@@ -453,6 +460,66 @@ static int drm_bufs_info(char *buf, char **start, off_t offset, int request,
        return ret;
 }
 
+/**
+ * Called when "/proc/dri/.../vblank" is read.
+ *
+ * \param buf output buffer.
+ * \param start start of output data.
+ * \param offset requested start offset.
+ * \param request requested number of bytes.
+ * \param eof whether there is no more data to return.
+ * \param data private data.
+ * \return number of written bytes.
+ */
+static int drm__vblank_info(char *buf, char **start, off_t offset, int request,
+                         int *eof, void *data)
+{
+       struct drm_minor *minor = (struct drm_minor *) data;
+       struct drm_device *dev = minor->dev;
+       int len = 0;
+       int crtc;
+
+       if (offset > DRM_PROC_LIMIT) {
+               *eof = 1;
+               return 0;
+       }
+
+       *start = &buf[offset];
+       *eof = 0;
+
+       for (crtc = 0; crtc < dev->num_crtcs; crtc++) {
+               DRM_PROC_PRINT("CRTC %d enable:     %d\n",
+                              crtc, atomic_read(&dev->vblank_refcount[crtc]));
+               DRM_PROC_PRINT("CRTC %d counter:    %d\n",
+                              crtc, drm_vblank_count(dev, crtc));
+               DRM_PROC_PRINT("CRTC %d last wait:  %d\n",
+                              crtc, dev->last_vblank_wait[crtc]);
+               DRM_PROC_PRINT("CRTC %d in modeset: %d\n",
+                              crtc, dev->vblank_inmodeset[crtc]);
+       }
+
+       if (len > request + offset)
+               return request;
+       *eof = 1;
+       return len - offset;
+}
+
+/**
+ * Simply calls _vblank_info() while holding the drm_device::struct_mutex lock.
+ */
+static int drm_vblank_info(char *buf, char **start, off_t offset, int request,
+                        int *eof, void *data)
+{
+       struct drm_minor *minor = (struct drm_minor *) data;
+       struct drm_device *dev = minor->dev;
+       int ret;
+
+       mutex_lock(&dev->struct_mutex);
+       ret = drm__vblank_info(buf, start, offset, request, eof, data);
+       mutex_unlock(&dev->struct_mutex);
+       return ret;
+}
+
 /**
  * Called when "/proc/dri/.../clients" is read.
  *
index 66c96ec66672ca2746dc70709772d8c780aea426..5ca132afa4f2e128999e319e44e31ad156e6ab74 100644 (file)
@@ -57,6 +57,14 @@ static int drm_minor_get_id(struct drm_device *dev, int type)
        int ret;
        int base = 0, limit = 63;
 
+       if (type == DRM_MINOR_CONTROL) {
+               base += 64;
+               limit = base + 127;
+       } else if (type == DRM_MINOR_RENDER) {
+               base += 128;
+               limit = base + 255;
+       }
+
 again:
        if (idr_pre_get(&drm_minors_idr, GFP_KERNEL) == 0) {
                DRM_ERROR("Out of memory expanding drawable idr\n");
@@ -79,6 +87,104 @@ again:
        return new_id;
 }
 
+struct drm_master *drm_master_create(struct drm_minor *minor)
+{
+       struct drm_master *master;
+
+       master = drm_calloc(1, sizeof(*master), DRM_MEM_DRIVER);
+       if (!master)
+               return NULL;
+
+       kref_init(&master->refcount);
+       spin_lock_init(&master->lock.spinlock);
+       init_waitqueue_head(&master->lock.lock_queue);
+       drm_ht_create(&master->magiclist, DRM_MAGIC_HASH_ORDER);
+       INIT_LIST_HEAD(&master->magicfree);
+       master->minor = minor;
+
+       list_add_tail(&master->head, &minor->master_list);
+
+       return master;
+}
+
+struct drm_master *drm_master_get(struct drm_master *master)
+{
+       kref_get(&master->refcount);
+       return master;
+}
+
+static void drm_master_destroy(struct kref *kref)
+{
+       struct drm_master *master = container_of(kref, struct drm_master, refcount);
+       struct drm_magic_entry *pt, *next;
+       struct drm_device *dev = master->minor->dev;
+
+       list_del(&master->head);
+
+       if (dev->driver->master_destroy)
+               dev->driver->master_destroy(dev, master);
+
+       if (master->unique) {
+               drm_free(master->unique, master->unique_size, DRM_MEM_DRIVER);
+               master->unique = NULL;
+               master->unique_len = 0;
+       }
+
+       list_for_each_entry_safe(pt, next, &master->magicfree, head) {
+               list_del(&pt->head);
+               drm_ht_remove_item(&master->magiclist, &pt->hash_item);
+               drm_free(pt, sizeof(*pt), DRM_MEM_MAGIC);
+       }
+
+       drm_ht_remove(&master->magiclist);
+
+       if (master->lock.hw_lock) {
+               if (dev->sigdata.lock == master->lock.hw_lock)
+                       dev->sigdata.lock = NULL;
+               master->lock.hw_lock = NULL;
+               master->lock.file_priv = NULL;
+               wake_up_interruptible(&master->lock.lock_queue);
+       }
+
+       drm_free(master, sizeof(*master), DRM_MEM_DRIVER);
+}
+
+void drm_master_put(struct drm_master **master)
+{
+       kref_put(&(*master)->refcount, drm_master_destroy);
+       *master = NULL;
+}
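
The master object is purely reference counted: drm_master_create() starts it at one via kref_init(), drm_master_get() takes additional references, and the final drm_master_put() drops into drm_master_destroy(). A hedged sketch of how a caller might pin a master across an operation, using only the functions defined above (the example function is hypothetical, and it assumes the caller already holds whatever lock protects minor->master, as the real open/close paths do):

    static int example_pin_master(struct drm_minor *minor)
    {
            struct drm_master *master;

            if (!minor->master) {
                    master = drm_master_create(minor);      /* refcount == 1 */
                    if (!master)
                            return -ENOMEM;
                    minor->master = master;
            }

            master = drm_master_get(minor->master);         /* +1 for this user */

            /* ... use master->lock, master->unique, ... */

            drm_master_put(&master);    /* -1, freed on the last reference */
            return 0;
    }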
+
+int drm_setmaster_ioctl(struct drm_device *dev, void *data,
+                       struct drm_file *file_priv)
+{
+       if (file_priv->minor->master && file_priv->minor->master != file_priv->master)
+               return -EINVAL;
+
+       if (!file_priv->master)
+               return -EINVAL;
+
+       if (!file_priv->minor->master &&
+           file_priv->minor->master != file_priv->master) {
+               mutex_lock(&dev->struct_mutex);
+               file_priv->minor->master = drm_master_get(file_priv->master);
+               mutex_unlock(&dev->struct_mutex);
+       }
+
+       return 0;
+}
+
+int drm_dropmaster_ioctl(struct drm_device *dev, void *data,
+                        struct drm_file *file_priv)
+{
+       if (!file_priv->master)
+               return -EINVAL;
+       mutex_lock(&dev->struct_mutex);
+       drm_master_put(&file_priv->minor->master);
+       mutex_unlock(&dev->struct_mutex);
+       return 0;
+}
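
From userspace these two handlers are reached through the set/drop-master ioctls (the ioctl table wiring is outside this hunk). A hedged sketch of how a display server might use them around VT switching, assuming the DRM_IOCTL_SET_MASTER and DRM_IOCTL_DROP_MASTER numbers from the drm.h UAPI header and an already-open /dev/dri/card0 file descriptor:

    #include <sys/ioctl.h>
    #include <drm/drm.h>

    /* Per drm_setmaster_ioctl() above, this fails with -EINVAL if another
     * file is already master of the minor. */
    static int become_master(int fd)
    {
            return ioctl(fd, DRM_IOCTL_SET_MASTER, 0);
    }

    static int release_master(int fd)
    {
            return ioctl(fd, DRM_IOCTL_DROP_MASTER, 0);
    }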
+
 static int drm_fill_in_dev(struct drm_device * dev, struct pci_dev *pdev,
                           const struct pci_device_id *ent,
                           struct drm_driver *driver)
@@ -92,7 +198,6 @@ static int drm_fill_in_dev(struct drm_device * dev, struct pci_dev *pdev,
 
        spin_lock_init(&dev->count_lock);
        spin_lock_init(&dev->drw_lock);
-       spin_lock_init(&dev->lock.spinlock);
        init_timer(&dev->timer);
        mutex_init(&dev->struct_mutex);
        mutex_init(&dev->ctxlist_mutex);
@@ -140,9 +245,6 @@ static int drm_fill_in_dev(struct drm_device * dev, struct pci_dev *pdev,
                }
        }
 
-       if (dev->driver->load)
-               if ((retcode = dev->driver->load(dev, ent->driver_data)))
-                       goto error_out_unreg;
 
        retcode = drm_ctxbitmap_init(dev);
        if (retcode) {
@@ -200,6 +302,7 @@ static int drm_get_minor(struct drm_device *dev, struct drm_minor **minor, int t
        new_minor->device = MKDEV(DRM_MAJOR, minor_id);
        new_minor->dev = dev;
        new_minor->index = minor_id;
+       INIT_LIST_HEAD(&new_minor->master_list);
 
        idr_replace(&drm_minors_idr, new_minor, minor_id);
 
@@ -267,8 +370,30 @@ int drm_get_dev(struct pci_dev *pdev, const struct pci_device_id *ent,
                printk(KERN_ERR "DRM: Fill_in_dev failed.\n");
                goto err_g2;
        }
+
+       if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+               ret = drm_get_minor(dev, &dev->control, DRM_MINOR_CONTROL);
+               if (ret)
+                       goto err_g2;
+       }
+
        if ((ret = drm_get_minor(dev, &dev->primary, DRM_MINOR_LEGACY)))
-               goto err_g2;
+               goto err_g3;
+
+       if (dev->driver->load) {
+               ret = dev->driver->load(dev, ent->driver_data);
+               if (ret)
+                       goto err_g3;
+       }
+
+       /* set up the grouping for the legacy output */
+       if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+               ret = drm_mode_group_init_legacy_group(dev, &dev->primary->mode_group);
+               if (ret)
+                       goto err_g3;
+       }
+
+       list_add_tail(&dev->driver_item, &driver->device_list);
 
        DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n",
                 driver->name, driver->major, driver->minor, driver->patchlevel,
@@ -276,6 +401,8 @@ int drm_get_dev(struct pci_dev *pdev, const struct pci_device_id *ent,
 
        return 0;
 
+err_g3:
+       drm_put_minor(&dev->primary);
 err_g2:
        pci_disable_device(pdev);
 err_g1:
@@ -297,11 +424,6 @@ int drm_put_dev(struct drm_device * dev)
 {
        DRM_DEBUG("release primary %s\n", dev->driver->pci_driver.name);
 
-       if (dev->unique) {
-               drm_free(dev->unique, strlen(dev->unique) + 1, DRM_MEM_DRIVER);
-               dev->unique = NULL;
-               dev->unique_len = 0;
-       }
        if (dev->devname) {
                drm_free(dev->devname, strlen(dev->devname) + 1,
                         DRM_MEM_DRIVER);
index 1611b9bcbe7fd6378bc91a069ff8b8afab0d968b..65d72d094c81803757c00133bc9bd58895a2d03a 100644 (file)
@@ -20,6 +20,7 @@
 #include "drmP.h"
 
 #define to_drm_minor(d) container_of(d, struct drm_minor, kdev)
+#define to_drm_connector(d) container_of(d, struct drm_connector, kdev)
 
 /**
  * drm_sysfs_suspend - DRM class suspend hook
@@ -34,7 +35,7 @@ static int drm_sysfs_suspend(struct device *dev, pm_message_t state)
        struct drm_minor *drm_minor = to_drm_minor(dev);
        struct drm_device *drm_dev = drm_minor->dev;
 
-       if (drm_dev->driver->suspend)
+       if (drm_minor->type == DRM_MINOR_LEGACY && drm_dev->driver->suspend)
                return drm_dev->driver->suspend(drm_dev, state);
 
        return 0;
@@ -52,7 +53,7 @@ static int drm_sysfs_resume(struct device *dev)
        struct drm_minor *drm_minor = to_drm_minor(dev);
        struct drm_device *drm_dev = drm_minor->dev;
 
-       if (drm_dev->driver->resume)
+       if (drm_minor->type == DRM_MINOR_LEGACY && drm_dev->driver->resume)
                return drm_dev->driver->resume(drm_dev);
 
        return 0;
@@ -144,6 +145,323 @@ static void drm_sysfs_device_release(struct device *dev)
        return;
 }
 
+/*
+ * Connector properties
+ */
+static ssize_t status_show(struct device *device,
+                          struct device_attribute *attr,
+                          char *buf)
+{
+       struct drm_connector *connector = to_drm_connector(device);
+       enum drm_connector_status status;
+
+       status = connector->funcs->detect(connector);
+       return snprintf(buf, PAGE_SIZE, "%s",
+                       drm_get_connector_status_name(status));
+}
+
+static ssize_t dpms_show(struct device *device,
+                          struct device_attribute *attr,
+                          char *buf)
+{
+       struct drm_connector *connector = to_drm_connector(device);
+       struct drm_device *dev = connector->dev;
+       uint64_t dpms_status;
+       int ret;
+
+       ret = drm_connector_property_get_value(connector,
+                                           dev->mode_config.dpms_property,
+                                           &dpms_status);
+       if (ret)
+               return 0;
+
+       return snprintf(buf, PAGE_SIZE, "%s",
+                       drm_get_dpms_name((int)dpms_status));
+}
+
+static ssize_t enabled_show(struct device *device,
+                           struct device_attribute *attr,
+                          char *buf)
+{
+       struct drm_connector *connector = to_drm_connector(device);
+
+       return snprintf(buf, PAGE_SIZE, connector->encoder ? "enabled" :
+                       "disabled");
+}
+
+static ssize_t edid_show(struct kobject *kobj, struct bin_attribute *attr,
+                        char *buf, loff_t off, size_t count)
+{
+       struct device *connector_dev = container_of(kobj, struct device, kobj);
+       struct drm_connector *connector = to_drm_connector(connector_dev);
+       unsigned char *edid;
+       size_t size;
+
+       if (!connector->edid_blob_ptr)
+               return 0;
+
+       edid = connector->edid_blob_ptr->data;
+       size = connector->edid_blob_ptr->length;
+       if (!edid)
+               return 0;
+
+       if (off >= size)
+               return 0;
+
+       if (off + count > size)
+               count = size - off;
+       memcpy(buf, edid + off, count);
+
+       return count;
+}
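
Because edid_attr below exposes this as a binary sysfs attribute, the blob can be read from userspace like any other file. A small hedged example; the path follows the card%d-%s bus_id format used by drm_sysfs_connector_add() further down, so the exact connector name ("card0-VGA-1") is only an illustrative guess:

    #include <stdio.h>

    int main(void)
    {
            unsigned char edid[128];
            size_t n;
            FILE *f = fopen("/sys/class/drm/card0-VGA-1/edid", "rb");

            if (!f)
                    return 1;
            n = fread(edid, 1, sizeof(edid), f);
            fclose(f);

            /* A valid base EDID block starts 00 ff ff ff ff ff ff 00. */
            if (n >= 8)
                    printf("read %zu bytes, byte 1 = 0x%02x\n", n, edid[1]);
            return 0;
    }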
+
+static ssize_t modes_show(struct device *device,
+                          struct device_attribute *attr,
+                          char *buf)
+{
+       struct drm_connector *connector = to_drm_connector(device);
+       struct drm_display_mode *mode;
+       int written = 0;
+
+       list_for_each_entry(mode, &connector->modes, head) {
+               written += snprintf(buf + written, PAGE_SIZE - written, "%s\n",
+                                   mode->name);
+       }
+
+       return written;
+}
+
+static ssize_t subconnector_show(struct device *device,
+                          struct device_attribute *attr,
+                          char *buf)
+{
+       struct drm_connector *connector = to_drm_connector(device);
+       struct drm_device *dev = connector->dev;
+       struct drm_property *prop = NULL;
+       uint64_t subconnector;
+       int is_tv = 0;
+       int ret;
+
+       switch (connector->connector_type) {
+               case DRM_MODE_CONNECTOR_DVII:
+                       prop = dev->mode_config.dvi_i_subconnector_property;
+                       break;
+               case DRM_MODE_CONNECTOR_Composite:
+               case DRM_MODE_CONNECTOR_SVIDEO:
+               case DRM_MODE_CONNECTOR_Component:
+                       prop = dev->mode_config.tv_subconnector_property;
+                       is_tv = 1;
+                       break;
+               default:
+                       DRM_ERROR("Wrong connector type for this property\n");
+                       return 0;
+       }
+
+       if (!prop) {
+               DRM_ERROR("Unable to find subconnector property\n");
+               return 0;
+       }
+
+       ret = drm_connector_property_get_value(connector, prop, &subconnector);
+       if (ret)
+               return 0;
+
+       return snprintf(buf, PAGE_SIZE, "%s", is_tv ?
+                       drm_get_tv_subconnector_name((int)subconnector) :
+                       drm_get_dvi_i_subconnector_name((int)subconnector));
+}
+
+static ssize_t select_subconnector_show(struct device *device,
+                          struct device_attribute *attr,
+                          char *buf)
+{
+       struct drm_connector *connector = to_drm_connector(device);
+       struct drm_device *dev = connector->dev;
+       struct drm_property *prop = NULL;
+       uint64_t subconnector;
+       int is_tv = 0;
+       int ret;
+
+       switch (connector->connector_type) {
+               case DRM_MODE_CONNECTOR_DVII:
+                       prop = dev->mode_config.dvi_i_select_subconnector_property;
+                       break;
+               case DRM_MODE_CONNECTOR_Composite:
+               case DRM_MODE_CONNECTOR_SVIDEO:
+               case DRM_MODE_CONNECTOR_Component:
+                       prop = dev->mode_config.tv_select_subconnector_property;
+                       is_tv = 1;
+                       break;
+               default:
+                       DRM_ERROR("Wrong connector type for this property\n");
+                       return 0;
+       }
+
+       if (!prop) {
+               DRM_ERROR("Unable to find select subconnector property\n");
+               return 0;
+       }
+
+       ret = drm_connector_property_get_value(connector, prop, &subconnector);
+       if (ret)
+               return 0;
+
+       return snprintf(buf, PAGE_SIZE, "%s", is_tv ?
+                       drm_get_tv_select_name((int)subconnector) :
+                       drm_get_dvi_i_select_name((int)subconnector));
+}
+
+static struct device_attribute connector_attrs[] = {
+       __ATTR_RO(status),
+       __ATTR_RO(enabled),
+       __ATTR_RO(dpms),
+       __ATTR_RO(modes),
+};
+
+/* These attributes are for both DVI-I connectors and all types of tv-out. */
+static struct device_attribute connector_attrs_opt1[] = {
+       __ATTR_RO(subconnector),
+       __ATTR_RO(select_subconnector),
+};
+
+static struct bin_attribute edid_attr = {
+       .attr.name = "edid",
+       .size = 128,
+       .read = edid_show,
+};
+
+/**
+ * drm_sysfs_connector_add - add a connector to sysfs
+ * @connector: connector to add
+ *
+ * Create a connector device in sysfs, along with its associated connector
+ * properties (so far, connection status, dpms, mode list & edid) and
+ * generate a hotplug event so userspace knows there's a new connector
+ * available.
+ *
+ * Note:
+ * This routine should only be called *once* for each connector registered.
+ * A second call for an already registered device will trigger the BUG_ON
+ * below.
+ */
+int drm_sysfs_connector_add(struct drm_connector *connector)
+{
+       struct drm_device *dev = connector->dev;
+       int ret = 0, i, j;
+
+       /* We shouldn't get called more than once for the same connector */
+       BUG_ON(device_is_registered(&connector->kdev));
+
+       connector->kdev.parent = &dev->primary->kdev;
+       connector->kdev.class = drm_class;
+       connector->kdev.release = drm_sysfs_device_release;
+
+       DRM_DEBUG("adding \"%s\" to sysfs\n",
+                 drm_get_connector_name(connector));
+
+       snprintf(connector->kdev.bus_id, BUS_ID_SIZE, "card%d-%s",
+                dev->primary->index, drm_get_connector_name(connector));
+       ret = device_register(&connector->kdev);
+
+       if (ret) {
+               DRM_ERROR("failed to register connector device: %d\n", ret);
+               goto out;
+       }
+
+       /* Standard attributes */
+
+       for (i = 0; i < ARRAY_SIZE(connector_attrs); i++) {
+               ret = device_create_file(&connector->kdev, &connector_attrs[i]);
+               if (ret)
+                       goto err_out_files;
+       }
+
+       /* Optional attributes */
+       /*
+        * In the long run it may be a good idea to make one set of
+        * optional attributes per connector type.
+        */
+       switch (connector->connector_type) {
+               case DRM_MODE_CONNECTOR_DVII:
+               case DRM_MODE_CONNECTOR_Composite:
+               case DRM_MODE_CONNECTOR_SVIDEO:
+               case DRM_MODE_CONNECTOR_Component:
+                       for (i = 0; i < ARRAY_SIZE(connector_attrs_opt1); i++) {
+                               ret = device_create_file(&connector->kdev, &connector_attrs_opt1[i]);
+                               if (ret)
+                                       goto err_out_files;
+                       }
+                       break;
+               default:
+                       break;
+       }
+
+       ret = sysfs_create_bin_file(&connector->kdev.kobj, &edid_attr);
+       if (ret)
+               goto err_out_files;
+
+       /* Let userspace know we have a new connector */
+       drm_sysfs_hotplug_event(dev);
+
+       return 0;
+
+err_out_files:
+       if (i > 0)
+               for (j = 0; j < i; j++)
+                       device_remove_file(&connector->kdev,
+                                          &connector_attrs[j]);
+       device_unregister(&connector->kdev);
+
+out:
+       return ret;
+}
+EXPORT_SYMBOL(drm_sysfs_connector_add);
+
+/**
+ * drm_sysfs_connector_remove - remove a connector device from sysfs
+ * @connector: connector to remove
+ *
+ * Remove @connector and its associated attributes from sysfs.  Note that
+ * the device model core will take care of sending the "remove" uevent
+ * at this time, so we don't need to do it.
+ *
+ * Note:
+ * This routine should only be called if the connector was previously
+ * successfully registered.  If @connector hasn't been registered yet,
+ * you'll likely see a panic somewhere deep in sysfs code when called.
+ */
+void drm_sysfs_connector_remove(struct drm_connector *connector)
+{
+       int i;
+
+       DRM_DEBUG("removing \"%s\" from sysfs\n",
+                 drm_get_connector_name(connector));
+
+       for (i = 0; i < ARRAY_SIZE(connector_attrs); i++)
+               device_remove_file(&connector->kdev, &connector_attrs[i]);
+       sysfs_remove_bin_file(&connector->kdev.kobj, &edid_attr);
+       device_unregister(&connector->kdev);
+}
+EXPORT_SYMBOL(drm_sysfs_connector_remove);
+
+/**
+ * drm_sysfs_hotplug_event - generate a DRM uevent
+ * @dev: DRM device
+ *
+ * Send a uevent for the DRM device specified by @dev.  Currently we only
+ * set HOTPLUG=1 in the uevent environment, but this could be expanded to
+ * deal with other types of events.
+ */
+void drm_sysfs_hotplug_event(struct drm_device *dev)
+{
+       char *event_string = "HOTPLUG=1";
+       char *envp[] = { event_string, NULL };
+
+       DRM_DEBUG("generating hotplug event\n");
+
+       kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, envp);
+}
+
 /**
  * drm_sysfs_device_add - adds a class device to sysfs for a character driver
  * @dev: DRM device to be added
@@ -163,7 +481,12 @@ int drm_sysfs_device_add(struct drm_minor *minor)
        minor->kdev.class = drm_class;
        minor->kdev.release = drm_sysfs_device_release;
        minor->kdev.devt = minor->device;
-       minor_str = "card%d";
+       if (minor->type == DRM_MINOR_CONTROL)
+               minor_str = "controlD%d";
+       else if (minor->type == DRM_MINOR_RENDER)
+               minor_str = "renderD%d";
+       else
+               minor_str = "card%d";
 
        snprintf(minor->kdev.bus_id, BUS_ID_SIZE, minor_str, minor->index);
 
index c234c6f24a8d8fd157a1af029d530d5bbc09bd84..3ffae021d28052173898a3ed449adbc6e96d61d1 100644 (file)
@@ -267,6 +267,9 @@ static void drm_vm_shm_close(struct vm_area_struct *vma)
                                dmah.size = map->size;
                                __drm_pci_free(dev, &dmah);
                                break;
+                       case _DRM_GEM:
+                               DRM_ERROR("tried to rmmap GEM object\n");
+                               break;
                        }
                        drm_free(map, sizeof(*map), DRM_MEM_MAPS);
                }
@@ -399,7 +402,7 @@ static struct vm_operations_struct drm_vm_sg_ops = {
  * Create a new drm_vma_entry structure as the \p vma private data entry and
  * add it to drm_device::vmalist.
  */
-static void drm_vm_open_locked(struct vm_area_struct *vma)
+void drm_vm_open_locked(struct vm_area_struct *vma)
 {
        struct drm_file *priv = vma->vm_file->private_data;
        struct drm_device *dev = priv->minor->dev;
@@ -540,7 +543,7 @@ EXPORT_SYMBOL(drm_core_get_reg_ofs);
  * according to the mapping type and remaps the pages. Finally sets the file
  * pointer and calls vm_open().
  */
-static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
+int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
 {
        struct drm_file *priv = filp->private_data;
        struct drm_device *dev = priv->minor->dev;
index d8fb5d8ee7ea38aa55308bda89ebee06978f1366..dd57a5bd457248505922e552be656982c295ad56 100644 (file)
@@ -8,7 +8,22 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o i915_mem.o \
          i915_gem.o \
          i915_gem_debug.o \
          i915_gem_proc.o \
-         i915_gem_tiling.o
+         i915_gem_tiling.o \
+         intel_display.o \
+         intel_crt.o \
+         intel_lvds.o \
+         intel_bios.o \
+         intel_sdvo.o \
+         intel_modes.o \
+         intel_i2c.o \
+         intel_fb.o \
+         intel_tv.o \
+         intel_dvo.o \
+         dvo_ch7xxx.o \
+         dvo_ch7017.o \
+         dvo_ivch.o \
+         dvo_tfp410.o \
+         dvo_sil164.o
 
 i915-$(CONFIG_ACPI)    += i915_opregion.o
 i915-$(CONFIG_COMPAT)   += i915_ioc32.o
diff --git a/drivers/gpu/drm/i915/dvo.h b/drivers/gpu/drm/i915/dvo.h
new file mode 100644 (file)
index 0000000..e747ac4
--- /dev/null
@@ -0,0 +1,157 @@
+/*
+ * Copyright © 2006 Eric Anholt
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission.  The copyright holders make no representations
+ * about the suitability of this software for any purpose.  It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ */
+
+#ifndef _INTEL_DVO_H
+#define _INTEL_DVO_H
+
+#include <linux/i2c.h>
+#include "drmP.h"
+#include "drm.h"
+#include "drm_crtc.h"
+#include "intel_drv.h"
+
+struct intel_dvo_device {
+       char *name;
+       int type;
+       /* DVOA/B/C output register */
+       u32 dvo_reg;
+       /* GPIO register used for i2c bus to control this device */
+       u32 gpio;
+       int slave_addr;
+       struct intel_i2c_chan *i2c_bus;
+
+       const struct intel_dvo_dev_ops *dev_ops;
+       void *dev_priv;
+
+       struct drm_display_mode *panel_fixed_mode;
+       bool panel_wants_dither;
+};
+
+struct intel_dvo_dev_ops {
+       /*
+        * Initialize the device at startup time.
+        * Returns false if the device does not exist.
+        */
+       bool (*init)(struct intel_dvo_device *dvo,
+                    struct intel_i2c_chan *i2cbus);
+
+       /*
+        * Called to allow the output a chance to create properties after the
+        * RandR objects have been created.
+        */
+       void (*create_resources)(struct intel_dvo_device *dvo);
+
+       /*
+        * Turn on/off output or set intermediate power levels if available.
+        *
+        * Unsupported intermediate modes drop to the lower power setting.
+        * If the mode is DPMSModeOff, the output must be disabled,
+        * as the DPLL may be disabled afterwards.
+        */
+       void (*dpms)(struct intel_dvo_device *dvo, int mode);
+
+       /*
+        * Saves the output's state for restoration on VT switch.
+        */
+       void (*save)(struct intel_dvo_device *dvo);
+
+       /*
+        * Restores the output's state at VT switch.
+        */
+       void (*restore)(struct intel_dvo_device *dvo);
+
+       /*
+        * Callback for testing a video mode for a given output.
+        *
+        * This function should only check for cases where a mode can't
+        * be supported on the output specifically, and not represent
+        * generic CRTC limitations.
+        *
+        * \return MODE_OK if the mode is valid, or another MODE_* otherwise.
+        */
+       int (*mode_valid)(struct intel_dvo_device *dvo,
+                         struct drm_display_mode *mode);
+
+       /*
+        * Callback to adjust the mode to be set in the CRTC.
+        *
+        * This allows an output to adjust the clock or even the entire set of
+        * timings, which is used for panels with fixed timings or for
+        * buses with clock limitations.
+        */
+       bool (*mode_fixup)(struct intel_dvo_device *dvo,
+                          struct drm_display_mode *mode,
+                          struct drm_display_mode *adjusted_mode);
+
+       /*
+        * Callback for preparing mode changes on an output
+        */
+       void (*prepare)(struct intel_dvo_device *dvo);
+
+       /*
+        * Callback for committing mode changes on an output
+        */
+       void (*commit)(struct intel_dvo_device *dvo);
+
+       /*
+        * Callback for setting up a video mode after fixups have been made.
+        *
+        * This is only called while the output is disabled.  The dpms callback
+        * must be all that's necessary for the output, to turn the output on
+        * after this function is called.
+        */
+       void (*mode_set)(struct intel_dvo_device *dvo,
+                        struct drm_display_mode *mode,
+                        struct drm_display_mode *adjusted_mode);
+
+       /*
+        * Probe for a connected output, and return detect_status.
+        */
+       enum drm_connector_status (*detect)(struct intel_dvo_device *dvo);
+
+       /**
+        * Query the device for the modes it provides.
+        *
+        * This function may also update MonInfo, mm_width, and mm_height.
+        *
+        * \return singly-linked list of modes or NULL if no modes found.
+        */
+       struct drm_display_mode *(*get_modes)(struct intel_dvo_device *dvo);
+
+       /**
+        * Clean up driver-specific bits of the output
+        */
+       void (*destroy) (struct intel_dvo_device *dvo);
+
+       /**
+        * Debugging hook to dump device registers to log file
+        */
+       void (*dump_regs)(struct intel_dvo_device *dvo);
+};
+
+extern struct intel_dvo_dev_ops sil164_ops;
+extern struct intel_dvo_dev_ops ch7xxx_ops;
+extern struct intel_dvo_dev_ops ivch_ops;
+extern struct intel_dvo_dev_ops tfp410_ops;
+extern struct intel_dvo_dev_ops ch7017_ops;
+
+#endif /* _INTEL_DVO_H */
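
Each of the dvo_*.c files added in this series fills in one of the ops tables declared above. The following is a hypothetical, heavily trimmed initializer, shown only to illustrate the shape of such a driver; the example_* names are placeholders, not the real sil164/ch7xxx/ivch/tfp410/ch7017 implementations:

    static bool example_dvo_init(struct intel_dvo_device *dvo,
                                 struct intel_i2c_chan *i2cbus)
    {
            dvo->i2c_bus = i2cbus;
            return true;            /* pretend the chip answered on the bus */
    }

    static void example_dvo_dpms(struct intel_dvo_device *dvo, int mode)
    {
            /* write the chip's power register here */
    }

    static enum drm_connector_status
    example_dvo_detect(struct intel_dvo_device *dvo)
    {
            return connector_status_unknown;
    }

    struct intel_dvo_dev_ops example_dvo_ops = {
            .init   = example_dvo_init,
            .dpms   = example_dvo_dpms,
            .detect = example_dvo_detect,
    };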
diff --git a/drivers/gpu/drm/i915/dvo_ch7017.c b/drivers/gpu/drm/i915/dvo_ch7017.c
new file mode 100644 (file)
index 0000000..03d4b49
--- /dev/null
@@ -0,0 +1,454 @@
+/*
+ * Copyright © 2006 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Eric Anholt <eric@anholt.net>
+ *
+ */
+
+#include "dvo.h"
+
+#define CH7017_TV_DISPLAY_MODE         0x00
+#define CH7017_FLICKER_FILTER          0x01
+#define CH7017_VIDEO_BANDWIDTH         0x02
+#define CH7017_TEXT_ENHANCEMENT                0x03
+#define CH7017_START_ACTIVE_VIDEO      0x04
+#define CH7017_HORIZONTAL_POSITION     0x05
+#define CH7017_VERTICAL_POSITION       0x06
+#define CH7017_BLACK_LEVEL             0x07
+#define CH7017_CONTRAST_ENHANCEMENT    0x08
+#define CH7017_TV_PLL                  0x09
+#define CH7017_TV_PLL_M                        0x0a
+#define CH7017_TV_PLL_N                        0x0b
+#define CH7017_SUB_CARRIER_0           0x0c
+#define CH7017_CIV_CONTROL             0x10
+#define CH7017_CIV_0                   0x11
+#define CH7017_CHROMA_BOOST            0x14
+#define CH7017_CLOCK_MODE              0x1c
+#define CH7017_INPUT_CLOCK             0x1d
+#define CH7017_GPIO_CONTROL            0x1e
+#define CH7017_INPUT_DATA_FORMAT       0x1f
+#define CH7017_CONNECTION_DETECT       0x20
+#define CH7017_DAC_CONTROL             0x21
+#define CH7017_BUFFERED_CLOCK_OUTPUT   0x22
+#define CH7017_DEFEAT_VSYNC            0x47
+#define CH7017_TEST_PATTERN            0x48
+
+#define CH7017_POWER_MANAGEMENT                0x49
+/** Enables the TV output path. */
+#define CH7017_TV_EN                   (1 << 0)
+#define CH7017_DAC0_POWER_DOWN         (1 << 1)
+#define CH7017_DAC1_POWER_DOWN         (1 << 2)
+#define CH7017_DAC2_POWER_DOWN         (1 << 3)
+#define CH7017_DAC3_POWER_DOWN         (1 << 4)
+/** Powers down the TV out block, and DAC0-3 */
+#define CH7017_TV_POWER_DOWN_EN                (1 << 5)
+
+#define CH7017_VERSION_ID              0x4a
+
+#define CH7017_DEVICE_ID               0x4b
+#define CH7017_DEVICE_ID_VALUE         0x1b
+#define CH7018_DEVICE_ID_VALUE         0x1a
+#define CH7019_DEVICE_ID_VALUE         0x19
+
+#define CH7017_XCLK_D2_ADJUST          0x53
+#define CH7017_UP_SCALER_COEFF_0       0x55
+#define CH7017_UP_SCALER_COEFF_1       0x56
+#define CH7017_UP_SCALER_COEFF_2       0x57
+#define CH7017_UP_SCALER_COEFF_3       0x58
+#define CH7017_UP_SCALER_COEFF_4       0x59
+#define CH7017_UP_SCALER_VERTICAL_INC_0        0x5a
+#define CH7017_UP_SCALER_VERTICAL_INC_1        0x5b
+#define CH7017_GPIO_INVERT             0x5c
+#define CH7017_UP_SCALER_HORIZONTAL_INC_0      0x5d
+#define CH7017_UP_SCALER_HORIZONTAL_INC_1      0x5e
+
+#define CH7017_HORIZONTAL_ACTIVE_PIXEL_INPUT   0x5f
+/**< Low bits of horizontal active pixel input */
+
+#define CH7017_ACTIVE_INPUT_LINE_OUTPUT        0x60
+/** High bits of horizontal active pixel input */
+#define CH7017_LVDS_HAP_INPUT_MASK     (0x7 << 0)
+/** High bits of vertical active line output */
+#define CH7017_LVDS_VAL_HIGH_MASK      (0x7 << 3)
+
+#define CH7017_VERTICAL_ACTIVE_LINE_OUTPUT     0x61
+/**< Low bits of vertical active line output */
+
+#define CH7017_HORIZONTAL_ACTIVE_PIXEL_OUTPUT  0x62
+/**< Low bits of horizontal active pixel output */
+
+#define CH7017_LVDS_POWER_DOWN         0x63
+/** High bits of horizontal active pixel output */
+#define CH7017_LVDS_HAP_HIGH_MASK      (0x7 << 0)
+/** Enables the LVDS power down state transition */
+#define CH7017_LVDS_POWER_DOWN_EN      (1 << 6)
+/** Enables the LVDS upscaler */
+#define CH7017_LVDS_UPSCALER_EN                (1 << 7)
+#define CH7017_LVDS_POWER_DOWN_DEFAULT_RESERVED 0x08
+
+#define CH7017_LVDS_ENCODING           0x64
+#define CH7017_LVDS_DITHER_2D          (1 << 2)
+#define CH7017_LVDS_DITHER_DIS         (1 << 3)
+#define CH7017_LVDS_DUAL_CHANNEL_EN    (1 << 4)
+#define CH7017_LVDS_24_BIT             (1 << 5)
+
+#define CH7017_LVDS_ENCODING_2         0x65
+
+#define CH7017_LVDS_PLL_CONTROL                0x66
+/** Enables the LVDS panel output path */
+#define CH7017_LVDS_PANEN              (1 << 0)
+/** Enables the LVDS panel backlight */
+#define CH7017_LVDS_BKLEN              (1 << 3)
+
+#define CH7017_POWER_SEQUENCING_T1     0x67
+#define CH7017_POWER_SEQUENCING_T2     0x68
+#define CH7017_POWER_SEQUENCING_T3     0x69
+#define CH7017_POWER_SEQUENCING_T4     0x6a
+#define CH7017_POWER_SEQUENCING_T5     0x6b
+#define CH7017_GPIO_DRIVER_TYPE                0x6c
+#define CH7017_GPIO_DATA               0x6d
+#define CH7017_GPIO_DIRECTION_CONTROL  0x6e
+
+#define CH7017_LVDS_PLL_FEEDBACK_DIV   0x71
+# define CH7017_LVDS_PLL_FEED_BACK_DIVIDER_SHIFT 4
+# define CH7017_LVDS_PLL_FEED_FORWARD_DIVIDER_SHIFT 0
+# define CH7017_LVDS_PLL_FEEDBACK_DEFAULT_RESERVED 0x80
+
+#define CH7017_LVDS_PLL_VCO_CONTROL    0x72
+# define CH7017_LVDS_PLL_VCO_DEFAULT_RESERVED 0x80
+# define CH7017_LVDS_PLL_VCO_SHIFT     4
+# define CH7017_LVDS_PLL_POST_SCALE_DIV_SHIFT 0
+
+#define CH7017_OUTPUTS_ENABLE          0x73
+# define CH7017_CHARGE_PUMP_LOW                0x0
+# define CH7017_CHARGE_PUMP_HIGH       0x3
+# define CH7017_LVDS_CHANNEL_A         (1 << 3)
+# define CH7017_LVDS_CHANNEL_B         (1 << 4)
+# define CH7017_TV_DAC_A               (1 << 5)
+# define CH7017_TV_DAC_B               (1 << 6)
+# define CH7017_DDC_SELECT_DC2         (1 << 7)
+
+#define CH7017_LVDS_OUTPUT_AMPLITUDE   0x74
+#define CH7017_LVDS_PLL_EMI_REDUCTION  0x75
+#define CH7017_LVDS_POWER_DOWN_FLICKER 0x76
+
+#define CH7017_LVDS_CONTROL_2          0x78
+# define CH7017_LOOP_FILTER_SHIFT      5
+# define CH7017_PHASE_DETECTOR_SHIFT   0
+
+#define CH7017_BANG_LIMIT_CONTROL      0x7f
+
+struct ch7017_priv {
+       uint8_t save_hapi;
+       uint8_t save_vali;
+       uint8_t save_valo;
+       uint8_t save_ailo;
+       uint8_t save_lvds_pll_vco;
+       uint8_t save_feedback_div;
+       uint8_t save_lvds_control_2;
+       uint8_t save_outputs_enable;
+       uint8_t save_lvds_power_down;
+       uint8_t save_power_management;
+};
+
+static void ch7017_dump_regs(struct intel_dvo_device *dvo);
+static void ch7017_dpms(struct intel_dvo_device *dvo, int mode);
+
+static bool ch7017_read(struct intel_dvo_device *dvo, int addr, uint8_t *val)
+{
+       struct intel_i2c_chan *i2cbus = dvo->i2c_bus;
+       u8 out_buf[2];
+       u8 in_buf[2];
+
+       struct i2c_msg msgs[] = {
+               {
+                       .addr = i2cbus->slave_addr,
+                       .flags = 0,
+                       .len = 1,
+                       .buf = out_buf,
+               },
+               {
+                       .addr = i2cbus->slave_addr,
+                       .flags = I2C_M_RD,
+                       .len = 1,
+                       .buf = in_buf,
+               }
+       };
+
+       out_buf[0] = addr;
+       out_buf[1] = 0;
+
+       if (i2c_transfer(&i2cbus->adapter, msgs, 2) == 2) {
+               *val = in_buf[0];
+               return true;
+       }
+
+       return false;
+}
+
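+/** Writes an 8-bit register on the CH7017 over the DVO i2c bus. */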
+static bool ch7017_write(struct intel_dvo_device *dvo, int addr, uint8_t val)
+{
+       struct intel_i2c_chan *i2cbus = dvo->i2c_bus;
+       uint8_t out_buf[2];
+       struct i2c_msg msg = {
+               .addr = i2cbus->slave_addr,
+               .flags = 0,
+               .len = 2,
+               .buf = out_buf,
+       };
+
+       out_buf[0] = addr;
+       out_buf[1] = val;
+
+       if (i2c_transfer(&i2cbus->adapter, &msg, 1) == 1)
+               return true;
+
+       return false;
+}
+
+/** Probes for a CH7017 on the given bus and slave address. */
+static bool ch7017_init(struct intel_dvo_device *dvo,
+                       struct intel_i2c_chan *i2cbus)
+{
+       struct ch7017_priv *priv;
+       uint8_t val;
+
+       priv = kzalloc(sizeof(struct ch7017_priv), GFP_KERNEL);
+       if (priv == NULL)
+               return false;
+
+       dvo->i2c_bus = i2cbus;
+       dvo->i2c_bus->slave_addr = dvo->slave_addr;
+       dvo->dev_priv = priv;
+
+       if (!ch7017_read(dvo, CH7017_DEVICE_ID, &val))
+               goto fail;
+
+       if (val != CH7017_DEVICE_ID_VALUE &&
+           val != CH7018_DEVICE_ID_VALUE &&
+           val != CH7019_DEVICE_ID_VALUE) {
+               DRM_DEBUG("ch701x not detected, got %d from %s slave %d.\n",
+                         val, i2cbus->adapter.name, i2cbus->slave_addr);
+               goto fail;
+       }
+
+       return true;
+fail:
+       kfree(priv);
+       return false;
+}
+
+static enum drm_connector_status ch7017_detect(struct intel_dvo_device *dvo)
+{
+       return connector_status_unknown;
+}
+
+static enum drm_mode_status ch7017_mode_valid(struct intel_dvo_device *dvo,
+                                             struct drm_display_mode *mode)
+{
+       if (mode->clock > 160000)
+               return MODE_CLOCK_HIGH;
+
+       return MODE_OK;
+}
+
+static void ch7017_mode_set(struct intel_dvo_device *dvo,
+                           struct drm_display_mode *mode,
+                           struct drm_display_mode *adjusted_mode)
+{
+       uint8_t lvds_pll_feedback_div, lvds_pll_vco_control;
+       uint8_t outputs_enable, lvds_control_2, lvds_power_down;
+       uint8_t horizontal_active_pixel_input;
+       uint8_t horizontal_active_pixel_output, vertical_active_line_output;
+       uint8_t active_input_line_output;
+
+       DRM_DEBUG("Registers before mode setting\n");
+       ch7017_dump_regs(dvo);
+
+       /* LVDS PLL settings from page 75 of 7017-7017ds.pdf*/
+       if (mode->clock < 100000) {
+               outputs_enable = CH7017_LVDS_CHANNEL_A | CH7017_CHARGE_PUMP_LOW;
+               lvds_pll_feedback_div = CH7017_LVDS_PLL_FEEDBACK_DEFAULT_RESERVED |
+                       (2 << CH7017_LVDS_PLL_FEED_BACK_DIVIDER_SHIFT) |
+                       (13 << CH7017_LVDS_PLL_FEED_FORWARD_DIVIDER_SHIFT);
+               lvds_pll_vco_control = CH7017_LVDS_PLL_VCO_DEFAULT_RESERVED |
+                       (2 << CH7017_LVDS_PLL_VCO_SHIFT) |
+                       (3 << CH7017_LVDS_PLL_POST_SCALE_DIV_SHIFT);
+               lvds_control_2 = (1 << CH7017_LOOP_FILTER_SHIFT) |
+                       (0 << CH7017_PHASE_DETECTOR_SHIFT);
+       } else {
+               outputs_enable = CH7017_LVDS_CHANNEL_A | CH7017_CHARGE_PUMP_HIGH;
+               lvds_pll_feedback_div = CH7017_LVDS_PLL_FEEDBACK_DEFAULT_RESERVED |
+                       (2 << CH7017_LVDS_PLL_FEED_BACK_DIVIDER_SHIFT) |
+                       (3 << CH7017_LVDS_PLL_FEED_FORWARD_DIVIDER_SHIFT);
+               lvds_pll_feedback_div = 35;
+               lvds_control_2 = (3 << CH7017_LOOP_FILTER_SHIFT) |
+                       (0 << CH7017_PHASE_DETECTOR_SHIFT);
+               if (1) { /* XXX: dual channel panel detection.  Assume yes for now. */
+                       outputs_enable |= CH7017_LVDS_CHANNEL_B;
+                       lvds_pll_vco_control = CH7017_LVDS_PLL_VCO_DEFAULT_RESERVED |
+                               (2 << CH7017_LVDS_PLL_VCO_SHIFT) |
+                               (13 << CH7017_LVDS_PLL_POST_SCALE_DIV_SHIFT);
+               } else {
+                       lvds_pll_vco_control = CH7017_LVDS_PLL_VCO_DEFAULT_RESERVED |
+                               (1 << CH7017_LVDS_PLL_VCO_SHIFT) |
+                               (13 << CH7017_LVDS_PLL_POST_SCALE_DIV_SHIFT);
+               }
+       }
+
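+       /* Split the display size across the CH7017 registers: the low eight
+        * bits of hdisplay/vdisplay go into the input/output registers below,
+        * while the high bits are packed into ACTIVE_INPUT_LINE_OUTPUT and the
+        * LVDS power-down register. */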
+       horizontal_active_pixel_input = mode->hdisplay & 0x00ff;
+
+       vertical_active_line_output = mode->vdisplay & 0x00ff;
+       horizontal_active_pixel_output = mode->hdisplay & 0x00ff;
+
+       active_input_line_output = ((mode->hdisplay & 0x0700) >> 8) |
+                                  (((mode->vdisplay & 0x0700) >> 8) << 3);
+
+       lvds_power_down = CH7017_LVDS_POWER_DOWN_DEFAULT_RESERVED |
+                         (mode->hdisplay & 0x0700) >> 8;
+
+       ch7017_dpms(dvo, DRM_MODE_DPMS_OFF);
+       ch7017_write(dvo, CH7017_HORIZONTAL_ACTIVE_PIXEL_INPUT,
+                       horizontal_active_pixel_input);
+       ch7017_write(dvo, CH7017_HORIZONTAL_ACTIVE_PIXEL_OUTPUT,
+                       horizontal_active_pixel_output);
+       ch7017_write(dvo, CH7017_VERTICAL_ACTIVE_LINE_OUTPUT,
+                       vertical_active_line_output);
+       ch7017_write(dvo, CH7017_ACTIVE_INPUT_LINE_OUTPUT,
+                       active_input_line_output);
+       ch7017_write(dvo, CH7017_LVDS_PLL_VCO_CONTROL, lvds_pll_vco_control);
+       ch7017_write(dvo, CH7017_LVDS_PLL_FEEDBACK_DIV, lvds_pll_feedback_div);
+       ch7017_write(dvo, CH7017_LVDS_CONTROL_2, lvds_control_2);
+       ch7017_write(dvo, CH7017_OUTPUTS_ENABLE, outputs_enable);
+
+       /* Turn the LVDS back on with new settings. */
+       ch7017_write(dvo, CH7017_LVDS_POWER_DOWN, lvds_power_down);
+
+       DRM_DEBUG("Registers after mode setting\n");
+       ch7017_dump_regs(dvo);
+}
+
+/* set the CH7017 power state */
+static void ch7017_dpms(struct intel_dvo_device *dvo, int mode)
+{
+       uint8_t val;
+
+       ch7017_read(dvo, CH7017_LVDS_POWER_DOWN, &val);
+
+       /* Turn off TV/VGA, and never turn it on since we don't support it. */
+       ch7017_write(dvo, CH7017_POWER_MANAGEMENT,
+                       CH7017_DAC0_POWER_DOWN |
+                       CH7017_DAC1_POWER_DOWN |
+                       CH7017_DAC2_POWER_DOWN |
+                       CH7017_DAC3_POWER_DOWN |
+                       CH7017_TV_POWER_DOWN_EN);
+
+       if (mode == DRM_MODE_DPMS_ON) {
+               /* Turn on the LVDS */
+               ch7017_write(dvo, CH7017_LVDS_POWER_DOWN,
+                            val & ~CH7017_LVDS_POWER_DOWN_EN);
+       } else {
+               /* Turn off the LVDS */
+               ch7017_write(dvo, CH7017_LVDS_POWER_DOWN,
+                            val | CH7017_LVDS_POWER_DOWN_EN);
+       }
+
+       /* XXX: Should actually wait for update power status somehow */
+       udelay(20000);
+}
+
+static void ch7017_dump_regs(struct intel_dvo_device *dvo)
+{
+       uint8_t val;
+
+#define DUMP(reg)                                      \
+do {                                                   \
+       ch7017_read(dvo, reg, &val);                    \
+       DRM_DEBUG(#reg ": %02x\n", val);                \
+} while (0)
+
+       DUMP(CH7017_HORIZONTAL_ACTIVE_PIXEL_INPUT);
+       DUMP(CH7017_HORIZONTAL_ACTIVE_PIXEL_OUTPUT);
+       DUMP(CH7017_VERTICAL_ACTIVE_LINE_OUTPUT);
+       DUMP(CH7017_ACTIVE_INPUT_LINE_OUTPUT);
+       DUMP(CH7017_LVDS_PLL_VCO_CONTROL);
+       DUMP(CH7017_LVDS_PLL_FEEDBACK_DIV);
+       DUMP(CH7017_LVDS_CONTROL_2);
+       DUMP(CH7017_OUTPUTS_ENABLE);
+       DUMP(CH7017_LVDS_POWER_DOWN);
+}
+
+static void ch7017_save(struct intel_dvo_device *dvo)
+{
+       struct ch7017_priv *priv = dvo->dev_priv;
+
+       ch7017_read(dvo, CH7017_HORIZONTAL_ACTIVE_PIXEL_INPUT, &priv->save_hapi);
+       ch7017_read(dvo, CH7017_VERTICAL_ACTIVE_LINE_OUTPUT, &priv->save_valo);
+       ch7017_read(dvo, CH7017_ACTIVE_INPUT_LINE_OUTPUT, &priv->save_ailo);
+       ch7017_read(dvo, CH7017_LVDS_PLL_VCO_CONTROL, &priv->save_lvds_pll_vco);
+       ch7017_read(dvo, CH7017_LVDS_PLL_FEEDBACK_DIV, &priv->save_feedback_div);
+       ch7017_read(dvo, CH7017_LVDS_CONTROL_2, &priv->save_lvds_control_2);
+       ch7017_read(dvo, CH7017_OUTPUTS_ENABLE, &priv->save_outputs_enable);
+       ch7017_read(dvo, CH7017_LVDS_POWER_DOWN, &priv->save_lvds_power_down);
+       ch7017_read(dvo, CH7017_POWER_MANAGEMENT, &priv->save_power_management);
+}
+
+static void ch7017_restore(struct intel_dvo_device *dvo)
+{
+       struct ch7017_priv *priv = dvo->dev_priv;
+
+       /* Power down before changing mode */
+       ch7017_dpms(dvo, DRM_MODE_DPMS_OFF);
+
+       ch7017_write(dvo, CH7017_HORIZONTAL_ACTIVE_PIXEL_INPUT, priv->save_hapi);
+       ch7017_write(dvo, CH7017_VERTICAL_ACTIVE_LINE_OUTPUT, priv->save_valo);
+       ch7017_write(dvo, CH7017_ACTIVE_INPUT_LINE_OUTPUT, priv->save_ailo);
+       ch7017_write(dvo, CH7017_LVDS_PLL_VCO_CONTROL, priv->save_lvds_pll_vco);
+       ch7017_write(dvo, CH7017_LVDS_PLL_FEEDBACK_DIV, priv->save_feedback_div);
+       ch7017_write(dvo, CH7017_LVDS_CONTROL_2, priv->save_lvds_control_2);
+       ch7017_write(dvo, CH7017_OUTPUTS_ENABLE, priv->save_outputs_enable);
+       ch7017_write(dvo, CH7017_LVDS_POWER_DOWN, priv->save_lvds_power_down);
+       ch7017_write(dvo, CH7017_POWER_MANAGEMENT, priv->save_power_management);
+}
+
+static void ch7017_destroy(struct intel_dvo_device *dvo)
+{
+       struct ch7017_priv *priv = dvo->dev_priv;
+
+       if (priv) {
+               kfree(priv);
+               dvo->dev_priv = NULL;
+       }
+}
+
+struct intel_dvo_dev_ops ch7017_ops = {
+       .init = ch7017_init,
+       .detect = ch7017_detect,
+       .mode_valid = ch7017_mode_valid,
+       .mode_set = ch7017_mode_set,
+       .dpms = ch7017_dpms,
+       .dump_regs = ch7017_dump_regs,
+       .save = ch7017_save,
+       .restore = ch7017_restore,
+       .destroy = ch7017_destroy,
+};
diff --git a/drivers/gpu/drm/i915/dvo_ch7xxx.c b/drivers/gpu/drm/i915/dvo_ch7xxx.c
new file mode 100644 (file)
index 0000000..d2fd95d
--- /dev/null
@@ -0,0 +1,368 @@
+/**************************************************************************
+
+Copyright © 2006 Dave Airlie
+
+All Rights Reserved.
+
+Permission is hereby granted, free of charge, to any person obtaining a
+copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sub license, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice (including the
+next paragraph) shall be included in all copies or substantial portions
+of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+**************************************************************************/
+
+#include "dvo.h"
+
+#define CH7xxx_REG_VID         0x4a
+#define CH7xxx_REG_DID         0x4b
+
+#define CH7011_VID             0x83 /* 7010 as well */
+#define CH7009A_VID            0x84
+#define CH7009B_VID            0x85
+#define CH7301_VID             0x95
+
+#define CH7xxx_VID             0x84
+#define CH7xxx_DID             0x17
+
+#define CH7xxx_NUM_REGS                0x4c
+
+#define CH7xxx_CM              0x1c
+#define CH7xxx_CM_XCM          (1<<0)
+#define CH7xxx_CM_MCP          (1<<2)
+#define CH7xxx_INPUT_CLOCK     0x1d
+#define CH7xxx_GPIO            0x1e
+#define CH7xxx_GPIO_HPIR       (1<<3)
+#define CH7xxx_IDF             0x1f
+
+#define CH7xxx_IDF_HSP         (1<<3)
+#define CH7xxx_IDF_VSP         (1<<4)
+
+#define CH7xxx_CONNECTION_DETECT 0x20
+#define CH7xxx_CDET_DVI                (1<<5)
+
+#define CH7301_DAC_CNTL                0x21
+#define CH7301_HOTPLUG         0x23
+#define CH7xxx_TCTL            0x31
+#define CH7xxx_TVCO            0x32
+#define CH7xxx_TPCP            0x33
+#define CH7xxx_TPD             0x34
+#define CH7xxx_TPVT            0x35
+#define CH7xxx_TLPF            0x36
+#define CH7xxx_TCT             0x37
+#define CH7301_TEST_PATTERN    0x48
+
+#define CH7xxx_PM              0x49
+#define CH7xxx_PM_FPD          (1<<0)
+#define CH7301_PM_DACPD0       (1<<1)
+#define CH7301_PM_DACPD1       (1<<2)
+#define CH7301_PM_DACPD2       (1<<3)
+#define CH7xxx_PM_DVIL         (1<<6)
+#define CH7xxx_PM_DVIP         (1<<7)
+
+#define CH7301_SYNC_POLARITY   0x56
+#define CH7301_SYNC_RGB_YUV    (1<<0)
+#define CH7301_SYNC_POL_DVI    (1<<5)
+
+/** @file
+ * driver for the Chrontel 7xxx DVI chip over DVO.
+ */
+
+static struct ch7xxx_id_struct {
+       uint8_t vid;
+       char *name;
+} ch7xxx_ids[] = {
+       { CH7011_VID, "CH7011" },
+       { CH7009A_VID, "CH7009A" },
+       { CH7009B_VID, "CH7009B" },
+       { CH7301_VID, "CH7301" },
+};
+
+struct ch7xxx_reg_state {
+       uint8_t regs[CH7xxx_NUM_REGS];
+};
+
+struct ch7xxx_priv {
+       bool quiet;
+
+       struct ch7xxx_reg_state save_reg;
+       struct ch7xxx_reg_state mode_reg;
+       uint8_t save_TCTL, save_TPCP, save_TPD, save_TPVT;
+       uint8_t save_TLPF, save_TCT, save_PM, save_IDF;
+};
+
+static void ch7xxx_save(struct intel_dvo_device *dvo);
+
+static char *ch7xxx_get_id(uint8_t vid)
+{
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(ch7xxx_ids); i++) {
+               if (ch7xxx_ids[i].vid == vid)
+                       return ch7xxx_ids[i].name;
+       }
+
+       return NULL;
+}
+
+/** Reads an 8 bit register */
+static bool ch7xxx_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
+{
+       struct ch7xxx_priv *ch7xxx = dvo->dev_priv;
+       struct intel_i2c_chan *i2cbus = dvo->i2c_bus;
+       u8 out_buf[2];
+       u8 in_buf[2];
+
+       struct i2c_msg msgs[] = {
+               {
+                       .addr = i2cbus->slave_addr,
+                       .flags = 0,
+                       .len = 1,
+                       .buf = out_buf,
+               },
+               {
+                       .addr = i2cbus->slave_addr,
+                       .flags = I2C_M_RD,
+                       .len = 1,
+                       .buf = in_buf,
+               }
+       };
+
+       out_buf[0] = addr;
+       out_buf[1] = 0;
+
+       if (i2c_transfer(&i2cbus->adapter, msgs, 2) == 2) {
+               *ch = in_buf[0];
+               return true;
+       }
+
+       if (!ch7xxx->quiet) {
+               DRM_DEBUG("Unable to read register 0x%02x from %s:%02x.\n",
+                         addr, i2cbus->adapter.name, i2cbus->slave_addr);
+       }
+       return false;
+}
+
+/** Writes an 8 bit register */
+static bool ch7xxx_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
+{
+       struct ch7xxx_priv *ch7xxx = dvo->dev_priv;
+       struct intel_i2c_chan *i2cbus = dvo->i2c_bus;
+       uint8_t out_buf[2];
+       struct i2c_msg msg = {
+               .addr = i2cbus->slave_addr,
+               .flags = 0,
+               .len = 2,
+               .buf = out_buf,
+       };
+
+       out_buf[0] = addr;
+       out_buf[1] = ch;
+
+       if (i2c_transfer(&i2cbus->adapter, &msg, 1) == 1)
+               return true;
+
+       if (!ch7xxx->quiet) {
+               DRM_DEBUG("Unable to write register 0x%02x to %s:%d.\n",
+                         addr, i2cbus->adapter.name, i2cbus->slave_addr);
+       }
+
+       return false;
+}
+
+static bool ch7xxx_init(struct intel_dvo_device *dvo,
+                       struct intel_i2c_chan *i2cbus)
+{
+       /* this will detect the CH7xxx chip on the specified i2c bus */
+       struct ch7xxx_priv *ch7xxx;
+       uint8_t vendor, device;
+       char *name;
+
+       ch7xxx = kzalloc(sizeof(struct ch7xxx_priv), GFP_KERNEL);
+       if (ch7xxx == NULL)
+               return false;
+
+       dvo->i2c_bus = i2cbus;
+       dvo->i2c_bus->slave_addr = dvo->slave_addr;
+       dvo->dev_priv = ch7xxx;
+       ch7xxx->quiet = true;
+
+       if (!ch7xxx_readb(dvo, CH7xxx_REG_VID, &vendor))
+               goto out;
+
+       name = ch7xxx_get_id(vendor);
+       if (!name) {
+               DRM_DEBUG("ch7xxx not detected; got 0x%02x from %s slave %d.\n",
+                         vendor, i2cbus->adapter.name, i2cbus->slave_addr);
+               goto out;
+       }
+
+
+       if (!ch7xxx_readb(dvo, CH7xxx_REG_DID, &device))
+               goto out;
+
+       if (device != CH7xxx_DID) {
+               DRM_DEBUG("ch7xxx not detected; got device ID 0x%02x from %s slave %d.\n",
+                         device, i2cbus->adapter.name, i2cbus->slave_addr);
+               goto out;
+       }
+
+       ch7xxx->quiet = false;
+       DRM_DEBUG("Detected %s chipset, vendor/device ID 0x%02x/0x%02x\n",
+                 name, vendor, device);
+       return true;
+out:
+       kfree(ch7xxx);
+       return false;
+}
+
+static enum drm_connector_status ch7xxx_detect(struct intel_dvo_device *dvo)
+{
+       uint8_t cdet, orig_pm, pm;
+
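+       /* Temporarily power up the DVI output so the connection-detect bit
+        * can be sampled, then restore the original power state. */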
+       ch7xxx_readb(dvo, CH7xxx_PM, &orig_pm);
+
+       pm = orig_pm;
+       pm &= ~CH7xxx_PM_FPD;
+       pm |= CH7xxx_PM_DVIL | CH7xxx_PM_DVIP;
+
+       ch7xxx_writeb(dvo, CH7xxx_PM, pm);
+
+       ch7xxx_readb(dvo, CH7xxx_CONNECTION_DETECT, &cdet);
+
+       ch7xxx_writeb(dvo, CH7xxx_PM, orig_pm);
+
+       if (cdet & CH7xxx_CDET_DVI)
+               return connector_status_connected;
+       return connector_status_disconnected;
+}
+
+static enum drm_mode_status ch7xxx_mode_valid(struct intel_dvo_device *dvo,
+                                             struct drm_display_mode *mode)
+{
+       if (mode->clock > 165000)
+               return MODE_CLOCK_HIGH;
+
+       return MODE_OK;
+}
+
+static void ch7xxx_mode_set(struct intel_dvo_device *dvo,
+                           struct drm_display_mode *mode,
+                           struct drm_display_mode *adjusted_mode)
+{
+       uint8_t tvco, tpcp, tpd, tlpf, idf;
+
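+       /* PLL/filter values for the two pixel-clock ranges; presumably taken
+        * from the Chrontel datasheet. */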
+       if (mode->clock <= 65000) {
+               tvco = 0x23;
+               tpcp = 0x08;
+               tpd = 0x16;
+               tlpf = 0x60;
+       } else {
+               tvco = 0x2d;
+               tpcp = 0x06;
+               tpd = 0x26;
+               tlpf = 0xa0;
+       }
+
+       ch7xxx_writeb(dvo, CH7xxx_TCTL, 0x00);
+       ch7xxx_writeb(dvo, CH7xxx_TVCO, tvco);
+       ch7xxx_writeb(dvo, CH7xxx_TPCP, tpcp);
+       ch7xxx_writeb(dvo, CH7xxx_TPD, tpd);
+       ch7xxx_writeb(dvo, CH7xxx_TPVT, 0x30);
+       ch7xxx_writeb(dvo, CH7xxx_TLPF, tlpf);
+       ch7xxx_writeb(dvo, CH7xxx_TCT, 0x00);
+
+       ch7xxx_readb(dvo, CH7xxx_IDF, &idf);
+
+       idf &= ~(CH7xxx_IDF_HSP | CH7xxx_IDF_VSP);
+       if (mode->flags & DRM_MODE_FLAG_PHSYNC)
+               idf |= CH7xxx_IDF_HSP;
+
+       if (mode->flags & DRM_MODE_FLAG_PVSYNC)
+               idf |= CH7xxx_IDF_VSP;
+
+       ch7xxx_writeb(dvo, CH7xxx_IDF, idf);
+}
+
+/* set the CH7xxx power state */
+static void ch7xxx_dpms(struct intel_dvo_device *dvo, int mode)
+{
+       if (mode == DRM_MODE_DPMS_ON)
+               ch7xxx_writeb(dvo, CH7xxx_PM, CH7xxx_PM_DVIL | CH7xxx_PM_DVIP);
+       else
+               ch7xxx_writeb(dvo, CH7xxx_PM, CH7xxx_PM_FPD);
+}
+
+static void ch7xxx_dump_regs(struct intel_dvo_device *dvo)
+{
+       struct ch7xxx_priv *ch7xxx = dvo->dev_priv;
+       int i;
+
+       for (i = 0; i < CH7xxx_NUM_REGS; i++) {
+               if ((i % 8) == 0)
+                       DRM_DEBUG("\n %02X: ", i);
+               DRM_DEBUG("%02X ", ch7xxx->mode_reg.regs[i]);
+       }
+}
+
+static void ch7xxx_save(struct intel_dvo_device *dvo)
+{
+       struct ch7xxx_priv *ch7xxx = dvo->dev_priv;
+
+       ch7xxx_readb(dvo, CH7xxx_TCTL, &ch7xxx->save_TCTL);
+       ch7xxx_readb(dvo, CH7xxx_TPCP, &ch7xxx->save_TPCP);
+       ch7xxx_readb(dvo, CH7xxx_TPD, &ch7xxx->save_TPD);
+       ch7xxx_readb(dvo, CH7xxx_TPVT, &ch7xxx->save_TPVT);
+       ch7xxx_readb(dvo, CH7xxx_TLPF, &ch7xxx->save_TLPF);
+       ch7xxx_readb(dvo, CH7xxx_PM, &ch7xxx->save_PM);
+       ch7xxx_readb(dvo, CH7xxx_IDF, &ch7xxx->save_IDF);
+}
+
+static void ch7xxx_restore(struct intel_dvo_device *dvo)
+{
+       struct ch7xxx_priv *ch7xxx = dvo->dev_priv;
+
+       ch7xxx_writeb(dvo, CH7xxx_TCTL, ch7xxx->save_TCTL);
+       ch7xxx_writeb(dvo, CH7xxx_TPCP, ch7xxx->save_TPCP);
+       ch7xxx_writeb(dvo, CH7xxx_TPD, ch7xxx->save_TPD);
+       ch7xxx_writeb(dvo, CH7xxx_TPVT, ch7xxx->save_TPVT);
+       ch7xxx_writeb(dvo, CH7xxx_TLPF, ch7xxx->save_TLPF);
+       ch7xxx_writeb(dvo, CH7xxx_IDF, ch7xxx->save_IDF);
+       ch7xxx_writeb(dvo, CH7xxx_PM, ch7xxx->save_PM);
+}
+
+static void ch7xxx_destroy(struct intel_dvo_device *dvo)
+{
+       struct ch7xxx_priv *ch7xxx = dvo->dev_priv;
+
+       if (ch7xxx) {
+               kfree(ch7xxx);
+               dvo->dev_priv = NULL;
+       }
+}
+
+struct intel_dvo_dev_ops ch7xxx_ops = {
+       .init = ch7xxx_init,
+       .detect = ch7xxx_detect,
+       .mode_valid = ch7xxx_mode_valid,
+       .mode_set = ch7xxx_mode_set,
+       .dpms = ch7xxx_dpms,
+       .dump_regs = ch7xxx_dump_regs,
+       .save = ch7xxx_save,
+       .restore = ch7xxx_restore,
+       .destroy = ch7xxx_destroy,
+};
diff --git a/drivers/gpu/drm/i915/dvo_ivch.c b/drivers/gpu/drm/i915/dvo_ivch.c
new file mode 100644 (file)
index 0000000..0c8d375
--- /dev/null
@@ -0,0 +1,442 @@
+/*
+ * Copyright © 2006 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Eric Anholt <eric@anholt.net>
+ *
+ */
+
+#include "dvo.h"
+
+/*
+ * register definitions for the i82807aa.
+ *
+ * Documentation on this chipset can be found in datasheet #29069001 at
+ * intel.com.
+ */
+
+/*
+ * VCH Revision & GMBus Base Addr
+ */
+#define VR00           0x00
+# define VR00_BASE_ADDRESS_MASK                0x007f
+
+/*
+ * Functionality Enable
+ */
+#define VR01           0x01
+
+/*
+ * Enable the panel fitter
+ */
+# define VR01_PANEL_FIT_ENABLE         (1 << 3)
+/*
+ * Enables the LCD display.
+ *
+ * This must not be set while VR01_DVO_BYPASS_ENABLE is set.
+ */
+# define VR01_LCD_ENABLE               (1 << 2)
+/** Enables the DVO repeater. */
+# define VR01_DVO_BYPASS_ENABLE                (1 << 1)
+/** Enables the DVO clock */
+# define VR01_DVO_ENABLE               (1 << 0)
+
+/*
+ * LCD Interface Format
+ */
+#define VR10           0x10
+/** Enables LVDS output instead of CMOS */
+# define VR10_LVDS_ENABLE              (1 << 4)
+/** Enables 18-bit LVDS output. */
+# define VR10_INTERFACE_1X18           (0 << 2)
+/** Enables 24-bit LVDS or CMOS output */
+# define VR10_INTERFACE_1X24           (1 << 2)
+/** Enables 2x18-bit LVDS or CMOS output. */
+# define VR10_INTERFACE_2X18           (2 << 2)
+/** Enables 2x24-bit LVDS output */
+# define VR10_INTERFACE_2X24           (3 << 2)
+
+/*
+ * VR20 LCD Horizontal Display Size
+ */
+#define VR20   0x20
+
+/*
+ * LCD Vertical Display Size
+ */
+#define VR21   0x21
+
+/*
+ * Panel power down status
+ */
+#define VR30           0x30
+/** Read only bit indicating that the panel is not in a safe poweroff state. */
+# define VR30_PANEL_ON                 (1 << 15)
+
+#define VR40           0x40
+# define VR40_STALL_ENABLE             (1 << 13)
+# define VR40_VERTICAL_INTERP_ENABLE   (1 << 12)
+# define VR40_ENHANCED_PANEL_FITTING   (1 << 11)
+# define VR40_HORIZONTAL_INTERP_ENABLE (1 << 10)
+# define VR40_AUTO_RATIO_ENABLE                (1 << 9)
+# define VR40_CLOCK_GATING_ENABLE      (1 << 8)
+
+/*
+ * Panel Fitting Vertical Ratio
+ * (((image_height - 1) << 16) / ((panel_height - 1))) >> 2
+ */
+#define VR41           0x41
+
+/*
+ * Panel Fitting Horizontal Ratio
+ * (((image_width - 1) << 16) / ((panel_width - 1))) >> 2
+ */
+#define VR42           0x42
+
+/*
+ * Horizontal Image Size
+ */
+#define VR43           0x43
+
+/* VR80 GPIO 0
+ */
+#define VR80       0x80
+#define VR81       0x81
+#define VR82       0x82
+#define VR83       0x83
+#define VR84       0x84
+#define VR85       0x85
+#define VR86       0x86
+#define VR87       0x87
+
+/* VR88 GPIO 8
+ */
+#define VR88       0x88
+
+/* Graphics BIOS scratch 0
+ */
+#define VR8E       0x8E
+# define VR8E_PANEL_TYPE_MASK          (0xf << 0)
+# define VR8E_PANEL_INTERFACE_CMOS     (0 << 4)
+# define VR8E_PANEL_INTERFACE_LVDS     (1 << 4)
+# define VR8E_FORCE_DEFAULT_PANEL      (1 << 5)
+
+/* Graphics BIOS scratch 1
+ */
+#define VR8F       0x8F
+# define VR8F_VCH_PRESENT              (1 << 0)
+# define VR8F_DISPLAY_CONN             (1 << 1)
+# define VR8F_POWER_MASK               (0x3c)
+# define VR8F_POWER_POS                        (2)
+
+
+struct ivch_priv {
+       bool quiet;
+
+       uint16_t width, height;
+
+       uint16_t save_VR01;
+       uint16_t save_VR40;
+};
+
+
+static void ivch_dump_regs(struct intel_dvo_device *dvo);
+
+/**
+ * Reads a register on the ivch.
+ *
+ * Each of the 256 registers are 16 bits long.
+ */
+static bool ivch_read(struct intel_dvo_device *dvo, int addr, uint16_t *data)
+{
+       struct ivch_priv *priv = dvo->dev_priv;
+       struct intel_i2c_chan *i2cbus = dvo->i2c_bus;
+       u8 out_buf[1];
+       u8 in_buf[2];
+
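+       /* The ivch uses an unusual addressing sequence: a zero-length read
+        * addressed to the slave, the register index written with no start
+        * condition, then the 16-bit value read back without a restart. */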
+       struct i2c_msg msgs[] = {
+               {
+                       .addr = i2cbus->slave_addr,
+                       .flags = I2C_M_RD,
+                       .len = 0,
+               },
+               {
+                       .addr = 0,
+                       .flags = I2C_M_NOSTART,
+                       .len = 1,
+                       .buf = out_buf,
+               },
+               {
+                       .addr = i2cbus->slave_addr,
+                       .flags = I2C_M_RD | I2C_M_NOSTART,
+                       .len = 2,
+                       .buf = in_buf,
+               }
+       };
+
+       out_buf[0] = addr;
+
+       if (i2c_transfer(&i2cbus->adapter, msgs, 3) == 3) {
+               *data = (in_buf[1] << 8) | in_buf[0];
+               return true;
+       };
+
+       if (!priv->quiet) {
+               DRM_DEBUG("Unable to read register 0x%02x from %s:%02x.\n",
+                         addr, i2cbus->adapter.name, i2cbus->slave_addr);
+       }
+       return false;
+}
+
+/** Writes a 16-bit register on the ivch */
+static bool ivch_write(struct intel_dvo_device *dvo, int addr, uint16_t data)
+{
+       struct ivch_priv *priv = dvo->dev_priv;
+       struct intel_i2c_chan *i2cbus = dvo->i2c_bus;
+       u8 out_buf[3];
+       struct i2c_msg msg = {
+               .addr = i2cbus->slave_addr,
+               .flags = 0,
+               .len = 3,
+               .buf = out_buf,
+       };
+
+       out_buf[0] = addr;
+       out_buf[1] = data & 0xff;
+       out_buf[2] = data >> 8;
+
+       if (i2c_transfer(&i2cbus->adapter, &msg, 1) == 1)
+               return true;
+
+       if (!priv->quiet) {
+               DRM_DEBUG("Unable to write register 0x%02x to %s:%d.\n",
+                         addr, i2cbus->adapter.name, i2cbus->slave_addr);
+       }
+
+       return false;
+}
+
+/** Probes the given bus and slave address for an ivch */
+static bool ivch_init(struct intel_dvo_device *dvo,
+                     struct intel_i2c_chan *i2cbus)
+{
+       struct ivch_priv *priv;
+       uint16_t temp;
+
+       priv = kzalloc(sizeof(struct ivch_priv), GFP_KERNEL);
+       if (priv == NULL)
+               return false;
+
+       dvo->i2c_bus = i2cbus;
+       dvo->i2c_bus->slave_addr = dvo->slave_addr;
+       dvo->dev_priv = priv;
+       priv->quiet = true;
+
+       if (!ivch_read(dvo, VR00, &temp))
+               goto out;
+       priv->quiet = false;
+
+       /* Since the identification bits are probably zeroes, which doesn't seem
+        * very unique, check that the value in the base address field matches
+        * the address it's responding on.
+        */
+       if ((temp & VR00_BASE_ADDRESS_MASK) != dvo->slave_addr) {
+               DRM_DEBUG("ivch detect failed due to address mismatch "
+                         "(%d vs %d)\n",
+                         (temp & VR00_BASE_ADDRESS_MASK), dvo->slave_addr);
+               goto out;
+       }
+
+       ivch_read(dvo, VR20, &priv->width);
+       ivch_read(dvo, VR21, &priv->height);
+
+       return true;
+
+out:
+       kfree(priv);
+       return false;
+}
+
+static enum drm_connector_status ivch_detect(struct intel_dvo_device *dvo)
+{
+       return connector_status_connected;
+}
+
+static enum drm_mode_status ivch_mode_valid(struct intel_dvo_device *dvo,
+                                           struct drm_display_mode *mode)
+{
+       if (mode->clock > 112000)
+               return MODE_CLOCK_HIGH;
+
+       return MODE_OK;
+}
+
+/** Sets the power state of the panel connected to the ivch */
+static void ivch_dpms(struct intel_dvo_device *dvo, int mode)
+{
+       int i;
+       uint16_t vr01, vr30, backlight;
+
+       /* Set the new power state of the panel. */
+       if (!ivch_read(dvo, VR01, &vr01))
+               return;
+
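+       /* The backlight is switched through the first GPIO register (VR80)
+        * together with the panel enable below. */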
+       if (mode == DRM_MODE_DPMS_ON)
+               backlight = 1;
+       else
+               backlight = 0;
+       ivch_write(dvo, VR80, backlight);
+
+       if (mode == DRM_MODE_DPMS_ON)
+               vr01 |= VR01_LCD_ENABLE | VR01_DVO_ENABLE;
+       else
+               vr01 &= ~(VR01_LCD_ENABLE | VR01_DVO_ENABLE);
+
+       ivch_write(dvo, VR01, vr01);
+
+       /* Wait for the panel to make its state transition */
+       for (i = 0; i < 100; i++) {
+               if (!ivch_read(dvo, VR30, &vr30))
+                       break;
+
+               if (((vr30 & VR30_PANEL_ON) != 0) == (mode == DRM_MODE_DPMS_ON))
+                       break;
+               udelay(1000);
+       }
+       /* wait some more; vch may fail to resync sometimes without this */
+       udelay(16 * 1000);
+}
+
+static void ivch_mode_set(struct intel_dvo_device *dvo,
+                         struct drm_display_mode *mode,
+                         struct drm_display_mode *adjusted_mode)
+{
+       uint16_t vr40 = 0;
+       uint16_t vr01;
+
+       vr01 = 0;
+       vr40 = (VR40_STALL_ENABLE | VR40_VERTICAL_INTERP_ENABLE |
+               VR40_HORIZONTAL_INTERP_ENABLE);
+
+       if (mode->hdisplay != adjusted_mode->hdisplay ||
+           mode->vdisplay != adjusted_mode->vdisplay) {
+               uint16_t x_ratio, y_ratio;
+
+               vr01 |= VR01_PANEL_FIT_ENABLE;
+               vr40 |= VR40_CLOCK_GATING_ENABLE;
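+               /* Scaling ratios follow the VR41/VR42 formulas noted with the
+                * register definitions above. */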
+               x_ratio = (((mode->hdisplay - 1) << 16) /
+                          (adjusted_mode->hdisplay - 1)) >> 2;
+               y_ratio = (((mode->vdisplay - 1) << 16) /
+                          (adjusted_mode->vdisplay - 1)) >> 2;
+               ivch_write(dvo, VR42, x_ratio);
+               ivch_write(dvo, VR41, y_ratio);
+       } else {
+               vr01 &= ~VR01_PANEL_FIT_ENABLE;
+               vr40 &= ~VR40_CLOCK_GATING_ENABLE;
+       }
+       vr40 &= ~VR40_AUTO_RATIO_ENABLE;
+
+       ivch_write(dvo, VR01, vr01);
+       ivch_write(dvo, VR40, vr40);
+
+       ivch_dump_regs(dvo);
+}
+
+static void ivch_dump_regs(struct intel_dvo_device *dvo)
+{
+       uint16_t val;
+
+       ivch_read(dvo, VR00, &val);
+       DRM_DEBUG("VR00: 0x%04x\n", val);
+       ivch_read(dvo, VR01, &val);
+       DRM_DEBUG("VR01: 0x%04x\n", val);
+       ivch_read(dvo, VR30, &val);
+       DRM_DEBUG("VR30: 0x%04x\n", val);
+       ivch_read(dvo, VR40, &val);
+       DRM_DEBUG("VR40: 0x%04x\n", val);
+
+       /* GPIO registers */
+       ivch_read(dvo, VR80, &val);
+       DRM_DEBUG("VR80: 0x%04x\n", val);
+       ivch_read(dvo, VR81, &val);
+       DRM_DEBUG("VR81: 0x%04x\n", val);
+       ivch_read(dvo, VR82, &val);
+       DRM_DEBUG("VR82: 0x%04x\n", val);
+       ivch_read(dvo, VR83, &val);
+       DRM_DEBUG("VR83: 0x%04x\n", val);
+       ivch_read(dvo, VR84, &val);
+       DRM_DEBUG("VR84: 0x%04x\n", val);
+       ivch_read(dvo, VR85, &val);
+       DRM_DEBUG("VR85: 0x%04x\n", val);
+       ivch_read(dvo, VR86, &val);
+       DRM_DEBUG("VR86: 0x%04x\n", val);
+       ivch_read(dvo, VR87, &val);
+       DRM_DEBUG("VR87: 0x%04x\n", val);
+       ivch_read(dvo, VR88, &val);
+       DRM_DEBUG("VR88: 0x%04x\n", val);
+
+       /* Scratch register 0 - AIM Panel type */
+       ivch_read(dvo, VR8E, &val);
+       DRM_DEBUG("VR8E: 0x%04x\n", val);
+
+       /* Scratch register 1 - Status register */
+       ivch_read(dvo, VR8F, &val);
+       DRM_DEBUG("VR8F: 0x%04x\n", val);
+}
+
+static void ivch_save(struct intel_dvo_device *dvo)
+{
+       struct ivch_priv *priv = dvo->dev_priv;
+
+       ivch_read(dvo, VR01, &priv->save_VR01);
+       ivch_read(dvo, VR40, &priv->save_VR40);
+}
+
+static void ivch_restore(struct intel_dvo_device *dvo)
+{
+       struct ivch_priv *priv = dvo->dev_priv;
+
+       ivch_write(dvo, VR01, priv->save_VR01);
+       ivch_write(dvo, VR40, priv->save_VR40);
+}
+
+static void ivch_destroy(struct intel_dvo_device *dvo)
+{
+       struct ivch_priv *priv = dvo->dev_priv;
+
+       if (priv) {
+               kfree(priv);
+               dvo->dev_priv = NULL;
+       }
+}
+
+struct intel_dvo_dev_ops ivch_ops = {
+       .init = ivch_init,
+       .dpms = ivch_dpms,
+       .save = ivch_save,
+       .restore = ivch_restore,
+       .mode_valid = ivch_mode_valid,
+       .mode_set = ivch_mode_set,
+       .detect = ivch_detect,
+       .dump_regs = ivch_dump_regs,
+       .destroy = ivch_destroy,
+};
diff --git a/drivers/gpu/drm/i915/dvo_sil164.c b/drivers/gpu/drm/i915/dvo_sil164.c
new file mode 100644 (file)
index 0000000..033a4bb
--- /dev/null
@@ -0,0 +1,302 @@
+/**************************************************************************
+
+Copyright © 2006 Dave Airlie
+
+All Rights Reserved.
+
+Permission is hereby granted, free of charge, to any person obtaining a
+copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sub license, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice (including the
+next paragraph) shall be included in all copies or substantial portions
+of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+**************************************************************************/
+
+#include "dvo.h"
+
+#define SIL164_VID 0x0001
+#define SIL164_DID 0x0006
+
+#define SIL164_VID_LO 0x00
+#define SIL164_VID_HI 0x01
+#define SIL164_DID_LO 0x02
+#define SIL164_DID_HI 0x03
+#define SIL164_REV    0x04
+#define SIL164_RSVD   0x05
+#define SIL164_FREQ_LO 0x06
+#define SIL164_FREQ_HI 0x07
+
+#define SIL164_REG8 0x08
+#define SIL164_8_VEN (1<<5)
+#define SIL164_8_HEN (1<<4)
+#define SIL164_8_DSEL (1<<3)
+#define SIL164_8_BSEL (1<<2)
+#define SIL164_8_EDGE (1<<1)
+#define SIL164_8_PD   (1<<0)
+
+#define SIL164_REG9 0x09
+#define SIL164_9_VLOW (1<<7)
+#define SIL164_9_MSEL_MASK (0x7<<4)
+#define SIL164_9_TSEL (1<<3)
+#define SIL164_9_RSEN (1<<2)
+#define SIL164_9_HTPLG (1<<1)
+#define SIL164_9_MDI (1<<0)
+
+#define SIL164_REGC 0x0c
+
+struct sil164_save_rec {
+       uint8_t reg8;
+       uint8_t reg9;
+       uint8_t regc;
+};
+
+struct sil164_priv {
+       //I2CDevRec d;
+       bool quiet;
+       struct sil164_save_rec save_regs;
+       struct sil164_save_rec mode_regs;
+};
+
+#define SILPTR(d) ((SIL164Ptr)(d->DriverPrivate.ptr))
+
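+/** Reads an 8-bit register from the SiI164 over the DVO i2c bus. */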
+static bool sil164_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
+{
+       struct sil164_priv *sil = dvo->dev_priv;
+       struct intel_i2c_chan *i2cbus = dvo->i2c_bus;
+       u8 out_buf[2];
+       u8 in_buf[2];
+
+       struct i2c_msg msgs[] = {
+               {
+                       .addr = i2cbus->slave_addr,
+                       .flags = 0,
+                       .len = 1,
+                       .buf = out_buf,
+               },
+               {
+                       .addr = i2cbus->slave_addr,
+                       .flags = I2C_M_RD,
+                       .len = 1,
+                       .buf = in_buf,
+               }
+       };
+
+       out_buf[0] = addr;
+       out_buf[1] = 0;
+
+       if (i2c_transfer(&i2cbus->adapter, msgs, 2) == 2) {
+               *ch = in_buf[0];
+               return true;
+       }
+
+       if (!sil->quiet) {
+               DRM_DEBUG("Unable to read register 0x%02x from %s:%02x.\n",
+                         addr, i2cbus->adapter.name, i2cbus->slave_addr);
+       }
+       return false;
+}
+
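+/** Writes an 8-bit register on the SiI164 over the DVO i2c bus. */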
+static bool sil164_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
+{
+       struct sil164_priv *sil = dvo->dev_priv;
+       struct intel_i2c_chan *i2cbus = dvo->i2c_bus;
+       uint8_t out_buf[2];
+       struct i2c_msg msg = {
+               .addr = i2cbus->slave_addr,
+               .flags = 0,
+               .len = 2,
+               .buf = out_buf,
+       };
+
+       out_buf[0] = addr;
+       out_buf[1] = ch;
+
+       if (i2c_transfer(&i2cbus->adapter, &msg, 1) == 1)
+               return true;
+
+       if (!sil->quiet) {
+               DRM_DEBUG("Unable to write register 0x%02x to %s:%d.\n",
+                         addr, i2cbus->adapter.name, i2cbus->slave_addr);
+       }
+
+       return false;
+}
+
+/* Silicon Image 164 driver for chip on i2c bus */
+static bool sil164_init(struct intel_dvo_device *dvo,
+                       struct intel_i2c_chan *i2cbus)
+{
+       /* this will detect the SIL164 chip on the specified i2c bus */
+       struct sil164_priv *sil;
+       unsigned char ch;
+
+       sil = kzalloc(sizeof(struct sil164_priv), GFP_KERNEL);
+       if (sil == NULL)
+               return false;
+
+       dvo->i2c_bus = i2cbus;
+       dvo->i2c_bus->slave_addr = dvo->slave_addr;
+       dvo->dev_priv = sil;
+       sil->quiet = true;
+
+       if (!sil164_readb(dvo, SIL164_VID_LO, &ch))
+               goto out;
+
+       if (ch != (SIL164_VID & 0xff)) {
+               DRM_DEBUG("sil164 not detected, got %d from %s slave %d.\n",
+                         ch, i2cbus->adapter.name, i2cbus->slave_addr);
+               goto out;
+       }
+
+       if (!sil164_readb(dvo, SIL164_DID_LO, &ch))
+               goto out;
+
+       if (ch != (SIL164_DID & 0xff)) {
+               DRM_DEBUG("sil164 not detected, got %d from %s slave %d.\n",
+                         ch, i2cbus->adapter.name, i2cbus->slave_addr);
+               goto out;
+       }
+       sil->quiet = false;
+
+       DRM_DEBUG("sil164 DVO controller initialized successfully\n");
+       return true;
+
+out:
+       kfree(sil);
+       return false;
+}
+
+static enum drm_connector_status sil164_detect(struct intel_dvo_device *dvo)
+{
+       uint8_t reg9;
+
+       sil164_readb(dvo, SIL164_REG9, &reg9);
+
+       if (reg9 & SIL164_9_HTPLG)
+               return connector_status_connected;
+       else
+               return connector_status_disconnected;
+}
+
+static enum drm_mode_status sil164_mode_valid(struct intel_dvo_device *dvo,
+                                             struct drm_display_mode *mode)
+{
+       return MODE_OK;
+}
+
+static void sil164_mode_set(struct intel_dvo_device *dvo,
+                           struct drm_display_mode *mode,
+                           struct drm_display_mode *adjusted_mode)
+{
+       /* As long as the basics are set up, since we don't have clock
+        * dependencies in the mode setup, we can just leave the
+        * registers alone and everything will work fine.
+        */
+       /* recommended programming sequence from doc */
+       /*sil164_writeb(sil, 0x08, 0x30);
+         sil164_writeb(sil, 0x09, 0x00);
+         sil164_writeb(sil, 0x0a, 0x90);
+         sil164_writeb(sil, 0x0c, 0x89);
+         sil164_writeb(sil, 0x08, 0x31);*/
+       /* don't do much */
+       return;
+}
+
+/* set the SIL164 power state */
+static void sil164_dpms(struct intel_dvo_device *dvo, int mode)
+{
+       bool ret;
+       unsigned char ch;
+
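+       /* Bit 0 (PD) is set for normal operation; sil164_restore below clears
+        * it first to keep the part powered down while reprogramming. */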
+       ret = sil164_readb(dvo, SIL164_REG8, &ch);
+       if (!ret)
+               return;
+
+       if (mode == DRM_MODE_DPMS_ON)
+               ch |= SIL164_8_PD;
+       else
+               ch &= ~SIL164_8_PD;
+
+       sil164_writeb(dvo, SIL164_REG8, ch);
+       return;
+}
+
+static void sil164_dump_regs(struct intel_dvo_device *dvo)
+{
+       uint8_t val;
+
+       sil164_readb(dvo, SIL164_FREQ_LO, &val);
+       DRM_DEBUG("SIL164_FREQ_LO: 0x%02x\n", val);
+       sil164_readb(dvo, SIL164_FREQ_HI, &val);
+       DRM_DEBUG("SIL164_FREQ_HI: 0x%02x\n", val);
+       sil164_readb(dvo, SIL164_REG8, &val);
+       DRM_DEBUG("SIL164_REG8: 0x%02x\n", val);
+       sil164_readb(dvo, SIL164_REG9, &val);
+       DRM_DEBUG("SIL164_REG9: 0x%02x\n", val);
+       sil164_readb(dvo, SIL164_REGC, &val);
+       DRM_DEBUG("SIL164_REGC: 0x%02x\n", val);
+}
+
+static void sil164_save(struct intel_dvo_device *dvo)
+{
+       struct sil164_priv *sil = dvo->dev_priv;
+
+       if (!sil164_readb(dvo, SIL164_REG8, &sil->save_regs.reg8))
+               return;
+
+       if (!sil164_readb(dvo, SIL164_REG9, &sil->save_regs.reg9))
+               return;
+
+       if (!sil164_readb(dvo, SIL164_REGC, &sil->save_regs.regc))
+               return;
+
+       return;
+}
+
+static void sil164_restore(struct intel_dvo_device *dvo)
+{
+       struct sil164_priv *sil = dvo->dev_priv;
+
+       /* Restore it powered down initially */
+       sil164_writeb(dvo, SIL164_REG8, sil->save_regs.reg8 & ~0x1);
+
+       sil164_writeb(dvo, SIL164_REG9, sil->save_regs.reg9);
+       sil164_writeb(dvo, SIL164_REGC, sil->save_regs.regc);
+       sil164_writeb(dvo, SIL164_REG8, sil->save_regs.reg8);
+}
+
+static void sil164_destroy(struct intel_dvo_device *dvo)
+{
+       struct sil164_priv *sil = dvo->dev_priv;
+
+       if (sil) {
+               kfree(sil);
+               dvo->dev_priv = NULL;
+       }
+}
+
+struct intel_dvo_dev_ops sil164_ops = {
+       .init = sil164_init,
+       .detect = sil164_detect,
+       .mode_valid = sil164_mode_valid,
+       .mode_set = sil164_mode_set,
+       .dpms = sil164_dpms,
+       .dump_regs = sil164_dump_regs,
+       .save = sil164_save,
+       .restore = sil164_restore,
+       .destroy = sil164_destroy,
+};
diff --git a/drivers/gpu/drm/i915/dvo_tfp410.c b/drivers/gpu/drm/i915/dvo_tfp410.c
new file mode 100644 (file)
index 0000000..207fda8
--- /dev/null
@@ -0,0 +1,335 @@
+/*
+ * Copyright © 2007 Dave Mueller
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Dave Mueller <dave.mueller@gmx.ch>
+ *
+ */
+
+#include "dvo.h"
+
+/* register definitions according to the TFP410 data sheet */
+#define TFP410_VID             0x014C
+#define TFP410_DID             0x0410
+
+#define TFP410_VID_LO          0x00
+#define TFP410_VID_HI          0x01
+#define TFP410_DID_LO          0x02
+#define TFP410_DID_HI          0x03
+#define TFP410_REV             0x04
+
+#define TFP410_CTL_1           0x08
+#define TFP410_CTL_1_TDIS      (1<<6)
+#define TFP410_CTL_1_VEN       (1<<5)
+#define TFP410_CTL_1_HEN       (1<<4)
+#define TFP410_CTL_1_DSEL      (1<<3)
+#define TFP410_CTL_1_BSEL      (1<<2)
+#define TFP410_CTL_1_EDGE      (1<<1)
+#define TFP410_CTL_1_PD                (1<<0)
+
+#define TFP410_CTL_2           0x09
+#define TFP410_CTL_2_VLOW      (1<<7)
+#define TFP410_CTL_2_MSEL_MASK (0x7<<4)
+#define TFP410_CTL_2_MSEL      (1<<4)
+#define TFP410_CTL_2_TSEL      (1<<3)
+#define TFP410_CTL_2_RSEN      (1<<2)
+#define TFP410_CTL_2_HTPLG     (1<<1)
+#define TFP410_CTL_2_MDI       (1<<0)
+
+#define TFP410_CTL_3           0x0A
+#define TFP410_CTL_3_DK_MASK   (0x7<<5)
+#define TFP410_CTL_3_DK                (1<<5)
+#define TFP410_CTL_3_DKEN      (1<<4)
+#define TFP410_CTL_3_CTL_MASK  (0x7<<1)
+#define TFP410_CTL_3_CTL       (1<<1)
+
+#define TFP410_USERCFG         0x0B
+
+#define TFP410_DE_DLY          0x32
+
+#define TFP410_DE_CTL          0x33
+#define TFP410_DE_CTL_DEGEN    (1<<6)
+#define TFP410_DE_CTL_VSPOL    (1<<5)
+#define TFP410_DE_CTL_HSPOL    (1<<4)
+#define TFP410_DE_CTL_DEDLY8   (1<<0)
+
+#define TFP410_DE_TOP          0x34
+
+#define TFP410_DE_CNT_LO       0x36
+#define TFP410_DE_CNT_HI       0x37
+
+#define TFP410_DE_LIN_LO       0x38
+#define TFP410_DE_LIN_HI       0x39
+
+#define TFP410_H_RES_LO                0x3A
+#define TFP410_H_RES_HI                0x3B
+
+#define TFP410_V_RES_LO                0x3C
+#define TFP410_V_RES_HI                0x3D
+
+struct tfp410_save_rec {
+       uint8_t ctl1;
+       uint8_t ctl2;
+};
+
+struct tfp410_priv {
+       bool quiet;
+
+       struct tfp410_save_rec saved_reg;
+       struct tfp410_save_rec mode_reg;
+};
+
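+/** Reads an 8-bit register from the TFP410 over the DVO i2c bus. */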
+static bool tfp410_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
+{
+       struct tfp410_priv *tfp = dvo->dev_priv;
+       struct intel_i2c_chan *i2cbus = dvo->i2c_bus;
+       u8 out_buf[2];
+       u8 in_buf[2];
+
+       struct i2c_msg msgs[] = {
+               {
+                       .addr = i2cbus->slave_addr,
+                       .flags = 0,
+                       .len = 1,
+                       .buf = out_buf,
+               },
+               {
+                       .addr = i2cbus->slave_addr,
+                       .flags = I2C_M_RD,
+                       .len = 1,
+                       .buf = in_buf,
+               }
+       };
+
+       out_buf[0] = addr;
+       out_buf[1] = 0;
+
+       if (i2c_transfer(&i2cbus->adapter, msgs, 2) == 2) {
+               *ch = in_buf[0];
+               return true;
+       }
+
+       if (!tfp->quiet) {
+               DRM_DEBUG("Unable to read register 0x%02x from %s:%02x.\n",
+                         addr, i2cbus->adapter.name, i2cbus->slave_addr);
+       }
+       return false;
+}
+
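+/** Writes an 8-bit register on the TFP410 over the DVO i2c bus. */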
+static bool tfp410_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
+{
+       struct tfp410_priv *tfp = dvo->dev_priv;
+       struct intel_i2c_chan *i2cbus = dvo->i2c_bus;
+       uint8_t out_buf[2];
+       struct i2c_msg msg = {
+               .addr = i2cbus->slave_addr,
+               .flags = 0,
+               .len = 2,
+               .buf = out_buf,
+       };
+
+       out_buf[0] = addr;
+       out_buf[1] = ch;
+
+       if (i2c_transfer(&i2cbus->adapter, &msg, 1) == 1)
+               return true;
+
+       if (!tfp->quiet) {
+               DRM_DEBUG("Unable to write register 0x%02x to %s:%d.\n",
+                         addr, i2cbus->adapter.name, i2cbus->slave_addr);
+       }
+
+       return false;
+}
+
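+/* Reads the two-byte ID starting at the given register pair (LO/HI) and
+ * returns it as a 16-bit value, or -1 if either read fails. */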
+static int tfp410_getid(struct intel_dvo_device *dvo, int addr)
+{
+       uint8_t ch1, ch2;
+
+       if (tfp410_readb(dvo, addr+0, &ch1) &&
+           tfp410_readb(dvo, addr+1, &ch2))
+               return ((ch2 << 8) & 0xFF00) | (ch1 & 0x00FF);
+
+       return -1;
+}
+
+/* Ti TFP410 driver for chip on i2c bus */
+static bool tfp410_init(struct intel_dvo_device *dvo,
+                       struct intel_i2c_chan *i2cbus)
+{
+       /* this will detect the tfp410 chip on the specified i2c bus */
+       struct tfp410_priv *tfp;
+       int id;
+
+       tfp = kzalloc(sizeof(struct tfp410_priv), GFP_KERNEL);
+       if (tfp == NULL)
+               return false;
+
+       dvo->i2c_bus = i2cbus;
+       dvo->i2c_bus->slave_addr = dvo->slave_addr;
+       dvo->dev_priv = tfp;
+       tfp->quiet = true;
+
+       if ((id = tfp410_getid(dvo, TFP410_VID_LO)) != TFP410_VID) {
+               DRM_DEBUG("tfp410 not detected, got VID 0x%X from %s slave %d.\n",
+                         id, i2cbus->adapter.name, i2cbus->slave_addr);
+               goto out;
+       }
+
+       if ((id = tfp410_getid(dvo, TFP410_DID_LO)) != TFP410_DID) {
+               DRM_DEBUG("tfp410 not detected, got DID 0x%X from %s slave %d.\n",
+                         id, i2cbus->adapter.name, i2cbus->slave_addr);
+               goto out;
+       }
+       tfp->quiet = false;
+       return true;
+out:
+       kfree(tfp);
+       return false;
+}
+
+static enum drm_connector_status tfp410_detect(struct intel_dvo_device *dvo)
+{
+       enum drm_connector_status ret = connector_status_disconnected;
+       uint8_t ctl2;
+
+       if (tfp410_readb(dvo, TFP410_CTL_2, &ctl2)) {
+               if (ctl2 & TFP410_CTL_2_HTPLG)
+                       ret = connector_status_connected;
+               else
+                       ret = connector_status_disconnected;
+       }
+
+       return ret;
+}
+
+static enum drm_mode_status tfp410_mode_valid(struct intel_dvo_device *dvo,
+                                             struct drm_display_mode *mode)
+{
+       return MODE_OK;
+}
+
+static void tfp410_mode_set(struct intel_dvo_device *dvo,
+                           struct drm_display_mode *mode,
+                           struct drm_display_mode *adjusted_mode)
+{
+       /* As long as the basics are set up, since we don't have clock
+        * dependencies in the mode setup, we can just leave the registers
+        * alone and everything will work fine.
+        */
+       /* don't do much */
+       return;
+}
+
+/* set the tfp410 power state */
+static void tfp410_dpms(struct intel_dvo_device *dvo, int mode)
+{
+       uint8_t ctl1;
+
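+       /* As with the SiI164, the PD bit is set for normal operation and
+        * cleared to power the transmitter down. */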
+       if (!tfp410_readb(dvo, TFP410_CTL_1, &ctl1))
+               return;
+
+       if (mode == DRM_MODE_DPMS_ON)
+               ctl1 |= TFP410_CTL_1_PD;
+       else
+               ctl1 &= ~TFP410_CTL_1_PD;
+
+       tfp410_writeb(dvo, TFP410_CTL_1, ctl1);
+}
+
+static void tfp410_dump_regs(struct intel_dvo_device *dvo)
+{
+       uint8_t val, val2;
+
+       tfp410_readb(dvo, TFP410_REV, &val);
+       DRM_DEBUG("TFP410_REV: 0x%02X\n", val);
+       tfp410_readb(dvo, TFP410_CTL_1, &val);
+       DRM_DEBUG("TFP410_CTL1: 0x%02X\n", val);
+       tfp410_readb(dvo, TFP410_CTL_2, &val);
+       DRM_DEBUG("TFP410_CTL2: 0x%02X\n", val);
+       tfp410_readb(dvo, TFP410_CTL_3, &val);
+       DRM_DEBUG("TFP410_CTL3: 0x%02X\n", val);
+       tfp410_readb(dvo, TFP410_USERCFG, &val);
+       DRM_DEBUG("TFP410_USERCFG: 0x%02X\n", val);
+       tfp410_readb(dvo, TFP410_DE_DLY, &val);
+       DRM_DEBUG("TFP410_DE_DLY: 0x%02X\n", val);
+       tfp410_readb(dvo, TFP410_DE_CTL, &val);
+       DRM_DEBUG("TFP410_DE_CTL: 0x%02X\n", val);
+       tfp410_readb(dvo, TFP410_DE_TOP, &val);
+       DRM_DEBUG("TFP410_DE_TOP: 0x%02X\n", val);
+       tfp410_readb(dvo, TFP410_DE_CNT_LO, &val);
+       tfp410_readb(dvo, TFP410_DE_CNT_HI, &val2);
+       DRM_DEBUG("TFP410_DE_CNT: 0x%02X%02X\n", val2, val);
+       tfp410_readb(dvo, TFP410_DE_LIN_LO, &val);
+       tfp410_readb(dvo, TFP410_DE_LIN_HI, &val2);
+       DRM_DEBUG("TFP410_DE_LIN: 0x%02X%02X\n", val2, val);
+       tfp410_readb(dvo, TFP410_H_RES_LO, &val);
+       tfp410_readb(dvo, TFP410_H_RES_HI, &val2);
+       DRM_DEBUG("TFP410_H_RES: 0x%02X%02X\n", val2, val);
+       tfp410_readb(dvo, TFP410_V_RES_LO, &val);
+       tfp410_readb(dvo, TFP410_V_RES_HI, &val2);
+       DRM_DEBUG("TFP410_V_RES: 0x%02X%02X\n", val2, val);
+}
+
+static void tfp410_save(struct intel_dvo_device *dvo)
+{
+       struct tfp410_priv *tfp = dvo->dev_priv;
+
+       if (!tfp410_readb(dvo, TFP410_CTL_1, &tfp->saved_reg.ctl1))
+               return;
+
+       if (!tfp410_readb(dvo, TFP410_CTL_2, &tfp->saved_reg.ctl2))
+               return;
+}
+
+static void tfp410_restore(struct intel_dvo_device *dvo)
+{
+       struct tfp410_priv *tfp = dvo->dev_priv;
+
+       /* Restore it powered down initially */
+       tfp410_writeb(dvo, TFP410_CTL_1, tfp->saved_reg.ctl1 & ~0x1);
+
+       tfp410_writeb(dvo, TFP410_CTL_2, tfp->saved_reg.ctl2);
+       tfp410_writeb(dvo, TFP410_CTL_1, tfp->saved_reg.ctl1);
+}
+
+static void tfp410_destroy(struct intel_dvo_device *dvo)
+{
+       struct tfp410_priv *tfp = dvo->dev_priv;
+
+       if (tfp) {
+               kfree(tfp);
+               dvo->dev_priv = NULL;
+       }
+}
+
+struct intel_dvo_dev_ops tfp410_ops = {
+       .init = tfp410_init,
+       .detect = tfp410_detect,
+       .mode_valid = tfp410_mode_valid,
+       .mode_set = tfp410_mode_set,
+       .dpms = tfp410_dpms,
+       .dump_regs = tfp410_dump_regs,
+       .save = tfp410_save,
+       .restore = tfp410_restore,
+       .destroy = tfp410_destroy,
+};
index afa8a12cd00902c1c34ea4f4ba83e55c3b7a74e2..3d7082af5b72b5f5287412f7b6294d9947ace12c 100644 (file)
@@ -28,6 +28,8 @@
 
 #include "drmP.h"
 #include "drm.h"
+#include "drm_crtc_helper.h"
+#include "intel_drv.h"
 #include "i915_drm.h"
 #include "i915_drv.h"
 
@@ -39,6 +41,7 @@
 int i915_wait_ring(struct drm_device * dev, int n, const char *caller)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
+       struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
        drm_i915_ring_buffer_t *ring = &(dev_priv->ring);
        u32 acthd_reg = IS_I965G(dev) ? ACTHD_I965 : ACTHD;
        u32 last_acthd = I915_READ(acthd_reg);
@@ -55,8 +58,8 @@ int i915_wait_ring(struct drm_device * dev, int n, const char *caller)
                if (ring->space >= n)
                        return 0;
 
-               if (dev_priv->sarea_priv)
-                       dev_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
+               if (master_priv->sarea_priv)
+                       master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
 
                if (ring->head != last_head)
                        i = 0;
@@ -121,16 +124,28 @@ static void i915_free_hws(struct drm_device *dev)
 void i915_kernel_lost_context(struct drm_device * dev)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
+       struct drm_i915_master_private *master_priv;
        drm_i915_ring_buffer_t *ring = &(dev_priv->ring);
 
+       /*
+        * We should never lose context on the ring with modesetting
+        * as we don't expose it to userspace
+        */
+       if (drm_core_check_feature(dev, DRIVER_MODESET))
+               return;
+
        ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
        ring->tail = I915_READ(PRB0_TAIL) & TAIL_ADDR;
        ring->space = ring->head - (ring->tail + 8);
        if (ring->space < 0)
                ring->space += ring->Size;
 
-       if (ring->head == ring->tail && dev_priv->sarea_priv)
-               dev_priv->sarea_priv->perf_boxes |= I915_BOX_RING_EMPTY;
+       if (!dev->primary->master)
+               return;
+
+       master_priv = dev->primary->master->driver_priv;
+       if (ring->head == ring->tail && master_priv->sarea_priv)
+               master_priv->sarea_priv->perf_boxes |= I915_BOX_RING_EMPTY;
 }
 
 static int i915_dma_cleanup(struct drm_device * dev)
@@ -154,25 +169,13 @@ static int i915_dma_cleanup(struct drm_device * dev)
        if (I915_NEED_GFX_HWS(dev))
                i915_free_hws(dev);
 
-       dev_priv->sarea = NULL;
-       dev_priv->sarea_priv = NULL;
-
        return 0;
 }
 
 static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
-
-       dev_priv->sarea = drm_getsarea(dev);
-       if (!dev_priv->sarea) {
-               DRM_ERROR("can not find sarea!\n");
-               i915_dma_cleanup(dev);
-               return -EINVAL;
-       }
-
-       dev_priv->sarea_priv = (drm_i915_sarea_t *)
-           ((u8 *) dev_priv->sarea->handle + init->sarea_priv_offset);
+       struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
 
        if (init->ring_size != 0) {
                if (dev_priv->ring.ring_obj != NULL) {
@@ -207,7 +210,8 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
        dev_priv->back_offset = init->back_offset;
        dev_priv->front_offset = init->front_offset;
        dev_priv->current_page = 0;
-       dev_priv->sarea_priv->pf_current_page = dev_priv->current_page;
+       if (master_priv->sarea_priv)
+               master_priv->sarea_priv->pf_current_page = 0;
 
        /* Allow hardware batchbuffers unless told otherwise.
         */
@@ -222,11 +226,6 @@ static int i915_dma_resume(struct drm_device * dev)
 
        DRM_DEBUG("%s\n", __func__);
 
-       if (!dev_priv->sarea) {
-               DRM_ERROR("can not find sarea!\n");
-               return -EINVAL;
-       }
-
        if (dev_priv->ring.map.handle == NULL) {
                DRM_ERROR("can not ioremap virtual address for"
                          " ring buffer\n");
@@ -435,13 +434,14 @@ i915_emit_box(struct drm_device *dev,
 static void i915_emit_breadcrumb(struct drm_device *dev)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
+       struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
        RING_LOCALS;
 
        dev_priv->counter++;
        if (dev_priv->counter > 0x7FFFFFFFUL)
                dev_priv->counter = 0;
-       if (dev_priv->sarea_priv)
-               dev_priv->sarea_priv->last_enqueue = dev_priv->counter;
+       if (master_priv->sarea_priv)
+               master_priv->sarea_priv->last_enqueue = dev_priv->counter;
 
        BEGIN_LP_RING(4);
        OUT_RING(MI_STORE_DWORD_INDEX);
@@ -537,15 +537,17 @@ static int i915_dispatch_batchbuffer(struct drm_device * dev,
 static int i915_dispatch_flip(struct drm_device * dev)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
+       struct drm_i915_master_private *master_priv =
+               dev->primary->master->driver_priv;
        RING_LOCALS;
 
-       if (!dev_priv->sarea_priv)
+       if (!master_priv->sarea_priv)
                return -EINVAL;
 
        DRM_DEBUG("%s: page=%d pfCurrentPage=%d\n",
                  __func__,
                  dev_priv->current_page,
-                 dev_priv->sarea_priv->pf_current_page);
+                 master_priv->sarea_priv->pf_current_page);
 
        i915_kernel_lost_context(dev);
 
@@ -572,7 +574,7 @@ static int i915_dispatch_flip(struct drm_device * dev)
        OUT_RING(0);
        ADVANCE_LP_RING();
 
-       dev_priv->sarea_priv->last_enqueue = dev_priv->counter++;
+       master_priv->sarea_priv->last_enqueue = dev_priv->counter++;
 
        BEGIN_LP_RING(4);
        OUT_RING(MI_STORE_DWORD_INDEX);
@@ -581,7 +583,7 @@ static int i915_dispatch_flip(struct drm_device * dev)
        OUT_RING(0);
        ADVANCE_LP_RING();
 
-       dev_priv->sarea_priv->pf_current_page = dev_priv->current_page;
+       master_priv->sarea_priv->pf_current_page = dev_priv->current_page;
        return 0;
 }
 
@@ -611,8 +613,9 @@ static int i915_batchbuffer(struct drm_device *dev, void *data,
                            struct drm_file *file_priv)
 {
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+       struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
        drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
-           dev_priv->sarea_priv;
+           master_priv->sarea_priv;
        drm_i915_batchbuffer_t *batch = data;
        int ret;
 
@@ -644,8 +647,9 @@ static int i915_cmdbuffer(struct drm_device *dev, void *data,
                          struct drm_file *file_priv)
 {
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+       struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
        drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
-           dev_priv->sarea_priv;
+           master_priv->sarea_priv;
        drm_i915_cmdbuffer_t *cmdbuf = data;
        int ret;
 
@@ -774,6 +778,11 @@ static int i915_set_status_page(struct drm_device *dev, void *data,
                return -EINVAL;
        }
 
+       if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+               WARN(1, "tried to set status page when mode setting active\n");
+               return 0;
+       }
+
        printk(KERN_DEBUG "set status page addr 0x%08x\n", (u32)hws->addr);
 
        dev_priv->status_gfx_addr = hws->addr & (0x1ffff<<12);
@@ -802,6 +811,214 @@ static int i915_set_status_page(struct drm_device *dev, void *data,
        return 0;
 }
 
+/**
+ * i915_probe_agp - get AGP bootup configuration
+ * @dev: DRM device
+ * @aperture_size: returns the configured AGP aperture size
+ * @preallocated_size: returns the size of BIOS-preallocated AGP space
+ *
+ * Since Intel integrated graphics are UMA, the BIOS has to set aside
+ * some RAM for the framebuffer at early boot.  This code figures out
+ * how much was set aside so we can use it for our own purposes.
+ */
+static int i915_probe_agp(struct drm_device *dev, unsigned long *aperture_size,
+                         unsigned long *preallocated_size)
+{
+       struct pci_dev *bridge_dev;
+       u16 tmp = 0;
+       unsigned long overhead;
+
+       bridge_dev = pci_get_bus_and_slot(0, PCI_DEVFN(0,0));
+       if (!bridge_dev) {
+               DRM_ERROR("bridge device not found\n");
+               return -1;
+       }
+
+       /* Get the fb aperture size and "stolen" memory amount. */
+       pci_read_config_word(bridge_dev, INTEL_GMCH_CTRL, &tmp);
+       pci_dev_put(bridge_dev);
+
+       *aperture_size = 1024 * 1024;
+       *preallocated_size = 1024 * 1024;
+
+       switch (dev->pdev->device) {
+       case PCI_DEVICE_ID_INTEL_82830_CGC:
+       case PCI_DEVICE_ID_INTEL_82845G_IG:
+       case PCI_DEVICE_ID_INTEL_82855GM_IG:
+       case PCI_DEVICE_ID_INTEL_82865_IG:
+               if ((tmp & INTEL_GMCH_MEM_MASK) == INTEL_GMCH_MEM_64M)
+                       *aperture_size *= 64;
+               else
+                       *aperture_size *= 128;
+               break;
+       default:
+               /* 9xx supports large sizes, just look at the length */
+               *aperture_size = pci_resource_len(dev->pdev, 2);
+               break;
+       }
+
+       /*
+        * Some of the preallocated space is taken by the GTT
+        * and popup.  GTT is 1K per MB of aperture size, and popup is 4K.
+        */
+       if (IS_G4X(dev))
+               overhead = 4096;
+       else
+               overhead = (*aperture_size / 1024) + 4096;
+
+       switch (tmp & INTEL_855_GMCH_GMS_MASK) {
+       case INTEL_855_GMCH_GMS_STOLEN_1M:
+               break; /* 1M already */
+       case INTEL_855_GMCH_GMS_STOLEN_4M:
+               *preallocated_size *= 4;
+               break;
+       case INTEL_855_GMCH_GMS_STOLEN_8M:
+               *preallocated_size *= 8;
+               break;
+       case INTEL_855_GMCH_GMS_STOLEN_16M:
+               *preallocated_size *= 16;
+               break;
+       case INTEL_855_GMCH_GMS_STOLEN_32M:
+               *preallocated_size *= 32;
+               break;
+       case INTEL_915G_GMCH_GMS_STOLEN_48M:
+               *preallocated_size *= 48;
+               break;
+       case INTEL_915G_GMCH_GMS_STOLEN_64M:
+               *preallocated_size *= 64;
+               break;
+       case INTEL_855_GMCH_GMS_DISABLED:
+               DRM_ERROR("video memory is disabled\n");
+               return -1;
+       default:
+               DRM_ERROR("unexpected GMCH_GMS value: 0x%02x\n",
+                       tmp & INTEL_855_GMCH_GMS_MASK);
+               return -1;
+       }
+       *preallocated_size -= overhead;
+
+       return 0;
+}
+
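To make the sizing logic above concrete, here is a worked example written as a C comment; the 855GM-class numbers are chosen purely for illustration and are not taken from the patch.

/*
 * Hypothetical 855GM-class part:
 *   GMCH control word reports a 64 MB aperture and 8 MB of stolen memory.
 *   aperture_size     = 64 MB
 *   overhead          = 64 MB / 1024 + 4 KB = 68 KB    (GTT + popup)
 *   preallocated_size = 8 MB - 68 KB = 8124 KB of usable stolen space
 */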
+static int i915_load_modeset_init(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       unsigned long agp_size, prealloc_size;
+       int fb_bar = IS_I9XX(dev) ? 2 : 0;
+       int ret = 0;
+
+       dev->devname = kstrdup(DRIVER_NAME, GFP_KERNEL);
+       if (!dev->devname) {
+               ret = -ENOMEM;
+               goto out;
+       }
+
+       dev->mode_config.fb_base = drm_get_resource_start(dev, fb_bar) &
+               0xff000000;
+
+       DRM_DEBUG("*** fb base 0x%08lx\n", dev->mode_config.fb_base);
+
+       if (IS_MOBILE(dev) || (IS_I9XX(dev) && !IS_I965G(dev) && !IS_G33(dev)))
+               dev_priv->cursor_needs_physical = true;
+       else
+               dev_priv->cursor_needs_physical = false;
+
+       ret = i915_probe_agp(dev, &agp_size, &prealloc_size);
+       if (ret)
+               goto kfree_devname;
+
+       /* Basic memrange allocator for stolen space (aka vram) */
+       drm_mm_init(&dev_priv->vram, 0, prealloc_size);
+
+       /* Let GEM manage from end of prealloc space to end of aperture */
+       i915_gem_do_init(dev, prealloc_size, agp_size);
+
+       ret = i915_gem_init_ringbuffer(dev);
+       if (ret)
+               goto kfree_devname;
+
+       dev_priv->mm.gtt_mapping =
+               io_mapping_create_wc(dev->agp->base,
+                                    dev->agp->agp_info.aper_size * 1024*1024);
+
+       /* Allow hardware batchbuffers unless told otherwise.
+        */
+       dev_priv->allow_batchbuffer = 1;
+
+       ret = intel_init_bios(dev);
+       if (ret)
+               DRM_INFO("failed to find VBIOS tables\n");
+
+       ret = drm_irq_install(dev);
+       if (ret)
+               goto destroy_ringbuffer;
+
+       /* FIXME: re-add hotplug support */
+#if 0
+       ret = drm_hotplug_init(dev);
+       if (ret)
+               goto destroy_ringbuffer;
+#endif
+
+       /* Always safe in the mode setting case. */
+       /* FIXME: do pre/post-mode set stuff in core KMS code */
+       dev->vblank_disable_allowed = 1;
+
+       /*
+        * Initialize the hardware status page IRQ location.
+        */
+
+       I915_WRITE(INSTPM, (1 << 5) | (1 << 21));
+
+       intel_modeset_init(dev);
+
+       drm_helper_initial_config(dev, false);
+
+       return 0;
+
+destroy_ringbuffer:
+       i915_gem_cleanup_ringbuffer(dev);
+kfree_devname:
+       kfree(dev->devname);
+out:
+       return ret;
+}
+
+int i915_master_create(struct drm_device *dev, struct drm_master *master)
+{
+       struct drm_i915_master_private *master_priv;
+
+       master_priv = drm_calloc(1, sizeof(*master_priv), DRM_MEM_DRIVER);
+       if (!master_priv)
+               return -ENOMEM;
+
+       master->driver_priv = master_priv;
+       return 0;
+}
+
+void i915_master_destroy(struct drm_device *dev, struct drm_master *master)
+{
+       struct drm_i915_master_private *master_priv = master->driver_priv;
+
+       if (!master_priv)
+               return;
+
+       drm_free(master_priv, sizeof(*master_priv), DRM_MEM_DRIVER);
+
+       master->driver_priv = NULL;
+}
+
+/**
+ * i915_driver_load - setup chip and create an initial config
+ * @dev: DRM device
+ * @flags: startup flags
+ *
+ * The driver load routine has to do several things:
+ *   - drive output discovery via intel_modeset_init()
+ *   - initialize the memory manager
+ *   - allocate initial config memory
+ *   - setup the DRM framebuffer with the allocated memory
+ */
 int i915_driver_load(struct drm_device *dev, unsigned long flags)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
@@ -829,6 +1046,11 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
        size = drm_get_resource_len(dev, mmio_bar);
 
        dev_priv->regs = ioremap(base, size);
+       if (!dev_priv->regs) {
+               DRM_ERROR("failed to map registers\n");
+               ret = -EIO;
+               goto free_priv;
+       }
 
 #ifdef CONFIG_HIGHMEM64G
        /* don't enable GEM on PAE - needs agp + set_memory_* interface fixes */
@@ -844,7 +1066,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
        if (!I915_NEED_GFX_HWS(dev)) {
                ret = i915_init_phys_hws(dev);
                if (ret != 0)
-                       return ret;
+                       goto out_rmmap;
        }
 
        /* On the 945G/GM, the chipset reports the MSI capability on the
@@ -864,6 +1086,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
        intel_opregion_init(dev);
 
        spin_lock_init(&dev_priv->user_irq_lock);
+       dev_priv->user_irq_refcount = 0;
 
        ret = drm_vblank_init(dev, I915_NUM_PIPE);
 
@@ -872,6 +1095,20 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
                return ret;
        }
 
+       if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+               ret = i915_load_modeset_init(dev);
+               if (ret < 0) {
+                       DRM_ERROR("failed to init modeset\n");
+                       goto out_rmmap;
+               }
+       }
+
+       return 0;
+
+out_rmmap:
+       iounmap(dev_priv->regs);
+free_priv:
+       drm_free(dev_priv, sizeof(struct drm_i915_private), DRM_MEM_DRIVER);
        return ret;
 }
 
@@ -879,16 +1116,29 @@ int i915_driver_unload(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
 
+       if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+               io_mapping_free(dev_priv->mm.gtt_mapping);
+               drm_irq_uninstall(dev);
+       }
+
        if (dev->pdev->msi_enabled)
                pci_disable_msi(dev->pdev);
 
-       i915_free_hws(dev);
-
        if (dev_priv->regs != NULL)
                iounmap(dev_priv->regs);
 
        intel_opregion_free(dev);
 
+       if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+               intel_modeset_cleanup(dev);
+
+               mutex_lock(&dev->struct_mutex);
+               i915_gem_cleanup_ringbuffer(dev);
+               mutex_unlock(&dev->struct_mutex);
+               drm_mm_takedown(&dev_priv->vram);
+               i915_gem_lastclose(dev);
+       }
+
        drm_free(dev->dev_private, sizeof(drm_i915_private_t),
                 DRM_MEM_DRIVER);
 
@@ -914,12 +1164,26 @@ int i915_driver_open(struct drm_device *dev, struct drm_file *file_priv)
        return 0;
 }
 
+/**
+ * i915_driver_lastclose - clean up after all DRM clients have exited
+ * @dev: DRM device
+ *
+ * Take care of cleaning up after all DRM clients have exited.  In the
+ * mode setting case, we want to restore the kernel's initial mode (just
+ * in case the last client left us in a bad state).
+ *
+ * Additionally, in the non-mode setting case, we'll tear down the AGP
+ * and DMA structures, since the kernel won't be using them, and clean
+ * up any GEM state.
+ */
 void i915_driver_lastclose(struct drm_device * dev)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
 
-       if (!dev_priv)
+       if (!dev_priv || drm_core_check_feature(dev, DRIVER_MODESET)) {
+               intelfb_restore();
                return;
+       }
 
        i915_gem_lastclose(dev);
 
@@ -932,7 +1196,8 @@ void i915_driver_lastclose(struct drm_device * dev)
 void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
-       i915_mem_release(dev, file_priv, dev_priv->agp_heap);
+       if (!drm_core_check_feature(dev, DRIVER_MODESET))
+               i915_mem_release(dev, file_priv, dev_priv->agp_heap);
 }
 
 void i915_driver_postclose(struct drm_device *dev, struct drm_file *file_priv)
@@ -972,6 +1237,7 @@ struct drm_ioctl_desc i915_ioctls[] = {
        DRM_IOCTL_DEF(DRM_I915_GEM_PREAD, i915_gem_pread_ioctl, 0),
        DRM_IOCTL_DEF(DRM_I915_GEM_PWRITE, i915_gem_pwrite_ioctl, 0),
        DRM_IOCTL_DEF(DRM_I915_GEM_MMAP, i915_gem_mmap_ioctl, 0),
+       DRM_IOCTL_DEF(DRM_I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, 0),
        DRM_IOCTL_DEF(DRM_I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, 0),
        DRM_IOCTL_DEF(DRM_I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, 0),
        DRM_IOCTL_DEF(DRM_I915_GEM_SET_TILING, i915_gem_set_tiling, 0),
index a80ead215282b933aacdb5cfab3b3b45f286d74a..f8b3df0926c015a24bda63d9c7ed1756801c2797 100644 (file)
 #include "i915_drv.h"
 
 #include "drm_pciids.h"
+#include <linux/console.h>
+
+static unsigned int i915_modeset = -1;
+module_param_named(modeset, i915_modeset, int, 0400);
+
+unsigned int i915_fbpercrtc = 0;
+module_param_named(fbpercrtc, i915_fbpercrtc, int, 0400);
 
 static struct pci_device_id pciidlist[] = {
        i915_PCI_IDS
 };
 
+#if defined(CONFIG_DRM_I915_KMS)
+MODULE_DEVICE_TABLE(pci, pciidlist);
+#endif
+
 static int i915_suspend(struct drm_device *dev, pm_message_t state)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
@@ -81,6 +92,10 @@ static int i915_resume(struct drm_device *dev)
        return 0;
 }
 
+static struct vm_operations_struct i915_gem_vm_ops = {
+       .fault = i915_gem_fault,
+};
+
 static struct drm_driver driver = {
        /* don't use mtrr's here, the Xserver or user space app should
         * deal with them for intel hardware.
@@ -107,17 +122,20 @@ static struct drm_driver driver = {
        .reclaim_buffers = drm_core_reclaim_buffers,
        .get_map_ofs = drm_core_get_map_ofs,
        .get_reg_ofs = drm_core_get_reg_ofs,
+       .master_create = i915_master_create,
+       .master_destroy = i915_master_destroy,
        .proc_init = i915_gem_proc_init,
        .proc_cleanup = i915_gem_proc_cleanup,
        .gem_init_object = i915_gem_init_object,
        .gem_free_object = i915_gem_free_object,
+       .gem_vm_ops = &i915_gem_vm_ops,
        .ioctls = i915_ioctls,
        .fops = {
                 .owner = THIS_MODULE,
                 .open = drm_open,
                 .release = drm_release,
                 .ioctl = drm_ioctl,
-                .mmap = drm_mmap,
+                .mmap = drm_gem_mmap,
                 .poll = drm_poll,
                 .fasync = drm_fasync,
 #ifdef CONFIG_COMPAT
@@ -141,6 +159,28 @@ static struct drm_driver driver = {
 static int __init i915_init(void)
 {
        driver.num_ioctls = i915_max_ioctl;
+
+       /*
+        * If CONFIG_DRM_I915_KMS is set, default to KMS unless
+        * explicitly disabled with the module parameter.
+        *
+        * Otherwise, just follow the parameter (defaulting to off).
+        *
+        * Allow optional vga_text_mode_force boot option to override
+        * the default behavior.
+        */
+#if defined(CONFIG_DRM_I915_KMS)
+       if (i915_modeset != 0)
+               driver.driver_features |= DRIVER_MODESET;
+#endif
+       if (i915_modeset == 1)
+               driver.driver_features |= DRIVER_MODESET;
+
+#ifdef CONFIG_VGA_CONSOLE
+       if (vgacon_text_force() && i915_modeset == -1)
+               driver.driver_features &= ~DRIVER_MODESET;
+#endif
+
        return drm_init(&driver);
 }
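The net effect of the Kconfig default, the modeset parameter, and the VGA console override can be summarized as follows; this is a sketch derived from the checks above, not part of the patch.

/*
 * i915_modeset defaults to -1, meaning "no explicit request".
 *
 *   CONFIG_DRM_I915_KMS=y,     no parameter  -> KMS on
 *   CONFIG_DRM_I915_KMS=y,     modeset=0     -> KMS off
 *   CONFIG_DRM_I915_KMS unset, modeset=1     -> KMS on
 *   CONFIG_DRM_I915_KMS unset, no parameter  -> KMS off
 *   VGA console text forced,   no parameter  -> KMS off (overrides the default)
 */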
 
index b3cc4731aa7c07b43d2d9825068f3e8a70ce34bd..4756e5cd6b5e1142781c0dfe35b5f85aeb0df886 100644 (file)
@@ -31,6 +31,7 @@
 #define _I915_DRV_H_
 
 #include "i915_reg.h"
+#include "intel_bios.h"
 #include <linux/io-mapping.h>
 
 /* General customization:
@@ -103,15 +104,23 @@ struct intel_opregion {
        int enabled;
 };
 
+struct drm_i915_master_private {
+       drm_local_map_t *sarea;
+       struct _drm_i915_sarea *sarea_priv;
+};
+#define I915_FENCE_REG_NONE -1
+
+struct drm_i915_fence_reg {
+       struct drm_gem_object *obj;
+};
+
 typedef struct drm_i915_private {
        struct drm_device *dev;
 
        int has_gem;
 
        void __iomem *regs;
-       drm_local_map_t *sarea;
 
-       drm_i915_sarea_t *sarea_priv;
        drm_i915_ring_buffer_t ring;
 
        drm_dma_handle_t *status_page_dmah;
@@ -144,8 +153,30 @@ typedef struct drm_i915_private {
        unsigned int sr01, adpa, ppcr, dvob, dvoc, lvds;
        int vblank_pipe;
 
+       bool cursor_needs_physical;
+
+       struct drm_mm vram;
+
+       int irq_enabled;
+
        struct intel_opregion opregion;
 
+       /* LVDS info */
+       int backlight_duty_cycle;  /* restore backlight to this value */
+       bool panel_wants_dither;
+       struct drm_display_mode *panel_fixed_mode;
+       struct drm_display_mode *vbt_mode; /* if any */
+
+       /* Feature bits from the VBIOS */
+       unsigned int int_tv_support:1;
+       unsigned int lvds_dither:1;
+       unsigned int lvds_vbt:1;
+       unsigned int int_crt_support:1;
+
+       struct drm_i915_fence_reg fence_regs[16]; /* assume 965 */
+       int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */
+       int num_fence_regs; /* 8 on pre-965, 16 otherwise */
+
        /* Register state */
        u8 saveLBB;
        u32 saveDSPACNTR;
@@ -364,6 +395,21 @@ struct drm_i915_gem_object {
         * This is the same as gtt_space->start
         */
        uint32_t gtt_offset;
+       /**
+        * Required alignment for the object
+        */
+       uint32_t gtt_alignment;
+       /**
+        * Fake offset for use by mmap(2)
+        */
+       uint64_t mmap_offset;
+
+       /**
+        * Fence register bits (if any) for this object.  Will be set
+        * as needed when mapped into the GTT.
+        * Protected by dev->struct_mutex.
+        */
+       int fence_reg;
 
        /** Boolean whether this object has a valid gtt offset. */
        int gtt_bound;
@@ -376,6 +422,7 @@ struct drm_i915_gem_object {
 
        /** Current tiling mode for the object. */
        uint32_t tiling_mode;
+       uint32_t stride;
 
        /** AGP mapping type (AGP_USER_MEMORY or AGP_USER_CACHED_MEMORY */
        uint32_t agp_type;
@@ -385,6 +432,10 @@ struct drm_i915_gem_object {
         * flags which individual pages are valid.
         */
        uint8_t *page_cpu_valid;
+
+       /** User space pin count and filp owning the pin */
+       uint32_t user_pin_count;
+       struct drm_file *pin_filp;
 };
 
 /**
@@ -414,8 +465,19 @@ struct drm_i915_file_private {
        } mm;
 };
 
+enum intel_chip_family {
+       CHIP_I8XX = 0x01,
+       CHIP_I9XX = 0x02,
+       CHIP_I915 = 0x04,
+       CHIP_I965 = 0x08,
+};
+
 extern struct drm_ioctl_desc i915_ioctls[];
 extern int i915_max_ioctl;
+extern unsigned int i915_fbpercrtc;
+
+extern int i915_master_create(struct drm_device *dev, struct drm_master *master);
+extern void i915_master_destroy(struct drm_device *dev, struct drm_master *master);
 
                                /* i915_dma.c */
 extern void i915_kernel_lost_context(struct drm_device * dev);
@@ -441,6 +503,7 @@ extern int i915_irq_wait(struct drm_device *dev, void *data,
                         struct drm_file *file_priv);
 void i915_user_irq_get(struct drm_device *dev);
 void i915_user_irq_put(struct drm_device *dev);
+extern void i915_enable_interrupt(struct drm_device *dev);
 
 extern irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS);
 extern void i915_driver_irq_preinstall(struct drm_device * dev);
@@ -487,6 +550,8 @@ int i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
                          struct drm_file *file_priv);
 int i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
                        struct drm_file *file_priv);
+int i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
+                       struct drm_file *file_priv);
 int i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
                              struct drm_file *file_priv);
 int i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
@@ -523,6 +588,16 @@ uint32_t i915_get_gem_seqno(struct drm_device *dev);
 void i915_gem_retire_requests(struct drm_device *dev);
 void i915_gem_retire_work_handler(struct work_struct *work);
 void i915_gem_clflush_object(struct drm_gem_object *obj);
+int i915_gem_object_set_domain(struct drm_gem_object *obj,
+                              uint32_t read_domains,
+                              uint32_t write_domain);
+int i915_gem_init_ringbuffer(struct drm_device *dev);
+void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
+int i915_gem_do_init(struct drm_device *dev, unsigned long start,
+                    unsigned long end);
+int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
+int i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj,
+                                     int write);
 
 /* i915_gem_tiling.c */
 void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
@@ -561,6 +636,10 @@ static inline void opregion_asle_intr(struct drm_device *dev) { return; }
 static inline void opregion_enable_asle(struct drm_device *dev) { return; }
 #endif
 
+/* modesetting */
+extern void intel_modeset_init(struct drm_device *dev);
+extern void intel_modeset_cleanup(struct drm_device *dev);
+
 /**
  * Lock test for when it's just for synchronization of ring access.
  *
@@ -578,6 +657,13 @@ static inline void opregion_enable_asle(struct drm_device *dev) { return; }
 #define I915_WRITE16(reg, val) writel(val, dev_priv->regs + (reg))
 #define I915_READ8(reg)                readb(dev_priv->regs + (reg))
 #define I915_WRITE8(reg, val)  writeb(val, dev_priv->regs + (reg))
+#ifdef writeq
+#define I915_WRITE64(reg, val) writeq(val, dev_priv->regs + (reg))
+#else
+#define I915_WRITE64(reg, val) (writel(val, dev_priv->regs + (reg)), \
+                                writel(upper_32_bits(val), dev_priv->regs + \
+                                       (reg) + 4))
+#endif
 
 #define I915_VERBOSE 0
 
@@ -660,7 +746,8 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
 
 #define IS_G4X(dev) ((dev)->pci_device == 0x2E02 || \
                     (dev)->pci_device == 0x2E12 || \
-                    (dev)->pci_device == 0x2E22)
+                    (dev)->pci_device == 0x2E22 || \
+                    IS_GM45(dev))
 
 #define IS_G33(dev)    ((dev)->pci_device == 0x29C2 || \
                        (dev)->pci_device == 0x29B2 ||  \
index 24fe8c10b4b22c6bac1cfa17d79d55f02dd59179..cc2ca5561feb5add0ebac9bd6d3a7d58e4008c4c 100644 (file)
@@ -30,6 +30,7 @@
 #include "i915_drm.h"
 #include "i915_drv.h"
 #include <linux/swap.h>
+#include <linux/pci.h>
 
 #define I915_GEM_GPU_DOMAINS   (~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))
 
@@ -40,8 +41,6 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj,
 static void i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj);
 static void i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj);
 static void i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj);
-static int i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj,
-                                            int write);
 static int i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj,
                                             int write);
 static int i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
@@ -51,34 +50,43 @@ static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *o
 static int i915_gem_object_get_page_list(struct drm_gem_object *obj);
 static void i915_gem_object_free_page_list(struct drm_gem_object *obj);
 static int i915_gem_object_wait_rendering(struct drm_gem_object *obj);
+static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj,
+                                          unsigned alignment);
+static void i915_gem_object_get_fence_reg(struct drm_gem_object *obj);
+static void i915_gem_clear_fence_reg(struct drm_gem_object *obj);
+static int i915_gem_evict_something(struct drm_device *dev);
+
+int i915_gem_do_init(struct drm_device *dev, unsigned long start,
+                    unsigned long end)
+{
+       drm_i915_private_t *dev_priv = dev->dev_private;
 
-static void
-i915_gem_cleanup_ringbuffer(struct drm_device *dev);
+       if (start >= end ||
+           (start & (PAGE_SIZE - 1)) != 0 ||
+           (end & (PAGE_SIZE - 1)) != 0) {
+               return -EINVAL;
+       }
+
+       drm_mm_init(&dev_priv->mm.gtt_space, start,
+                   end - start);
+
+       dev->gtt_total = (uint32_t) (end - start);
+
+       return 0;
+}
 
 int
 i915_gem_init_ioctl(struct drm_device *dev, void *data,
                    struct drm_file *file_priv)
 {
-       drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_gem_init *args = data;
+       int ret;
 
        mutex_lock(&dev->struct_mutex);
-
-       if (args->gtt_start >= args->gtt_end ||
-           (args->gtt_start & (PAGE_SIZE - 1)) != 0 ||
-           (args->gtt_end & (PAGE_SIZE - 1)) != 0) {
-               mutex_unlock(&dev->struct_mutex);
-               return -EINVAL;
-       }
-
-       drm_mm_init(&dev_priv->mm.gtt_space, args->gtt_start,
-           args->gtt_end - args->gtt_start);
-
-       dev->gtt_total = (uint32_t) (args->gtt_end - args->gtt_start);
-
+       ret = i915_gem_do_init(dev, args->gtt_start, args->gtt_end);
        mutex_unlock(&dev->struct_mutex);
 
-       return 0;
+       return ret;
 }
 
 int
@@ -529,6 +537,252 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
        return 0;
 }
 
+/**
+ * i915_gem_fault - fault a page into the GTT
+ * @vma: VMA in question
+ * @vmf: fault info
+ *
+ * The fault handler is set up by drm_gem_mmap() when an object is GTT mapped
+ * from userspace.  The fault handler takes care of binding the object to
+ * the GTT (if needed), allocating and programming a fence register (again,
+ * only if needed based on whether the old reg is still valid or the object
+ * is tiled) and inserting a new PTE into the faulting process.
+ *
+ * Note that the faulting process may involve evicting existing objects
+ * from the GTT and/or fence registers to make room.  So performance may
+ * suffer if the GTT working set is large or there are few fence registers
+ * left.
+ */
+int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+       struct drm_gem_object *obj = vma->vm_private_data;
+       struct drm_device *dev = obj->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_gem_object *obj_priv = obj->driver_private;
+       pgoff_t page_offset;
+       unsigned long pfn;
+       int ret = 0;
+
+       /* We don't use vmf->pgoff since that has the fake offset */
+       page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
+               PAGE_SHIFT;
+
+       /* Now bind it into the GTT if needed */
+       mutex_lock(&dev->struct_mutex);
+       if (!obj_priv->gtt_space) {
+               ret = i915_gem_object_bind_to_gtt(obj, obj_priv->gtt_alignment);
+               if (ret) {
+                       mutex_unlock(&dev->struct_mutex);
+                       return VM_FAULT_SIGBUS;
+               }
+               list_add(&obj_priv->list, &dev_priv->mm.inactive_list);
+       }
+
+       /* Need a new fence register? */
+       if (obj_priv->fence_reg == I915_FENCE_REG_NONE &&
+           obj_priv->tiling_mode != I915_TILING_NONE)
+               i915_gem_object_get_fence_reg(obj);
+
+       pfn = ((dev->agp->base + obj_priv->gtt_offset) >> PAGE_SHIFT) +
+               page_offset;
+
+       /* Finally, remap it using the new GTT offset */
+       ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
+
+       mutex_unlock(&dev->struct_mutex);
+
+       switch (ret) {
+       case -ENOMEM:
+       case -EAGAIN:
+               return VM_FAULT_OOM;
+       case -EFAULT:
+       case -EBUSY:
+               DRM_ERROR("can't insert pfn??  fault or busy...\n");
+               return VM_FAULT_SIGBUS;
+       default:
+               return VM_FAULT_NOPAGE;
+       }
+}
+
+/**
+ * i915_gem_create_mmap_offset - create a fake mmap offset for an object
+ * @obj: obj in question
+ *
+ * GEM memory mapping works by handing back to userspace a fake mmap offset
+ * it can use in a subsequent mmap(2) call.  The DRM core code then looks
+ * up the object based on the offset and sets up the various memory mapping
+ * structures.
+ *
+ * This routine allocates and attaches a fake offset for @obj.
+ */
+static int
+i915_gem_create_mmap_offset(struct drm_gem_object *obj)
+{
+       struct drm_device *dev = obj->dev;
+       struct drm_gem_mm *mm = dev->mm_private;
+       struct drm_i915_gem_object *obj_priv = obj->driver_private;
+       struct drm_map_list *list;
+       struct drm_map *map;
+       int ret = 0;
+
+       /* Set the object up for mmap'ing */
+       list = &obj->map_list;
+       list->map = drm_calloc(1, sizeof(struct drm_map_list),
+                              DRM_MEM_DRIVER);
+       if (!list->map)
+               return -ENOMEM;
+
+       map = list->map;
+       map->type = _DRM_GEM;
+       map->size = obj->size;
+       map->handle = obj;
+
+       /* Get a DRM GEM mmap offset allocated... */
+       list->file_offset_node = drm_mm_search_free(&mm->offset_manager,
+                                                   obj->size / PAGE_SIZE, 0, 0);
+       if (!list->file_offset_node) {
+               DRM_ERROR("failed to allocate offset for bo %d\n", obj->name);
+               ret = -ENOMEM;
+               goto out_free_list;
+       }
+
+       list->file_offset_node = drm_mm_get_block(list->file_offset_node,
+                                                 obj->size / PAGE_SIZE, 0);
+       if (!list->file_offset_node) {
+               ret = -ENOMEM;
+               goto out_free_list;
+       }
+
+       list->hash.key = list->file_offset_node->start;
+       if (drm_ht_insert_item(&mm->offset_hash, &list->hash)) {
+               DRM_ERROR("failed to add to map hash\n");
+               goto out_free_mm;
+       }
+
+       /* By now we should be all set, any drm_mmap request on the offset
+        * below will get to our mmap & fault handler */
+       obj_priv->mmap_offset = ((uint64_t) list->hash.key) << PAGE_SHIFT;
+
+       return 0;
+
+out_free_mm:
+       drm_mm_put_block(list->file_offset_node);
+out_free_list:
+       drm_free(list->map, sizeof(struct drm_map_list), DRM_MEM_DRIVER);
+
+       return ret;
+}
+
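The "fake offset" handed back here is simply the start of the allocated drm_mm node converted to bytes. A brief note on how it is consumed (based on the DRM GEM core as wired up elsewhere in this series, not part of this hunk):

/*
 *   list->hash.key        = file_offset_node->start   (in pages)
 *   obj_priv->mmap_offset = hash.key << PAGE_SHIFT    (in bytes)
 *
 * drm_gem_mmap() reverses this: it looks up vma->vm_pgoff in
 * mm->offset_hash to find the object, then installs the driver's
 * gem_vm_ops (i915_gem_vm_ops, whose .fault is i915_gem_fault) on
 * the VMA so the first touch lands in the handler above.
 */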
+/**
+ * i915_gem_get_gtt_alignment - return required GTT alignment for an object
+ * @obj: object to check
+ *
+ * Return the required GTT alignment for an object, taking into account
+ * potential fence register mapping if needed.
+ */
+static uint32_t
+i915_gem_get_gtt_alignment(struct drm_gem_object *obj)
+{
+       struct drm_device *dev = obj->dev;
+       struct drm_i915_gem_object *obj_priv = obj->driver_private;
+       int start, i;
+
+       /*
+        * Minimum alignment is 4k (GTT page size), but might be greater
+        * if a fence register is needed for the object.
+        */
+       if (IS_I965G(dev) || obj_priv->tiling_mode == I915_TILING_NONE)
+               return 4096;
+
+       /*
+        * Previous chips need to be aligned to the size of the smallest
+        * fence register that can contain the object.
+        */
+       if (IS_I9XX(dev))
+               start = 1024*1024;
+       else
+               start = 512*1024;
+
+       for (i = start; i < obj->size; i <<= 1)
+               ;
+
+       return i;
+}
+
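A few worked cases for the alignment helper above; the object sizes are illustrative and not taken from the patch.

/*
 *   965-class, or any untiled object   -> 4 KB   (GTT page size)
 *   9xx-class, tiled, 300 KB object    -> 1 MB   (start value, loop never runs)
 *   9xx-class, tiled, 3 MB object      -> 4 MB   (smallest fence size >= object size)
 *   pre-9xx,   tiled, 300 KB object    -> 512 KB
 */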
+/**
+ * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
+ * @dev: DRM device
+ * @data: GTT mapping ioctl data
+ * @file_priv: GEM object info
+ *
+ * Simply returns the fake offset to userspace so it can mmap it.
+ * The mmap call will end up in drm_gem_mmap(), which will set things
+ * up so we can get faults in the handler above.
+ *
+ * The fault handler will take care of binding the object into the GTT
+ * (since it may have been evicted to make room for something), allocating
+ * a fence register, and mapping the appropriate aperture address into
+ * userspace.
+ */
+int
+i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
+                       struct drm_file *file_priv)
+{
+       struct drm_i915_gem_mmap_gtt *args = data;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_gem_object *obj;
+       struct drm_i915_gem_object *obj_priv;
+       int ret;
+
+       if (!(dev->driver->driver_features & DRIVER_GEM))
+               return -ENODEV;
+
+       obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+       if (obj == NULL)
+               return -EBADF;
+
+       mutex_lock(&dev->struct_mutex);
+
+       obj_priv = obj->driver_private;
+
+       if (!obj_priv->mmap_offset) {
+               ret = i915_gem_create_mmap_offset(obj);
+               if (ret) {
+                       drm_gem_object_unreference(obj);
+                       mutex_unlock(&dev->struct_mutex);
+                       return ret;
+               }
+       }
+
+       args->offset = obj_priv->mmap_offset;
+
+       obj_priv->gtt_alignment = i915_gem_get_gtt_alignment(obj);
+
+       /* Make sure the alignment is correct for fence regs etc */
+       if (obj_priv->agp_mem &&
+           (obj_priv->gtt_offset & (obj_priv->gtt_alignment - 1))) {
+               drm_gem_object_unreference(obj);
+               mutex_unlock(&dev->struct_mutex);
+               return -EINVAL;
+       }
+
+       /*
+        * Pull it into the GTT so that we have a page list (makes the
+        * initial fault faster and any subsequent flushing possible).
+        */
+       if (!obj_priv->agp_mem) {
+               ret = i915_gem_object_bind_to_gtt(obj, obj_priv->gtt_alignment);
+               if (ret) {
+                       drm_gem_object_unreference(obj);
+                       mutex_unlock(&dev->struct_mutex);
+                       return ret;
+               }
+               list_add(&obj_priv->list, &dev_priv->mm.inactive_list);
+       }
+
+       drm_gem_object_unreference(obj);
+       mutex_unlock(&dev->struct_mutex);
+
+       return 0;
+}
+
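For context, a minimal userspace sketch of how this ioctl is meant to be used; it is illustrative only and not part of the patch, and it assumes an already-open DRM fd, an existing GEM handle, and the struct drm_i915_gem_mmap_gtt / DRM_IOCTL_I915_GEM_MMAP_GTT definitions from i915_drm.h.

#include <stddef.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include "i915_drm.h"

/* Map a GEM object through the GTT aperture; returns MAP_FAILED on error. */
static void *gtt_map_bo(int drm_fd, uint32_t handle, size_t size)
{
	struct drm_i915_gem_mmap_gtt arg = { .handle = handle };

	/* Step 1: ask the kernel for the object's fake mmap offset. */
	if (ioctl(drm_fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &arg))
		return MAP_FAILED;

	/* Step 2: mmap(2) the DRM fd at that offset; faults are served
	 * by i915_gem_fault(), which binds the object into the GTT. */
	return mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
		    drm_fd, arg.offset);
}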
 static void
 i915_gem_object_free_page_list(struct drm_gem_object *obj)
 {
@@ -726,6 +980,7 @@ i915_gem_retire_request(struct drm_device *dev,
                 */
                if (obj_priv->last_rendering_seqno != request->seqno)
                        return;
+
 #if WATCH_LRU
                DRM_INFO("%s: retire %d moves to inactive list %p\n",
                         __func__, request->seqno, obj);
@@ -956,6 +1211,7 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
 {
        struct drm_device *dev = obj->dev;
        struct drm_i915_gem_object *obj_priv = obj->driver_private;
+       loff_t offset;
        int ret = 0;
 
 #if WATCH_BUF
@@ -991,6 +1247,14 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
 
        BUG_ON(obj_priv->active);
 
+       /* blow away mappings if mapped through GTT */
+       offset = ((loff_t) obj->map_list.hash.key) << PAGE_SHIFT;
+       if (dev->dev_mapping)
+               unmap_mapping_range(dev->dev_mapping, offset, obj->size, 1);
+
+       if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
+               i915_gem_clear_fence_reg(obj);
+
        i915_gem_object_free_page_list(obj);
 
        if (obj_priv->gtt_space) {
@@ -1149,6 +1413,204 @@ i915_gem_object_get_page_list(struct drm_gem_object *obj)
        return 0;
 }
 
+static void i965_write_fence_reg(struct drm_i915_fence_reg *reg)
+{
+       struct drm_gem_object *obj = reg->obj;
+       struct drm_device *dev = obj->dev;
+       drm_i915_private_t *dev_priv = dev->dev_private;
+       struct drm_i915_gem_object *obj_priv = obj->driver_private;
+       int regnum = obj_priv->fence_reg;
+       uint64_t val;
+
+       val = (uint64_t)((obj_priv->gtt_offset + obj->size - 4096) &
+                   0xfffff000) << 32;
+       val |= obj_priv->gtt_offset & 0xfffff000;
+       val |= ((obj_priv->stride / 128) - 1) << I965_FENCE_PITCH_SHIFT;
+       if (obj_priv->tiling_mode == I915_TILING_Y)
+               val |= 1 << I965_FENCE_TILING_Y_SHIFT;
+       val |= I965_FENCE_REG_VALID;
+
+       I915_WRITE64(FENCE_REG_965_0 + (regnum * 8), val);
+}
+
+static void i915_write_fence_reg(struct drm_i915_fence_reg *reg)
+{
+       struct drm_gem_object *obj = reg->obj;
+       struct drm_device *dev = obj->dev;
+       drm_i915_private_t *dev_priv = dev->dev_private;
+       struct drm_i915_gem_object *obj_priv = obj->driver_private;
+       int regnum = obj_priv->fence_reg;
+       uint32_t val;
+       uint32_t pitch_val;
+
+       if ((obj_priv->gtt_offset & ~I915_FENCE_START_MASK) ||
+           (obj_priv->gtt_offset & (obj->size - 1))) {
+               WARN(1, "%s: object not 1M or size aligned\n", __FUNCTION__);
+               return;
+       }
+
+       if (obj_priv->tiling_mode == I915_TILING_Y && (IS_I945G(dev) ||
+                                                      IS_I945GM(dev) ||
+                                                      IS_G33(dev)))
+               pitch_val = (obj_priv->stride / 128) - 1;
+       else
+               pitch_val = (obj_priv->stride / 512) - 1;
+
+       val = obj_priv->gtt_offset;
+       if (obj_priv->tiling_mode == I915_TILING_Y)
+               val |= 1 << I830_FENCE_TILING_Y_SHIFT;
+       val |= I915_FENCE_SIZE_BITS(obj->size);
+       val |= pitch_val << I830_FENCE_PITCH_SHIFT;
+       val |= I830_FENCE_REG_VALID;
+
+       I915_WRITE(FENCE_REG_830_0 + (regnum * 4), val);
+}
+
+static void i830_write_fence_reg(struct drm_i915_fence_reg *reg)
+{
+       struct drm_gem_object *obj = reg->obj;
+       struct drm_device *dev = obj->dev;
+       drm_i915_private_t *dev_priv = dev->dev_private;
+       struct drm_i915_gem_object *obj_priv = obj->driver_private;
+       int regnum = obj_priv->fence_reg;
+       uint32_t val;
+       uint32_t pitch_val;
+
+       if ((obj_priv->gtt_offset & ~I915_FENCE_START_MASK) ||
+           (obj_priv->gtt_offset & (obj->size - 1))) {
+               WARN(1, "%s: object not 1M or size aligned\n", __FUNCTION__);
+               return;
+       }
+
+       pitch_val = (obj_priv->stride / 128) - 1;
+
+       val = obj_priv->gtt_offset;
+       if (obj_priv->tiling_mode == I915_TILING_Y)
+               val |= 1 << I830_FENCE_TILING_Y_SHIFT;
+       val |= I830_FENCE_SIZE_BITS(obj->size);
+       val |= pitch_val << I830_FENCE_PITCH_SHIFT;
+       val |= I830_FENCE_REG_VALID;
+
+       I915_WRITE(FENCE_REG_830_0 + (regnum * 4), val);
+
+}
+
+/**
+ * i915_gem_object_get_fence_reg - set up a fence reg for an object
+ * @obj: object to map through a fence reg
+ *
+ * When mapping objects through the GTT, userspace wants to be able to write
+ * to them without having to worry about swizzling if the object is tiled.
+ *
+ * This function walks the fence regs looking for a free one for @obj,
+ * stealing one if it can't find any.
+ *
+ * It then sets up the reg based on the object's properties: address, pitch
+ * and tiling format.
+ */
+static void
+i915_gem_object_get_fence_reg(struct drm_gem_object *obj)
+{
+       struct drm_device *dev = obj->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_gem_object *obj_priv = obj->driver_private;
+       struct drm_i915_fence_reg *reg = NULL;
+       int i, ret;
+
+       switch (obj_priv->tiling_mode) {
+       case I915_TILING_NONE:
+               WARN(1, "allocating a fence for non-tiled object?\n");
+               break;
+       case I915_TILING_X:
+               WARN(obj_priv->stride & (512 - 1),
+                    "object is X tiled but has non-512B pitch\n");
+               break;
+       case I915_TILING_Y:
+               WARN(obj_priv->stride & (128 - 1),
+                    "object is Y tiled but has non-128B pitch\n");
+               break;
+       }
+
+       /* First try to find a free reg */
+       for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
+               reg = &dev_priv->fence_regs[i];
+               if (!reg->obj)
+                       break;
+       }
+
+       /* None available, try to steal one or wait for a user to finish */
+       if (i == dev_priv->num_fence_regs) {
+               struct drm_i915_gem_object *old_obj_priv = NULL;
+               loff_t offset;
+
+try_again:
+               /* Could try to use LRU here instead... */
+               for (i = dev_priv->fence_reg_start;
+                    i < dev_priv->num_fence_regs; i++) {
+                       reg = &dev_priv->fence_regs[i];
+                       old_obj_priv = reg->obj->driver_private;
+                       if (!old_obj_priv->pin_count)
+                               break;
+               }
+
+               /*
+                * Now things get ugly... we have to wait for one of the
+                * objects to finish before trying again.
+                */
+               if (i == dev_priv->num_fence_regs) {
+                       ret = i915_gem_object_wait_rendering(reg->obj);
+                       if (ret) {
+                               WARN(ret, "wait_rendering failed: %d\n", ret);
+                               return;
+                       }
+                       goto try_again;
+               }
+
+               /*
+                * Zap this virtual mapping so we can set up a fence again
+                * for this object next time we need it.
+                */
+               offset = ((loff_t) reg->obj->map_list.hash.key) << PAGE_SHIFT;
+               if (dev->dev_mapping)
+                       unmap_mapping_range(dev->dev_mapping, offset,
+                                           reg->obj->size, 1);
+               old_obj_priv->fence_reg = I915_FENCE_REG_NONE;
+       }
+
+       obj_priv->fence_reg = i;
+       reg->obj = obj;
+
+       if (IS_I965G(dev))
+               i965_write_fence_reg(reg);
+       else if (IS_I9XX(dev))
+               i915_write_fence_reg(reg);
+       else
+               i830_write_fence_reg(reg);
+}
+
+/**
+ * i915_gem_clear_fence_reg - clear out fence register info
+ * @obj: object to clear
+ *
+ * Zeroes out the fence register itself and clears out the associated
+ * data structures in dev_priv and obj_priv.
+ */
+static void
+i915_gem_clear_fence_reg(struct drm_gem_object *obj)
+{
+       struct drm_device *dev = obj->dev;
+       drm_i915_private_t *dev_priv = dev->dev_private;
+       struct drm_i915_gem_object *obj_priv = obj->driver_private;
+
+       if (IS_I965G(dev))
+               I915_WRITE64(FENCE_REG_965_0 + (obj_priv->fence_reg * 8), 0);
+       else
+               I915_WRITE(FENCE_REG_830_0 + (obj_priv->fence_reg * 4), 0);
+
+       dev_priv->fence_regs[obj_priv->fence_reg].obj = NULL;
+       obj_priv->fence_reg = I915_FENCE_REG_NONE;
+}
+
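As a small worked example of the per-generation fence programming above (values chosen for illustration, not taken from the patch): an X-tiled object at a 1 MB-aligned GTT offset with a 2048-byte stride on a 915-class part goes through i915_write_fence_reg(), giving pitch_val = 2048 / 512 - 1 = 3, so the register value is assembled as:

/*
 *   val = obj_priv->gtt_offset
 *       | I915_FENCE_SIZE_BITS(obj->size)
 *       | 3 << I830_FENCE_PITCH_SHIFT
 *       | I830_FENCE_REG_VALID;
 *
 * On 965-class hardware the same object would instead take the
 * i965_write_fence_reg() path, which programs a 64-bit register with
 * both start and end addresses and a pitch in 128-byte units.
 */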
 /**
  * Finds free space in the GTT aperture and binds the object there.
  */
@@ -1307,7 +1769,7 @@ i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj)
  * This function returns when the move is complete, including waiting on
  * flushes to occur.
  */
-static int
+int
 i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
 {
        struct drm_i915_gem_object *obj_priv = obj->driver_private;
@@ -2029,13 +2491,15 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
 
                /* error other than GTT full, or we've already tried again */
                if (ret != -ENOMEM || pin_tries >= 1) {
-                       DRM_ERROR("Failed to pin buffers %d\n", ret);
+                       if (ret != -ERESTARTSYS)
+                               DRM_ERROR("Failed to pin buffers %d\n", ret);
                        goto err;
                }
 
                /* unpin all of our buffers */
                for (i = 0; i < pinned; i++)
                        i915_gem_object_unpin(object_list[i]);
+               pinned = 0;
 
                /* evict everyone we can from the aperture */
                ret = i915_gem_evict_everything(dev);
@@ -2149,13 +2613,12 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
                          "back to user (%d)\n",
                           args->buffer_count, ret);
 err:
-       if (object_list != NULL) {
-               for (i = 0; i < pinned; i++)
-                       i915_gem_object_unpin(object_list[i]);
+       for (i = 0; i < pinned; i++)
+               i915_gem_object_unpin(object_list[i]);
+
+       for (i = 0; i < args->buffer_count; i++)
+               drm_gem_object_unreference(object_list[i]);
 
-               for (i = 0; i < args->buffer_count; i++)
-                       drm_gem_object_unreference(object_list[i]);
-       }
        mutex_unlock(&dev->struct_mutex);
 
 pre_mutex_err:
@@ -2178,7 +2641,8 @@ i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
        if (obj_priv->gtt_space == NULL) {
                ret = i915_gem_object_bind_to_gtt(obj, alignment);
                if (ret != 0) {
-                       DRM_ERROR("Failure to bind: %d", ret);
+                       if (ret != -ERESTARTSYS)
+                               DRM_ERROR("Failure to bind: %d", ret);
                        return ret;
                }
        }
@@ -2249,11 +2713,22 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data,
        }
        obj_priv = obj->driver_private;
 
-       ret = i915_gem_object_pin(obj, args->alignment);
-       if (ret != 0) {
-               drm_gem_object_unreference(obj);
+       if (obj_priv->pin_filp != NULL && obj_priv->pin_filp != file_priv) {
+               DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n",
+                         args->handle);
                mutex_unlock(&dev->struct_mutex);
-               return ret;
+               return -EINVAL;
+       }
+
+       obj_priv->user_pin_count++;
+       obj_priv->pin_filp = file_priv;
+       if (obj_priv->user_pin_count == 1) {
+               ret = i915_gem_object_pin(obj, args->alignment);
+               if (ret != 0) {
+                       drm_gem_object_unreference(obj);
+                       mutex_unlock(&dev->struct_mutex);
+                       return ret;
+               }
        }
 
        /* XXX - flush the CPU caches for pinned objects
@@ -2273,6 +2748,7 @@ i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
 {
        struct drm_i915_gem_pin *args = data;
        struct drm_gem_object *obj;
+       struct drm_i915_gem_object *obj_priv;
 
        mutex_lock(&dev->struct_mutex);
 
@@ -2284,7 +2760,19 @@ i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
                return -EBADF;
        }
 
-       i915_gem_object_unpin(obj);
+       obj_priv = obj->driver_private;
+       if (obj_priv->pin_filp != file_priv) {
+               DRM_ERROR("Not pinned by caller in i915_gem_pin_ioctl(): %d\n",
+                         args->handle);
+               drm_gem_object_unreference(obj);
+               mutex_unlock(&dev->struct_mutex);
+               return -EINVAL;
+       }
+       obj_priv->user_pin_count--;
+       if (obj_priv->user_pin_count == 0) {
+               obj_priv->pin_filp = NULL;
+               i915_gem_object_unpin(obj);
+       }
 
        drm_gem_object_unreference(obj);
        mutex_unlock(&dev->struct_mutex);
@@ -2351,12 +2839,18 @@ int i915_gem_init_object(struct drm_gem_object *obj)
 
        obj->driver_private = obj_priv;
        obj_priv->obj = obj;
+       obj_priv->fence_reg = I915_FENCE_REG_NONE;
        INIT_LIST_HEAD(&obj_priv->list);
+
        return 0;
 }
 
 void i915_gem_free_object(struct drm_gem_object *obj)
 {
+       struct drm_device *dev = obj->dev;
+       struct drm_gem_mm *mm = dev->mm_private;
+       struct drm_map_list *list;
+       struct drm_map *map;
        struct drm_i915_gem_object *obj_priv = obj->driver_private;
 
        while (obj_priv->pin_count > 0)
@@ -2364,6 +2858,20 @@ void i915_gem_free_object(struct drm_gem_object *obj)
 
        i915_gem_object_unbind(obj);
 
+       list = &obj->map_list;
+       drm_ht_remove_item(&mm->offset_hash, &list->hash);
+
+       if (list->file_offset_node) {
+               drm_mm_put_block(list->file_offset_node);
+               list->file_offset_node = NULL;
+       }
+
+       map = list->map;
+       if (map) {
+               drm_free(map, sizeof(*map), DRM_MEM_DRIVER);
+               list->map = NULL;
+       }
+
        drm_free(obj_priv->page_cpu_valid, 1, DRM_MEM_DRIVER);
        drm_free(obj->driver_private, 1, DRM_MEM_DRIVER);
 }
@@ -2432,8 +2940,7 @@ i915_gem_idle(struct drm_device *dev)
         */
        i915_gem_flush(dev, ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT),
                       ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT));
-       seqno = i915_add_request(dev, ~(I915_GEM_DOMAIN_CPU |
-                                       I915_GEM_DOMAIN_GTT));
+       seqno = i915_add_request(dev, ~I915_GEM_DOMAIN_CPU);
 
        if (seqno == 0) {
                mutex_unlock(&dev->struct_mutex);
@@ -2560,12 +3067,13 @@ i915_gem_init_hws(struct drm_device *dev)
        return 0;
 }
 
-static int
+int
 i915_gem_init_ringbuffer(struct drm_device *dev)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_gem_object *obj;
        struct drm_i915_gem_object *obj_priv;
+       drm_i915_ring_buffer_t *ring = &dev_priv->ring;
        int ret;
        u32 head;
 
@@ -2587,24 +3095,24 @@ i915_gem_init_ringbuffer(struct drm_device *dev)
        }
 
        /* Set up the kernel mapping for the ring. */
-       dev_priv->ring.Size = obj->size;
-       dev_priv->ring.tail_mask = obj->size - 1;
+       ring->Size = obj->size;
+       ring->tail_mask = obj->size - 1;
 
-       dev_priv->ring.map.offset = dev->agp->base + obj_priv->gtt_offset;
-       dev_priv->ring.map.size = obj->size;
-       dev_priv->ring.map.type = 0;
-       dev_priv->ring.map.flags = 0;
-       dev_priv->ring.map.mtrr = 0;
+       ring->map.offset = dev->agp->base + obj_priv->gtt_offset;
+       ring->map.size = obj->size;
+       ring->map.type = 0;
+       ring->map.flags = 0;
+       ring->map.mtrr = 0;
 
-       drm_core_ioremap_wc(&dev_priv->ring.map, dev);
-       if (dev_priv->ring.map.handle == NULL) {
+       drm_core_ioremap_wc(&ring->map, dev);
+       if (ring->map.handle == NULL) {
                DRM_ERROR("Failed to map ringbuffer.\n");
                memset(&dev_priv->ring, 0, sizeof(dev_priv->ring));
                drm_gem_object_unreference(obj);
                return -EINVAL;
        }
-       dev_priv->ring.ring_obj = obj;
-       dev_priv->ring.virtual_start = dev_priv->ring.map.handle;
+       ring->ring_obj = obj;
+       ring->virtual_start = ring->map.handle;
 
        /* Stop the ring if it's running. */
        I915_WRITE(PRB0_CTL, 0);
@@ -2652,12 +3160,20 @@ i915_gem_init_ringbuffer(struct drm_device *dev)
        }
 
        /* Update our cache of the ring state */
-       i915_kernel_lost_context(dev);
+       if (!drm_core_check_feature(dev, DRIVER_MODESET))
+               i915_kernel_lost_context(dev);
+       else {
+               ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
+               ring->tail = I915_READ(PRB0_TAIL) & TAIL_ADDR;
+               ring->space = ring->head - (ring->tail + 8);
+               if (ring->space < 0)
+                       ring->space += ring->Size;
+       }
 
        return 0;
 }
 
-static void
+void
 i915_gem_cleanup_ringbuffer(struct drm_device *dev)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
@@ -2695,6 +3211,9 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
        drm_i915_private_t *dev_priv = dev->dev_private;
        int ret;
 
+       if (drm_core_check_feature(dev, DRIVER_MODESET))
+               return 0;
+
        if (dev_priv->mm.wedged) {
                DRM_ERROR("Reenabling wedged hardware, good luck\n");
                dev_priv->mm.wedged = 0;
@@ -2728,6 +3247,9 @@ i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
        drm_i915_private_t *dev_priv = dev->dev_private;
        int ret;
 
+       if (drm_core_check_feature(dev, DRIVER_MODESET))
+               return 0;
+
        ret = i915_gem_idle(dev);
        drm_irq_uninstall(dev);
 
@@ -2758,5 +3280,13 @@ i915_gem_load(struct drm_device *dev)
                          i915_gem_retire_work_handler);
        dev_priv->mm.next_gem_seqno = 1;
 
+       /* Old X drivers will take 0-2 for front, back, depth buffers */
+       dev_priv->fence_reg_start = 3;
+
+       if (IS_I965G(dev))
+               dev_priv->num_fence_regs = 16;
+       else
+               dev_priv->num_fence_regs = 8;
+
        i915_gem_detect_bit_6_swizzle(dev);
 }
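i915_gem_load() records that fence registers 0-2 stay reserved for old X servers (front, back and depth buffers) and that the kernel may use the rest: registers 3-15 on 965-class parts, 3-7 otherwise. A hypothetical first-free scan over that window, only to illustrate the split; the struct, bitmap and function below are made up and are not the driver's allocator:

#include <stdbool.h>

#define MAX_FENCE_REGS 16

struct fence_pool {
        bool in_use[MAX_FENCE_REGS];
        int start;      /* first register the kernel may hand out, e.g. 3 */
        int count;      /* registers on this chip: 16 on 965, 8 otherwise */
};

/* Return the index of a free fence register at or above `start`,
 * or -1 if every kernel-owned register is busy. */
static int fence_alloc(struct fence_pool *p)
{
        for (int i = p->start; i < p->count; i++) {
                if (!p->in_use[i]) {
                        p->in_use[i] = true;
                        return i;
                }
        }
        return -1;
}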
index e8d5abe1250ee14404bf8f9da972e34fd74ff6f0..4d1b9de0cd8b11ef176cb766a2f07fdda5c8b84f 100644 (file)
@@ -250,6 +250,39 @@ static int i915_interrupt_info(char *buf, char **start, off_t offset,
        return len - offset;
 }
 
+static int i915_hws_info(char *buf, char **start, off_t offset,
+                        int request, int *eof, void *data)
+{
+       struct drm_minor *minor = (struct drm_minor *) data;
+       struct drm_device *dev = minor->dev;
+       drm_i915_private_t *dev_priv = dev->dev_private;
+       int len = 0, i;
+       volatile u32 *hws;
+
+       if (offset > DRM_PROC_LIMIT) {
+               *eof = 1;
+               return 0;
+       }
+
+       hws = (volatile u32 *)dev_priv->hw_status_page;
+       if (hws == NULL) {
+               *eof = 1;
+               return 0;
+       }
+
+       *start = &buf[offset];
+       *eof = 0;
+       for (i = 0; i < 4096 / sizeof(u32) / 4; i += 4) {
+               DRM_PROC_PRINT("0x%08x: 0x%08x 0x%08x 0x%08x 0x%08x\n",
+                              i * 4,
+                              hws[i], hws[i + 1], hws[i + 2], hws[i + 3]);
+       }
+       if (len > request + offset)
+               return request;
+       *eof = 1;
+       return len - offset;
+}
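i915_hws_info() prints the start of the hardware status page through /proc, four 32-bit words per row, each row prefixed with its byte offset. The formatting loop on its own, over an ordinary buffer (function and parameter names are illustrative):

#include <stdint.h>
#include <stdio.h>

/* Print `count` 32-bit words, four per row, each row prefixed with the
 * byte offset of its first word, mirroring the proc output format above. */
static void dump_words(const uint32_t *hws, unsigned count)
{
        for (unsigned i = 0; i + 3 < count; i += 4)
                printf("0x%08x: 0x%08x 0x%08x 0x%08x 0x%08x\n",
                       i * 4, (unsigned)hws[i], (unsigned)hws[i + 1],
                       (unsigned)hws[i + 2], (unsigned)hws[i + 3]);
}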
+
 static struct drm_proc_list {
        /** file name */
        const char *name;
@@ -262,6 +295,7 @@ static struct drm_proc_list {
        {"i915_gem_request", i915_gem_request_info},
        {"i915_gem_seqno", i915_gem_seqno_info},
        {"i915_gem_interrupt", i915_interrupt_info},
+       {"i915_gem_hws", i915_hws_info},
 };
 
 #define I915_GEM_PROC_ENTRIES ARRAY_SIZE(i915_gem_proc_list)
index a8cb69469c641ff28f4d57df20b51d0dd5772f7c..241f39b7f460251e0570c1e21ac9b3cccb53b8fc 100644 (file)
@@ -208,6 +208,7 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
                }
        }
        obj_priv->tiling_mode = args->tiling_mode;
+       obj_priv->stride = args->stride;
 
        mutex_unlock(&dev->struct_mutex);
 
index 69b9a42da95ed720e85841c00ab7580fdff94830..0cadafbef411ba094d020d1323f66f136ba0d1bd 100644 (file)
@@ -30,6 +30,7 @@
 #include "drm.h"
 #include "i915_drm.h"
 #include "i915_drv.h"
+#include "intel_drv.h"
 
 #define MAX_NOPID ((u32)~0)
 
 #define I915_INTERRUPT_ENABLE_MASK (I915_INTERRUPT_ENABLE_FIX | \
                                    I915_INTERRUPT_ENABLE_VAR)
 
+#define I915_PIPE_VBLANK_STATUS        (PIPE_START_VBLANK_INTERRUPT_STATUS |\
+                                PIPE_VBLANK_INTERRUPT_STATUS)
+
+#define I915_PIPE_VBLANK_ENABLE        (PIPE_START_VBLANK_INTERRUPT_ENABLE |\
+                                PIPE_VBLANK_INTERRUPT_ENABLE)
+
+#define DRM_I915_VBLANK_PIPE_ALL       (DRM_I915_VBLANK_PIPE_A | \
+                                        DRM_I915_VBLANK_PIPE_B)
+
 void
 i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask)
 {
@@ -168,6 +178,7 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
 {
        struct drm_device *dev = (struct drm_device *) arg;
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+       struct drm_i915_master_private *master_priv;
        u32 iir, new_iir;
        u32 pipea_stats, pipeb_stats;
        u32 vblank_status;
@@ -200,6 +211,7 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
                spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
                pipea_stats = I915_READ(PIPEASTAT);
                pipeb_stats = I915_READ(PIPEBSTAT);
+
                /*
                 * Clear the PIPE(A|B)STAT regs before the IIR
                 */
@@ -222,9 +234,12 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
                I915_WRITE(IIR, iir);
                new_iir = I915_READ(IIR); /* Flush posted writes */
 
-               if (dev_priv->sarea_priv)
-                       dev_priv->sarea_priv->last_dispatch =
-                               READ_BREADCRUMB(dev_priv);
+               if (dev->primary->master) {
+                       master_priv = dev->primary->master->driver_priv;
+                       if (master_priv->sarea_priv)
+                               master_priv->sarea_priv->last_dispatch =
+                                       READ_BREADCRUMB(dev_priv);
+               }
 
                if (iir & I915_USER_INTERRUPT) {
                        dev_priv->mm.irq_gem_seqno = i915_get_gem_seqno(dev);
@@ -269,6 +284,7 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
 static int i915_emit_irq(struct drm_device * dev)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
+       struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
        RING_LOCALS;
 
        i915_kernel_lost_context(dev);
@@ -278,8 +294,8 @@ static int i915_emit_irq(struct drm_device * dev)
        dev_priv->counter++;
        if (dev_priv->counter > 0x7FFFFFFFUL)
                dev_priv->counter = 1;
-       if (dev_priv->sarea_priv)
-               dev_priv->sarea_priv->last_enqueue = dev_priv->counter;
+       if (master_priv->sarea_priv)
+               master_priv->sarea_priv->last_enqueue = dev_priv->counter;
 
        BEGIN_LP_RING(4);
        OUT_RING(MI_STORE_DWORD_INDEX);
@@ -317,21 +333,20 @@ void i915_user_irq_put(struct drm_device *dev)
 static int i915_wait_irq(struct drm_device * dev, int irq_nr)
 {
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+       struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
        int ret = 0;
 
        DRM_DEBUG("irq_nr=%d breadcrumb=%d\n", irq_nr,
                  READ_BREADCRUMB(dev_priv));
 
        if (READ_BREADCRUMB(dev_priv) >= irq_nr) {
-               if (dev_priv->sarea_priv) {
-                       dev_priv->sarea_priv->last_dispatch =
-                               READ_BREADCRUMB(dev_priv);
-               }
+               if (master_priv->sarea_priv)
+                       master_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
                return 0;
        }
 
-       if (dev_priv->sarea_priv)
-               dev_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
+       if (master_priv->sarea_priv)
+               master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
 
        i915_user_irq_get(dev);
        DRM_WAIT_ON(ret, dev_priv->irq_queue, 3 * DRM_HZ,
@@ -343,10 +358,6 @@ static int i915_wait_irq(struct drm_device * dev, int irq_nr)
                          READ_BREADCRUMB(dev_priv), (int)dev_priv->counter);
        }
 
-       if (dev_priv->sarea_priv)
-               dev_priv->sarea_priv->last_dispatch =
-                       READ_BREADCRUMB(dev_priv);
-
        return ret;
 }
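i915_emit_irq() stores an ever-increasing counter (the breadcrumb) into the status page and raises a user interrupt; i915_wait_irq() then sleeps until READ_BREADCRUMB() reaches the requested value. The same produce/wait shape with the hardware replaced by a shared counter; the pthread primitives are stand-ins for the kernel's wait queue, not what the driver uses:

#include <pthread.h>
#include <stdint.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static uint32_t breadcrumb;     /* stands in for READ_BREADCRUMB() */

/* Producer: publish a new sequence number and wake waiters. */
static void emit(uint32_t seqno)
{
        pthread_mutex_lock(&lock);
        breadcrumb = seqno;
        pthread_cond_broadcast(&cond);
        pthread_mutex_unlock(&lock);
}

/* Consumer: block until the counter reaches `irq_nr`
 * (the timeout handling of the real code is omitted). */
static void wait_for(uint32_t irq_nr)
{
        pthread_mutex_lock(&lock);
        while (breadcrumb < irq_nr)
                pthread_cond_wait(&cond, &lock);
        pthread_mutex_unlock(&lock);
}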
 
@@ -427,6 +438,14 @@ void i915_disable_vblank(struct drm_device *dev, int pipe)
        spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
 }
 
+void i915_enable_interrupt (struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       opregion_enable_asle(dev);
+       dev_priv->irq_enabled = 1;
+}
+
+
 /* Set the vblank monitor pipe
  */
 int i915_vblank_pipe_set(struct drm_device *dev, void *data,
@@ -487,6 +506,8 @@ void i915_driver_irq_preinstall(struct drm_device * dev)
 {
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 
+       atomic_set(&dev_priv->irq_received, 0);
+
        I915_WRITE(HWSTAM, 0xeffe);
        I915_WRITE(PIPEASTAT, 0);
        I915_WRITE(PIPEBSTAT, 0);
index 6126a60dc9cb19ceb335b9e335a04ecfe6d25559..96e271986d2a70197804c898e9d07a86dad85070 100644 (file)
@@ -46,7 +46,8 @@
 static void mark_block(struct drm_device * dev, struct mem_block *p, int in_use)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
-       drm_i915_sarea_t *sarea_priv = dev_priv->sarea_priv;
+       struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
+       drm_i915_sarea_t *sarea_priv = master_priv->sarea_priv;
        struct drm_tex_region *list;
        unsigned shift, nr;
        unsigned start;
index 13ae731a33dba577b9a7326a9fe22daf8c1baf1c..ff012835a3863400cd54adfcbc392c4e59ea2d9a 100644 (file)
@@ -257,8 +257,8 @@ void opregion_enable_asle(struct drm_device *dev)
 
 static struct intel_opregion *system_opregion;
 
-int intel_opregion_video_event(struct notifier_block *nb, unsigned long val,
-                              void *data)
+static int intel_opregion_video_event(struct notifier_block *nb,
+                                     unsigned long val, void *data)
 {
        /* The only video events relevant to opregion are 0x80. These indicate
           either a docking event, lid switch or display switch request. In
index 9d24aaeb8a453d76e2d23e2223af3bfc527a74fa..47e6bafeb743d759416afaacc718bf3cae5c03db 100644 (file)
 #define   DISPLAY_PLANE_B           (1<<20)
 
 /*
- * Instruction and interrupt control regs
+ * Fence registers
  */
+#define FENCE_REG_830_0                        0x2000
+#define   I830_FENCE_START_MASK                0x07f80000
+#define   I830_FENCE_TILING_Y_SHIFT    12
+#define   I830_FENCE_SIZE_BITS(size)   ((get_order(size >> 19) - 1) << 8)
+#define   I830_FENCE_PITCH_SHIFT       4
+#define   I830_FENCE_REG_VALID         (1<<0)
+
+#define   I915_FENCE_START_MASK                0x0ff00000
+#define   I915_FENCE_SIZE_BITS(size)   ((get_order(size >> 20) - 1) << 8)
 
+#define FENCE_REG_965_0                        0x03000
+#define   I965_FENCE_PITCH_SHIFT       2
+#define   I965_FENCE_TILING_Y_SHIFT    1
+#define   I965_FENCE_REG_VALID         (1<<0)
+
+/*
+ * Instruction and interrupt control regs
+ */
 #define PRB0_TAIL      0x02030
 #define PRB0_HEAD      0x02034
 #define PRB0_START     0x02038
 #define   CM0_RC_OP_FLUSH_DISABLE (1<<0)
 #define GFX_FLSH_CNTL  0x02170 /* 915+ only */
 
+
 /*
  * Framebuffer compression (915+ only)
  */
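The new fence-register defines follow the usual MMIO convention: a register word is built by OR-ing shifted fields together (a start address under *_FENCE_START_MASK, a pitch value at *_FENCE_PITCH_SHIFT, the Y-tiling bit, and the valid bit). A purely illustrative composition helper; the parameters below are generic and the layout is not claimed to be the actual fence-register format:

#include <stdint.h>

/* Illustrative only: combine shifted fields the way the *_MASK, *_SHIFT
 * and *_VALID defines above are meant to be used. */
static uint32_t compose_fence_value(uint32_t start, uint32_t start_mask,
                                    uint32_t pitch_val, unsigned pitch_shift,
                                    int tile_y, unsigned tiling_y_shift)
{
        uint32_t val = start & start_mask;      /* start-address field */

        val |= pitch_val << pitch_shift;        /* pitch field */
        if (tile_y)
                val |= 1u << tiling_y_shift;    /* Y-tiling bit */
        val |= 1u;                              /* *_FENCE_REG_VALID */
        return val;
}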
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
new file mode 100644 (file)
index 0000000..4ca82a0
--- /dev/null
@@ -0,0 +1,193 @@
+/*
+ * Copyright © 2006 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ *    Eric Anholt <eric@anholt.net>
+ *
+ */
+#include "drmP.h"
+#include "drm.h"
+#include "i915_drm.h"
+#include "i915_drv.h"
+#include "intel_bios.h"
+
+
+static void *
+find_section(struct bdb_header *bdb, int section_id)
+{
+       u8 *base = (u8 *)bdb;
+       int index = 0;
+       u16 total, current_size;
+       u8 current_id;
+
+       /* skip to first section */
+       index += bdb->header_size;
+       total = bdb->bdb_size;
+
+       /* walk the sections looking for section_id */
+       while (index < total) {
+               current_id = *(base + index);
+               index++;
+               current_size = *((u16 *)(base + index));
+               index += 2;
+               if (current_id == section_id)
+                       return base + index;
+               index += current_size;
+       }
+
+       return NULL;
+}
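find_section() treats the BDB as a stream of records, each a one-byte id followed by a little-endian two-byte size and then the payload. The same walk against a plain byte buffer, reading the size byte-wise to sidestep the unaligned u16 load (function and parameter names are illustrative):

#include <stddef.h>
#include <stdint.h>

/* Walk (id:u8, size:u16 little-endian, payload[size]) records in `buf`,
 * returning a pointer to the payload of the first record whose id
 * matches, or NULL if it is not found. */
static const uint8_t *find_record(const uint8_t *buf, size_t total, uint8_t id)
{
        size_t index = 0;

        while (index + 3 <= total) {
                uint8_t cur_id = buf[index];
                uint16_t cur_size = (uint16_t)(buf[index + 1] |
                                               (buf[index + 2] << 8));

                index += 3;
                if (cur_id == id)
                        return buf + index;
                index += cur_size;
        }
        return NULL;
}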
+
+/* Try to find panel data */
+static void
+parse_panel_data(struct drm_i915_private *dev_priv, struct bdb_header *bdb)
+{
+       struct bdb_lvds_options *lvds_options;
+       struct bdb_lvds_lfp_data *lvds_lfp_data;
+       struct bdb_lvds_lfp_data_entry *entry;
+       struct lvds_dvo_timing *dvo_timing;
+       struct drm_display_mode *panel_fixed_mode;
+
+       /* Defaults if we can't find VBT info */
+       dev_priv->lvds_dither = 0;
+       dev_priv->lvds_vbt = 0;
+
+       lvds_options = find_section(bdb, BDB_LVDS_OPTIONS);
+       if (!lvds_options)
+               return;
+
+       dev_priv->lvds_dither = lvds_options->pixel_dither;
+       if (lvds_options->panel_type == 0xff)
+               return;
+
+       lvds_lfp_data = find_section(bdb, BDB_LVDS_LFP_DATA);
+       if (!lvds_lfp_data)
+               return;
+
+       dev_priv->lvds_vbt = 1;
+
+       entry = &lvds_lfp_data->data[lvds_options->panel_type];
+       dvo_timing = &entry->dvo_timing;
+
+       panel_fixed_mode = drm_calloc(1, sizeof(*panel_fixed_mode),
+                                     DRM_MEM_DRIVER);
+
+       panel_fixed_mode->hdisplay = (dvo_timing->hactive_hi << 8) |
+               dvo_timing->hactive_lo;
+       panel_fixed_mode->hsync_start = panel_fixed_mode->hdisplay +
+               ((dvo_timing->hsync_off_hi << 8) | dvo_timing->hsync_off_lo);
+       panel_fixed_mode->hsync_end = panel_fixed_mode->hsync_start +
+               dvo_timing->hsync_pulse_width;
+       panel_fixed_mode->htotal = panel_fixed_mode->hdisplay +
+               ((dvo_timing->hblank_hi << 8) | dvo_timing->hblank_lo);
+
+       panel_fixed_mode->vdisplay = (dvo_timing->vactive_hi << 8) |
+               dvo_timing->vactive_lo;
+       panel_fixed_mode->vsync_start = panel_fixed_mode->vdisplay +
+               dvo_timing->vsync_off;
+       panel_fixed_mode->vsync_end = panel_fixed_mode->vsync_start +
+               dvo_timing->vsync_pulse_width;
+       panel_fixed_mode->vtotal = panel_fixed_mode->vdisplay +
+               ((dvo_timing->vblank_hi << 8) | dvo_timing->vblank_lo);
+       panel_fixed_mode->clock = dvo_timing->clock * 10;
+       panel_fixed_mode->type = DRM_MODE_TYPE_PREFERRED;
+
+       drm_mode_set_name(panel_fixed_mode);
+
+       dev_priv->vbt_mode = panel_fixed_mode;
+
+       DRM_DEBUG("Found panel mode in BIOS VBT tables:\n");
+       drm_mode_debug_printmodeline(panel_fixed_mode);
+
+       return;
+}
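The LVDS timing block splits each geometry value into a low byte plus a high nibble and stores sync positions as offsets from the active region, so parse_panel_data() rebuilds the mode by shifting the *_hi fields and accumulating offsets. The horizontal half of that reconstruction as a self-contained example (the struct mirrors the fields used above; the sample values are made up):

#include <stdint.h>
#include <stdio.h>

struct dvo_h_timing {
        uint8_t hactive_lo, hblank_lo;
        uint8_t hblank_hi, hactive_hi;  /* only the low nibbles are meaningful */
        uint8_t hsync_off_lo, hsync_pulse_width, hsync_off_hi;
};

int main(void)
{
        /* 1280 active, 160 blank, sync 48 after active end, 112 wide */
        struct dvo_h_timing t = { 0x00, 0xA0, 0x0, 0x5, 48, 112, 0 };

        int hdisplay    = (t.hactive_hi << 8) | t.hactive_lo;
        int hsync_start = hdisplay + ((t.hsync_off_hi << 8) | t.hsync_off_lo);
        int hsync_end   = hsync_start + t.hsync_pulse_width;
        int htotal      = hdisplay + ((t.hblank_hi << 8) | t.hblank_lo);

        printf("%d %d %d %d\n", hdisplay, hsync_start, hsync_end, htotal);
        /* prints: 1280 1328 1440 1440 */
        return 0;
}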
+
+static void
+parse_general_features(struct drm_i915_private *dev_priv,
+                      struct bdb_header *bdb)
+{
+       struct bdb_general_features *general;
+
+       /* Set sensible defaults in case we can't find the general block */
+       dev_priv->int_tv_support = 1;
+       dev_priv->int_crt_support = 1;
+
+       general = find_section(bdb, BDB_GENERAL_FEATURES);
+       if (general) {
+               dev_priv->int_tv_support = general->int_tv_support;
+               dev_priv->int_crt_support = general->int_crt_support;
+       }
+}
+
+/**
+ * intel_init_bios - initialize VBIOS settings & find VBT
+ * @dev: DRM device
+ *
+ * Loads the Video BIOS and checks that the VBT exists.  Sets scratch registers
+ * to appropriate values.
+ *
+ * VBT existence is a sanity check that is relied on by other i830_bios.c code.
+ * Note that it would be better to use a BIOS call to get the VBT, as BIOSes may
+ * feed an updated VBT back through that, compared to what we'll fetch using
+ * this method of groping around in the BIOS data.
+ *
+ * Returns 0 on success, nonzero on failure.
+ */
+bool
+intel_init_bios(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct pci_dev *pdev = dev->pdev;
+       struct vbt_header *vbt = NULL;
+       struct bdb_header *bdb;
+       u8 __iomem *bios;
+       size_t size;
+       int i;
+
+       bios = pci_map_rom(pdev, &size);
+       if (!bios)
+               return -1;
+
+       /* Scour memory looking for the VBT signature */
+       for (i = 0; i + 4 < size; i++) {
+               if (!memcmp(bios + i, "$VBT", 4)) {
+                       vbt = (struct vbt_header *)(bios + i);
+                       break;
+               }
+       }
+
+       if (!vbt) {
+               DRM_ERROR("VBT signature missing\n");
+               pci_unmap_rom(pdev, bios);
+               return -1;
+       }
+
+       bdb = (struct bdb_header *)(bios + i + vbt->bdb_offset);
+
+       /* Grab useful general definitions */
+       parse_general_features(dev_priv, bdb);
+       parse_panel_data(dev_priv, bdb);
+
+       pci_unmap_rom(pdev, bios);
+
+       return 0;
+}
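intel_init_bios() locates the VBT by brute force: map the option ROM with pci_map_rom() and scan it for the 4-byte "$VBT" marker, then follow bdb_offset to the BIOS data block. The scan on its own, over an arbitrary buffer (the kernel ROM-mapping calls are not reproduced here):

#include <stddef.h>
#include <string.h>

/* Return the offset of the first "$VBT" signature in `buf`,
 * or -1 if the signature is not present. */
static long find_vbt_offset(const unsigned char *buf, size_t size)
{
        for (size_t i = 0; i + 4 < size; i++) {
                if (!memcmp(buf + i, "$VBT", 4))
                        return (long)i;
        }
        return -1;
}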
diff --git a/drivers/gpu/drm/i915/intel_bios.h b/drivers/gpu/drm/i915/intel_bios.h
new file mode 100644 (file)
index 0000000..5ea715a
--- /dev/null
@@ -0,0 +1,405 @@
+/*
+ * Copyright © 2006 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ *    Eric Anholt <eric@anholt.net>
+ *
+ */
+
+#ifndef _I830_BIOS_H_
+#define _I830_BIOS_H_
+
+#include "drmP.h"
+
+struct vbt_header {
+       u8 signature[20];               /**< Always starts with '$VBT' */
+       u16 version;                    /**< decimal */
+       u16 header_size;                /**< in bytes */
+       u16 vbt_size;                   /**< in bytes */
+       u8 vbt_checksum;
+       u8 reserved0;
+       u32 bdb_offset;                 /**< from beginning of VBT */
+       u32 aim_offset[4];              /**< from beginning of VBT */
+} __attribute__((packed));
+
+struct bdb_header {
+       u8 signature[16];               /**< Always 'BIOS_DATA_BLOCK' */
+       u16 version;                    /**< decimal */
+       u16 header_size;                /**< in bytes */
+       u16 bdb_size;                   /**< in bytes */
+};
+
+/* strictly speaking, this is a "skip" block, but it has interesting info */
+struct vbios_data {
+       u8 type; /* 0 == desktop, 1 == mobile */
+       u8 relstage;
+       u8 chipset;
+       u8 lvds_present:1;
+       u8 tv_present:1;
+       u8 rsvd2:6; /* finish byte */
+       u8 rsvd3[4];
+       u8 signon[155];
+       u8 copyright[61];
+       u16 code_segment;
+       u8 dos_boot_mode;
+       u8 bandwidth_percent;
+       u8 rsvd4; /* popup memory size */
+       u8 resize_pci_bios;
+       u8 rsvd5; /* is crt already on ddc2 */
+} __attribute__((packed));
+
+/*
+ * There are several types of BIOS data blocks (BDBs); each block has
+ * an ID and size in its first 3 bytes (the ID in the first byte, the size in the next 2).
+ * Known types are listed below.
+ */
+#define BDB_GENERAL_FEATURES     1
+#define BDB_GENERAL_DEFINITIONS          2
+#define BDB_OLD_TOGGLE_LIST      3
+#define BDB_MODE_SUPPORT_LIST    4
+#define BDB_GENERIC_MODE_TABLE   5
+#define BDB_EXT_MMIO_REGS        6
+#define BDB_SWF_IO               7
+#define BDB_SWF_MMIO             8
+#define BDB_DOT_CLOCK_TABLE      9
+#define BDB_MODE_REMOVAL_TABLE  10
+#define BDB_CHILD_DEVICE_TABLE  11
+#define BDB_DRIVER_FEATURES     12
+#define BDB_DRIVER_PERSISTENCE  13
+#define BDB_EXT_TABLE_PTRS      14
+#define BDB_DOT_CLOCK_OVERRIDE  15
+#define BDB_DISPLAY_SELECT      16
+/* 17 rsvd */
+#define BDB_DRIVER_ROTATION     18
+#define BDB_DISPLAY_REMOVE      19
+#define BDB_OEM_CUSTOM          20
+#define BDB_EFP_LIST            21 /* workarounds for VGA hsync/vsync */
+#define BDB_SDVO_LVDS_OPTIONS   22
+#define BDB_SDVO_PANEL_DTDS     23
+#define BDB_SDVO_LVDS_PNP_IDS   24
+#define BDB_SDVO_LVDS_POWER_SEQ         25
+#define BDB_TV_OPTIONS          26
+#define BDB_LVDS_OPTIONS        40
+#define BDB_LVDS_LFP_DATA_PTRS  41
+#define BDB_LVDS_LFP_DATA       42
+#define BDB_LVDS_BACKLIGHT      43
+#define BDB_LVDS_POWER          44
+#define BDB_SKIP               254 /* VBIOS private block, ignore */
+
+struct bdb_general_features {
+        /* bits 1 */
+       u8 panel_fitting:2;
+       u8 flexaim:1;
+       u8 msg_enable:1;
+       u8 clear_screen:3;
+       u8 color_flip:1;
+
+        /* bits 2 */
+       u8 download_ext_vbt:1;
+       u8 enable_ssc:1;
+       u8 ssc_freq:1;
+       u8 enable_lfp_on_override:1;
+       u8 disable_ssc_ddt:1;
+       u8 rsvd8:3; /* finish byte */
+
+        /* bits 3 */
+       u8 disable_smooth_vision:1;
+       u8 single_dvi:1;
+       u8 rsvd9:6; /* finish byte */
+
+        /* bits 4 */
+       u8 legacy_monitor_detect;
+
+        /* bits 5 */
+       u8 int_crt_support:1;
+       u8 int_tv_support:1;
+       u8 rsvd11:6; /* finish byte */
+} __attribute__((packed));
+
+struct bdb_general_definitions {
+       /* DDC GPIO */
+       u8 crt_ddc_gmbus_pin;
+
+       /* DPMS bits */
+       u8 dpms_acpi:1;
+       u8 skip_boot_crt_detect:1;
+       u8 dpms_aim:1;
+       u8 rsvd1:5; /* finish byte */
+
+       /* boot device bits */
+       u8 boot_display[2];
+       u8 child_dev_size;
+
+       /* device info */
+       u8 tv_or_lvds_info[33];
+       u8 dev1[33];
+       u8 dev2[33];
+       u8 dev3[33];
+       u8 dev4[33];
+       /* may be another device block here on some platforms */
+};
+
+struct bdb_lvds_options {
+       u8 panel_type;
+       u8 rsvd1;
+       /* LVDS capabilities, stored in a dword */
+       u8 rsvd2:1;
+       u8 lvds_edid:1;
+       u8 pixel_dither:1;
+       u8 pfit_ratio_auto:1;
+       u8 pfit_gfx_mode_enhanced:1;
+       u8 pfit_text_mode_enhanced:1;
+       u8 pfit_mode:2;
+       u8 rsvd4;
+} __attribute__((packed));
+
+/* LFP pointer table contains entries to the struct below */
+struct bdb_lvds_lfp_data_ptr {
+       u16 fp_timing_offset; /* offsets are from start of bdb */
+       u8 fp_table_size;
+       u16 dvo_timing_offset;
+       u8 dvo_table_size;
+       u16 panel_pnp_id_offset;
+       u8 pnp_table_size;
+} __attribute__((packed));
+
+struct bdb_lvds_lfp_data_ptrs {
+       u8 lvds_entries; /* followed by one or more lvds_data_ptr structs */
+       struct bdb_lvds_lfp_data_ptr ptr[16];
+} __attribute__((packed));
+
+/* LFP data has 3 blocks per entry */
+struct lvds_fp_timing {
+       u16 x_res;
+       u16 y_res;
+       u32 lvds_reg;
+       u32 lvds_reg_val;
+       u32 pp_on_reg;
+       u32 pp_on_reg_val;
+       u32 pp_off_reg;
+       u32 pp_off_reg_val;
+       u32 pp_cycle_reg;
+       u32 pp_cycle_reg_val;
+       u32 pfit_reg;
+       u32 pfit_reg_val;
+       u16 terminator;
+} __attribute__((packed));
+
+struct lvds_dvo_timing {
+       u16 clock;              /**< In 10 kHz units */
+       u8 hactive_lo;
+       u8 hblank_lo;
+       u8 hblank_hi:4;
+       u8 hactive_hi:4;
+       u8 vactive_lo;
+       u8 vblank_lo;
+       u8 vblank_hi:4;
+       u8 vactive_hi:4;
+       u8 hsync_off_lo;
+       u8 hsync_pulse_width;
+       u8 vsync_pulse_width:4;
+       u8 vsync_off:4;
+       u8 rsvd0:6;
+       u8 hsync_off_hi:2;
+       u8 h_image;
+       u8 v_image;
+       u8 max_hv;
+       u8 h_border;
+       u8 v_border;
+       u8 rsvd1:3;
+       u8 digital:2;
+       u8 vsync_positive:1;
+       u8 hsync_positive:1;
+       u8 rsvd2:1;
+} __attribute__((packed));
+
+struct lvds_pnp_id {
+       u16 mfg_name;
+       u16 product_code;
+       u32 serial;
+       u8 mfg_week;
+       u8 mfg_year;
+} __attribute__((packed));
+
+struct bdb_lvds_lfp_data_entry {
+       struct lvds_fp_timing fp_timing;
+       struct lvds_dvo_timing dvo_timing;
+       struct lvds_pnp_id pnp_id;
+} __attribute__((packed));
+
+struct bdb_lvds_lfp_data {
+       struct bdb_lvds_lfp_data_entry data[16];
+} __attribute__((packed));
+
+struct aimdb_header {
+       char signature[16];
+       char oem_device[20];
+       u16 aimdb_version;
+       u16 aimdb_header_size;
+       u16 aimdb_size;
+} __attribute__((packed));
+
+struct aimdb_block {
+       u8 aimdb_id;
+       u16 aimdb_size;
+} __attribute__((packed));
+
+struct vch_panel_data {
+       u16 fp_timing_offset;
+       u8 fp_timing_size;
+       u16 dvo_timing_offset;
+       u8 dvo_timing_size;
+       u16 text_fitting_offset;
+       u8 text_fitting_size;
+       u16 graphics_fitting_offset;
+       u8 graphics_fitting_size;
+} __attribute__((packed));
+
+struct vch_bdb_22 {
+       struct aimdb_block aimdb_block;
+       struct vch_panel_data panels[16];
+} __attribute__((packed));
+
+bool intel_init_bios(struct drm_device *dev);
+
+/*
+ * Driver<->VBIOS interaction occurs through scratch bits in
+ * GR18 & SWF*.
+ */
+
+/* GR18 bits are set on display switch and hotkey events */
+#define GR18_DRIVER_SWITCH_EN  (1<<7) /* 0: VBIOS control, 1: driver control */
+#define GR18_HOTKEY_MASK       0x78 /* See also SWF4 15:0 */
+#define   GR18_HK_NONE         (0x0<<3)
+#define   GR18_HK_LFP_STRETCH  (0x1<<3)
+#define   GR18_HK_TOGGLE_DISP  (0x2<<3)
+#define   GR18_HK_DISP_SWITCH  (0x4<<3) /* see SWF14 15:0 for what to enable */
+#define   GR18_HK_POPUP_DISABLED (0x6<<3)
+#define   GR18_HK_POPUP_ENABLED        (0x7<<3)
+#define   GR18_HK_PFIT         (0x8<<3)
+#define   GR18_HK_APM_CHANGE   (0xa<<3)
+#define   GR18_HK_MULTIPLE     (0xc<<3)
+#define GR18_USER_INT_EN       (1<<2)
+#define GR18_A0000_FLUSH_EN    (1<<1)
+#define GR18_SMM_EN            (1<<0)
+
+/* Set by driver, cleared by VBIOS */
+#define SWF00_YRES_SHIFT       16
+#define SWF00_XRES_SHIFT       0
+#define SWF00_RES_MASK         0xffff
+
+/* Set by VBIOS at boot time and driver at runtime */
+#define SWF01_TV2_FORMAT_SHIFT 8
+#define SWF01_TV1_FORMAT_SHIFT 0
+#define SWF01_TV_FORMAT_MASK   0xffff
+
+#define SWF10_VBIOS_BLC_I2C_EN (1<<29)
+#define SWF10_GTT_OVERRIDE_EN  (1<<28)
+#define SWF10_LFP_DPMS_OVR     (1<<27) /* override DPMS on display switch */
+#define SWF10_ACTIVE_TOGGLE_LIST_MASK (7<<24)
+#define   SWF10_OLD_TOGGLE     0x0
+#define   SWF10_TOGGLE_LIST_1  0x1
+#define   SWF10_TOGGLE_LIST_2  0x2
+#define   SWF10_TOGGLE_LIST_3  0x3
+#define   SWF10_TOGGLE_LIST_4  0x4
+#define SWF10_PANNING_EN       (1<<23)
+#define SWF10_DRIVER_LOADED    (1<<22)
+#define SWF10_EXTENDED_DESKTOP (1<<21)
+#define SWF10_EXCLUSIVE_MODE   (1<<20)
+#define SWF10_OVERLAY_EN       (1<<19)
+#define SWF10_PLANEB_HOLDOFF   (1<<18)
+#define SWF10_PLANEA_HOLDOFF   (1<<17)
+#define SWF10_VGA_HOLDOFF      (1<<16)
+#define SWF10_ACTIVE_DISP_MASK 0xffff
+#define   SWF10_PIPEB_LFP2     (1<<15)
+#define   SWF10_PIPEB_EFP2     (1<<14)
+#define   SWF10_PIPEB_TV2      (1<<13)
+#define   SWF10_PIPEB_CRT2     (1<<12)
+#define   SWF10_PIPEB_LFP      (1<<11)
+#define   SWF10_PIPEB_EFP      (1<<10)
+#define   SWF10_PIPEB_TV       (1<<9)
+#define   SWF10_PIPEB_CRT      (1<<8)
+#define   SWF10_PIPEA_LFP2     (1<<7)
+#define   SWF10_PIPEA_EFP2     (1<<6)
+#define   SWF10_PIPEA_TV2      (1<<5)
+#define   SWF10_PIPEA_CRT2     (1<<4)
+#define   SWF10_PIPEA_LFP      (1<<3)
+#define   SWF10_PIPEA_EFP      (1<<2)
+#define   SWF10_PIPEA_TV       (1<<1)
+#define   SWF10_PIPEA_CRT      (1<<0)
+
+#define SWF11_MEMORY_SIZE_SHIFT        16
+#define SWF11_SV_TEST_EN       (1<<15)
+#define SWF11_IS_AGP           (1<<14)
+#define SWF11_DISPLAY_HOLDOFF  (1<<13)
+#define SWF11_DPMS_REDUCED     (1<<12)
+#define SWF11_IS_VBE_MODE      (1<<11)
+#define SWF11_PIPEB_ACCESS     (1<<10) /* 0 here means pipe a */
+#define SWF11_DPMS_MASK                0x07
+#define   SWF11_DPMS_OFF       (1<<2)
+#define   SWF11_DPMS_SUSPEND   (1<<1)
+#define   SWF11_DPMS_STANDBY   (1<<0)
+#define   SWF11_DPMS_ON                0
+
+#define SWF14_GFX_PFIT_EN      (1<<31)
+#define SWF14_TEXT_PFIT_EN     (1<<30)
+#define SWF14_LID_STATUS_CLOSED        (1<<29) /* 0 here means open */
+#define SWF14_POPUP_EN         (1<<28)
+#define SWF14_DISPLAY_HOLDOFF  (1<<27)
+#define SWF14_DISP_DETECT_EN   (1<<26)
+#define SWF14_DOCKING_STATUS_DOCKED (1<<25) /* 0 here means undocked */
+#define SWF14_DRIVER_STATUS    (1<<24)
+#define SWF14_OS_TYPE_WIN9X    (1<<23)
+#define SWF14_OS_TYPE_WINNT    (1<<22)
+/* 21:19 rsvd */
+#define SWF14_PM_TYPE_MASK     0x00070000
+#define   SWF14_PM_ACPI_VIDEO  (0x4 << 16)
+#define   SWF14_PM_ACPI                (0x3 << 16)
+#define   SWF14_PM_APM_12      (0x2 << 16)
+#define   SWF14_PM_APM_11      (0x1 << 16)
+#define SWF14_HK_REQUEST_MASK  0x0000ffff /* see GR18 6:3 for event type */
+          /* if GR18 indicates a display switch */
+#define   SWF14_DS_PIPEB_LFP2_EN (1<<15)
+#define   SWF14_DS_PIPEB_EFP2_EN (1<<14)
+#define   SWF14_DS_PIPEB_TV2_EN  (1<<13)
+#define   SWF14_DS_PIPEB_CRT2_EN (1<<12)
+#define   SWF14_DS_PIPEB_LFP_EN  (1<<11)
+#define   SWF14_DS_PIPEB_EFP_EN  (1<<10)
+#define   SWF14_DS_PIPEB_TV_EN   (1<<9)
+#define   SWF14_DS_PIPEB_CRT_EN  (1<<8)
+#define   SWF14_DS_PIPEA_LFP2_EN (1<<7)
+#define   SWF14_DS_PIPEA_EFP2_EN (1<<6)
+#define   SWF14_DS_PIPEA_TV2_EN  (1<<5)
+#define   SWF14_DS_PIPEA_CRT2_EN (1<<4)
+#define   SWF14_DS_PIPEA_LFP_EN  (1<<3)
+#define   SWF14_DS_PIPEA_EFP_EN  (1<<2)
+#define   SWF14_DS_PIPEA_TV_EN   (1<<1)
+#define   SWF14_DS_PIPEA_CRT_EN  (1<<0)
+          /* if GR18 indicates a panel fitting request */
+#define   SWF14_PFIT_EN                (1<<0) /* 0 means disable */
+          /* if GR18 indicates an APM change request */
+#define   SWF14_APM_HIBERNATE  0x4
+#define   SWF14_APM_SUSPEND    0x3
+#define   SWF14_APM_STANDBY    0x1
+#define   SWF14_APM_RESTORE    0x0
+
+#endif /* _I830_BIOS_H_ */
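Going by the SWF00_*_SHIFT and SWF00_RES_MASK defines, the driver reports its resolution to the VBIOS with the Y resolution in the high half of SWF00 and the X resolution in the low half. A small pack/unpack sketch using those defines; the helpers themselves are illustrative, not part of the driver:

#include <stdint.h>

#define SWF00_YRES_SHIFT       16
#define SWF00_XRES_SHIFT       0
#define SWF00_RES_MASK         0xffff

static uint32_t swf00_pack(uint32_t xres, uint32_t yres)
{
        return ((yres & SWF00_RES_MASK) << SWF00_YRES_SHIFT) |
               ((xres & SWF00_RES_MASK) << SWF00_XRES_SHIFT);
}

static void swf00_unpack(uint32_t swf00, uint32_t *xres, uint32_t *yres)
{
        *xres = (swf00 >> SWF00_XRES_SHIFT) & SWF00_RES_MASK;
        *yres = (swf00 >> SWF00_YRES_SHIFT) & SWF00_RES_MASK;
}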
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
new file mode 100644 (file)
index 0000000..dcaed34
--- /dev/null
@@ -0,0 +1,284 @@
+/*
+ * Copyright © 2006-2007 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *     Eric Anholt <eric@anholt.net>
+ */
+
+#include <linux/i2c.h>
+#include "drmP.h"
+#include "drm.h"
+#include "drm_crtc.h"
+#include "drm_crtc_helper.h"
+#include "intel_drv.h"
+#include "i915_drm.h"
+#include "i915_drv.h"
+
+static void intel_crt_dpms(struct drm_encoder *encoder, int mode)
+{
+       struct drm_device *dev = encoder->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       u32 temp;
+
+       temp = I915_READ(ADPA);
+       temp &= ~(ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE);
+       temp &= ~ADPA_DAC_ENABLE;
+
+       switch(mode) {
+       case DRM_MODE_DPMS_ON:
+               temp |= ADPA_DAC_ENABLE;
+               break;
+       case DRM_MODE_DPMS_STANDBY:
+               temp |= ADPA_DAC_ENABLE | ADPA_HSYNC_CNTL_DISABLE;
+               break;
+       case DRM_MODE_DPMS_SUSPEND:
+               temp |= ADPA_DAC_ENABLE | ADPA_VSYNC_CNTL_DISABLE;
+               break;
+       case DRM_MODE_DPMS_OFF:
+               temp |= ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE;
+               break;
+       }
+
+       I915_WRITE(ADPA, temp);
+}
+
+static int intel_crt_mode_valid(struct drm_connector *connector,
+                               struct drm_display_mode *mode)
+{
+       if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+               return MODE_NO_DBLESCAN;
+
+       if (mode->clock > 400000 || mode->clock < 25000)
+               return MODE_CLOCK_RANGE;
+
+       return MODE_OK;
+}
+
+static bool intel_crt_mode_fixup(struct drm_encoder *encoder,
+                                struct drm_display_mode *mode,
+                                struct drm_display_mode *adjusted_mode)
+{
+       return true;
+}
+
+static void intel_crt_mode_set(struct drm_encoder *encoder,
+                              struct drm_display_mode *mode,
+                              struct drm_display_mode *adjusted_mode)
+{
+
+       struct drm_device *dev = encoder->dev;
+       struct drm_crtc *crtc = encoder->crtc;
+       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       int dpll_md_reg;
+       u32 adpa, dpll_md;
+
+       if (intel_crtc->pipe == 0)
+               dpll_md_reg = DPLL_A_MD;
+       else
+               dpll_md_reg = DPLL_B_MD;
+
+       /*
+        * Disable separate mode multiplier used when cloning SDVO to CRT
+        * XXX this needs to be adjusted when we really are cloning
+        */
+       if (IS_I965G(dev)) {
+               dpll_md = I915_READ(dpll_md_reg);
+               I915_WRITE(dpll_md_reg,
+                          dpll_md & ~DPLL_MD_UDI_MULTIPLIER_MASK);
+       }
+
+       adpa = 0;
+       if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
+               adpa |= ADPA_HSYNC_ACTIVE_HIGH;
+       if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
+               adpa |= ADPA_VSYNC_ACTIVE_HIGH;
+
+       if (intel_crtc->pipe == 0)
+               adpa |= ADPA_PIPE_A_SELECT;
+       else
+               adpa |= ADPA_PIPE_B_SELECT;
+
+       I915_WRITE(ADPA, adpa);
+}
+
+/**
+ * Uses CRT_HOTPLUG_EN and CRT_HOTPLUG_STAT to detect CRT presence.
+ *
+ * Not for i915G/i915GM
+ *
+ * \return true if CRT is connected.
+ * \return false if CRT is disconnected.
+ */
+static bool intel_crt_detect_hotplug(struct drm_connector *connector)
+{
+       struct drm_device *dev = connector->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       u32 temp;
+
+       unsigned long timeout = jiffies + msecs_to_jiffies(1000);
+
+       temp = I915_READ(PORT_HOTPLUG_EN);
+
+       I915_WRITE(PORT_HOTPLUG_EN,
+                  temp | CRT_HOTPLUG_FORCE_DETECT | (1 << 5));
+
+       do {
+               if (!(I915_READ(PORT_HOTPLUG_EN) & CRT_HOTPLUG_FORCE_DETECT))
+                       break;
+               msleep(1);
+       } while (time_after(timeout, jiffies));
+
+       if ((I915_READ(PORT_HOTPLUG_STAT) & CRT_HOTPLUG_MONITOR_MASK) ==
+           CRT_HOTPLUG_MONITOR_COLOR)
+               return true;
+
+       return false;
+}
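Hotplug detection above is a force-detect handshake: set CRT_HOTPLUG_FORCE_DETECT in PORT_HOTPLUG_EN, poll until the hardware clears it (bounded by a one-second deadline), then read the monitor bits from PORT_HOTPLUG_STAT. Stripped of the register specifics it is a poll-with-timeout loop; in this userspace sketch read_reg() and the bit value are stand-ins for the MMIO access:

#include <stdbool.h>
#include <stdint.h>
#include <time.h>

static uint32_t fake_reg = 1u << 3;     /* pretend the force-detect bit is set */

/* Stand-in for an MMIO read; this fake hardware clears the bit after one poll. */
static uint32_t read_reg(void)
{
        uint32_t val = fake_reg;

        fake_reg &= ~(1u << 3);
        return val;
}

/* Poll until `bit` clears or roughly one second has passed. */
static bool wait_for_clear(uint32_t bit)
{
        for (int tries = 0; tries < 1000; tries++) {    /* ~1 s at 1 ms per try */
                if (!(read_reg() & bit))
                        return true;
                nanosleep(&(struct timespec){ .tv_nsec = 1000000 }, NULL);
        }
        return false;
}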
+
+static bool intel_crt_detect_ddc(struct drm_connector *connector)
+{
+       struct intel_output *intel_output = to_intel_output(connector);
+
+       /* CRT should always be at 0, but check anyway */
+       if (intel_output->type != INTEL_OUTPUT_ANALOG)
+               return false;
+
+       return intel_ddc_probe(intel_output);
+}
+
+static enum drm_connector_status intel_crt_detect(struct drm_connector *connector)
+{
+       struct drm_device *dev = connector->dev;
+
+       if (IS_I9XX(dev) && !IS_I915G(dev) && !IS_I915GM(dev)) {
+               if (intel_crt_detect_hotplug(connector))
+                       return connector_status_connected;
+               else
+                       return connector_status_disconnected;
+       }
+
+       if (intel_crt_detect_ddc(connector))
+               return connector_status_connected;
+
+       /* TODO use load detect */
+       return connector_status_unknown;
+}
+
+static void intel_crt_destroy(struct drm_connector *connector)
+{
+       struct intel_output *intel_output = to_intel_output(connector);
+
+       intel_i2c_destroy(intel_output->ddc_bus);
+       drm_sysfs_connector_remove(connector);
+       drm_connector_cleanup(connector);
+       kfree(connector);
+}
+
+static int intel_crt_get_modes(struct drm_connector *connector)
+{
+       struct intel_output *intel_output = to_intel_output(connector);
+       return intel_ddc_get_modes(intel_output);
+}
+
+static int intel_crt_set_property(struct drm_connector *connector,
+                                 struct drm_property *property,
+                                 uint64_t value)
+{
+       struct drm_device *dev = connector->dev;
+
+       if (property == dev->mode_config.dpms_property && connector->encoder)
+               intel_crt_dpms(connector->encoder, (uint32_t)(value & 0xf));
+
+       return 0;
+}
+
+/*
+ * Routines for controlling stuff on the analog port
+ */
+
+static const struct drm_encoder_helper_funcs intel_crt_helper_funcs = {
+       .dpms = intel_crt_dpms,
+       .mode_fixup = intel_crt_mode_fixup,
+       .prepare = intel_encoder_prepare,
+       .commit = intel_encoder_commit,
+       .mode_set = intel_crt_mode_set,
+};
+
+static const struct drm_connector_funcs intel_crt_connector_funcs = {
+       .detect = intel_crt_detect,
+       .fill_modes = drm_helper_probe_single_connector_modes,
+       .destroy = intel_crt_destroy,
+       .set_property = intel_crt_set_property,
+};
+
+static const struct drm_connector_helper_funcs intel_crt_connector_helper_funcs = {
+       .mode_valid = intel_crt_mode_valid,
+       .get_modes = intel_crt_get_modes,
+       .best_encoder = intel_best_encoder,
+};
+
+static void intel_crt_enc_destroy(struct drm_encoder *encoder)
+{
+       drm_encoder_cleanup(encoder);
+}
+
+static const struct drm_encoder_funcs intel_crt_enc_funcs = {
+       .destroy = intel_crt_enc_destroy,
+};
+
+void intel_crt_init(struct drm_device *dev)
+{
+       struct drm_connector *connector;
+       struct intel_output *intel_output;
+
+       intel_output = kzalloc(sizeof(struct intel_output), GFP_KERNEL);
+       if (!intel_output)
+               return;
+
+       connector = &intel_output->base;
+       drm_connector_init(dev, &intel_output->base,
+                          &intel_crt_connector_funcs, DRM_MODE_CONNECTOR_VGA);
+
+       drm_encoder_init(dev, &intel_output->enc, &intel_crt_enc_funcs,
+                        DRM_MODE_ENCODER_DAC);
+
+       drm_mode_connector_attach_encoder(&intel_output->base,
+                                         &intel_output->enc);
+
+       /* Set up the DDC bus. */
+       intel_output->ddc_bus = intel_i2c_create(dev, GPIOA, "CRTDDC_A");
+       if (!intel_output->ddc_bus) {
+               dev_printk(KERN_ERR, &dev->pdev->dev, "DDC bus registration "
+                          "failed.\n");
+               return;
+       }
+
+       intel_output->type = INTEL_OUTPUT_ANALOG;
+       connector->interlace_allowed = 0;
+       connector->doublescan_allowed = 0;
+
+       drm_encoder_helper_add(&intel_output->enc, &intel_crt_helper_funcs);
+       drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs);
+
+       drm_sysfs_connector_add(connector);
+}
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
new file mode 100644 (file)
index 0000000..e5c1c80
--- /dev/null
@@ -0,0 +1,1618 @@
+/*
+ * Copyright © 2006-2007 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *     Eric Anholt <eric@anholt.net>
+ */
+
+#include <linux/i2c.h>
+#include "drmP.h"
+#include "intel_drv.h"
+#include "i915_drm.h"
+#include "i915_drv.h"
+
+#include "drm_crtc_helper.h"
+
+bool intel_pipe_has_type (struct drm_crtc *crtc, int type);
+
+typedef struct {
+    /* given values */
+    int n;
+    int m1, m2;
+    int p1, p2;
+    /* derived values */
+    int        dot;
+    int        vco;
+    int        m;
+    int        p;
+} intel_clock_t;
+
+typedef struct {
+    int        min, max;
+} intel_range_t;
+
+typedef struct {
+    int        dot_limit;
+    int        p2_slow, p2_fast;
+} intel_p2_t;
+
+#define INTEL_P2_NUM                 2
+
+typedef struct {
+    intel_range_t   dot, vco, n, m, m1, m2, p, p1;
+    intel_p2_t     p2;
+} intel_limit_t;
+
+#define I8XX_DOT_MIN             25000
+#define I8XX_DOT_MAX            350000
+#define I8XX_VCO_MIN            930000
+#define I8XX_VCO_MAX           1400000
+#define I8XX_N_MIN                   3
+#define I8XX_N_MAX                  16
+#define I8XX_M_MIN                  96
+#define I8XX_M_MAX                 140
+#define I8XX_M1_MIN                 18
+#define I8XX_M1_MAX                 26
+#define I8XX_M2_MIN                  6
+#define I8XX_M2_MAX                 16
+#define I8XX_P_MIN                   4
+#define I8XX_P_MAX                 128
+#define I8XX_P1_MIN                  2
+#define I8XX_P1_MAX                 33
+#define I8XX_P1_LVDS_MIN             1
+#define I8XX_P1_LVDS_MAX             6
+#define I8XX_P2_SLOW                 4
+#define I8XX_P2_FAST                 2
+#define I8XX_P2_LVDS_SLOW            14
+#define I8XX_P2_LVDS_FAST            14 /* No fast option */
+#define I8XX_P2_SLOW_LIMIT      165000
+
+#define I9XX_DOT_MIN             20000
+#define I9XX_DOT_MAX            400000
+#define I9XX_VCO_MIN           1400000
+#define I9XX_VCO_MAX           2800000
+#define I9XX_N_MIN                   3
+#define I9XX_N_MAX                   8
+#define I9XX_M_MIN                  70
+#define I9XX_M_MAX                 120
+#define I9XX_M1_MIN                 10
+#define I9XX_M1_MAX                 20
+#define I9XX_M2_MIN                  5
+#define I9XX_M2_MAX                  9
+#define I9XX_P_SDVO_DAC_MIN          5
+#define I9XX_P_SDVO_DAC_MAX         80
+#define I9XX_P_LVDS_MIN                      7
+#define I9XX_P_LVDS_MAX                     98
+#define I9XX_P1_MIN                  1
+#define I9XX_P1_MAX                  8
+#define I9XX_P2_SDVO_DAC_SLOW               10
+#define I9XX_P2_SDVO_DAC_FAST                5
+#define I9XX_P2_SDVO_DAC_SLOW_LIMIT     200000
+#define I9XX_P2_LVDS_SLOW                   14
+#define I9XX_P2_LVDS_FAST                    7
+#define I9XX_P2_LVDS_SLOW_LIMIT                 112000
+
+#define INTEL_LIMIT_I8XX_DVO_DAC    0
+#define INTEL_LIMIT_I8XX_LVDS      1
+#define INTEL_LIMIT_I9XX_SDVO_DAC   2
+#define INTEL_LIMIT_I9XX_LVDS      3
+
+static const intel_limit_t intel_limits[] = {
+    { /* INTEL_LIMIT_I8XX_DVO_DAC */
+        .dot = { .min = I8XX_DOT_MIN,          .max = I8XX_DOT_MAX },
+        .vco = { .min = I8XX_VCO_MIN,          .max = I8XX_VCO_MAX },
+        .n   = { .min = I8XX_N_MIN,            .max = I8XX_N_MAX },
+        .m   = { .min = I8XX_M_MIN,            .max = I8XX_M_MAX },
+        .m1  = { .min = I8XX_M1_MIN,           .max = I8XX_M1_MAX },
+        .m2  = { .min = I8XX_M2_MIN,           .max = I8XX_M2_MAX },
+        .p   = { .min = I8XX_P_MIN,            .max = I8XX_P_MAX },
+        .p1  = { .min = I8XX_P1_MIN,           .max = I8XX_P1_MAX },
+       .p2  = { .dot_limit = I8XX_P2_SLOW_LIMIT,
+                .p2_slow = I8XX_P2_SLOW,       .p2_fast = I8XX_P2_FAST },
+    },
+    { /* INTEL_LIMIT_I8XX_LVDS */
+        .dot = { .min = I8XX_DOT_MIN,          .max = I8XX_DOT_MAX },
+        .vco = { .min = I8XX_VCO_MIN,          .max = I8XX_VCO_MAX },
+        .n   = { .min = I8XX_N_MIN,            .max = I8XX_N_MAX },
+        .m   = { .min = I8XX_M_MIN,            .max = I8XX_M_MAX },
+        .m1  = { .min = I8XX_M1_MIN,           .max = I8XX_M1_MAX },
+        .m2  = { .min = I8XX_M2_MIN,           .max = I8XX_M2_MAX },
+        .p   = { .min = I8XX_P_MIN,            .max = I8XX_P_MAX },
+        .p1  = { .min = I8XX_P1_LVDS_MIN,      .max = I8XX_P1_LVDS_MAX },
+       .p2  = { .dot_limit = I8XX_P2_SLOW_LIMIT,
+                .p2_slow = I8XX_P2_LVDS_SLOW,  .p2_fast = I8XX_P2_LVDS_FAST },
+    },
+    { /* INTEL_LIMIT_I9XX_SDVO_DAC */
+        .dot = { .min = I9XX_DOT_MIN,          .max = I9XX_DOT_MAX },
+        .vco = { .min = I9XX_VCO_MIN,          .max = I9XX_VCO_MAX },
+        .n   = { .min = I9XX_N_MIN,            .max = I9XX_N_MAX },
+        .m   = { .min = I9XX_M_MIN,            .max = I9XX_M_MAX },
+        .m1  = { .min = I9XX_M1_MIN,           .max = I9XX_M1_MAX },
+        .m2  = { .min = I9XX_M2_MIN,           .max = I9XX_M2_MAX },
+        .p   = { .min = I9XX_P_SDVO_DAC_MIN,   .max = I9XX_P_SDVO_DAC_MAX },
+        .p1  = { .min = I9XX_P1_MIN,           .max = I9XX_P1_MAX },
+       .p2  = { .dot_limit = I9XX_P2_SDVO_DAC_SLOW_LIMIT,
+                .p2_slow = I9XX_P2_SDVO_DAC_SLOW,      .p2_fast = I9XX_P2_SDVO_DAC_FAST },
+    },
+    { /* INTEL_LIMIT_I9XX_LVDS */
+        .dot = { .min = I9XX_DOT_MIN,          .max = I9XX_DOT_MAX },
+        .vco = { .min = I9XX_VCO_MIN,          .max = I9XX_VCO_MAX },
+        .n   = { .min = I9XX_N_MIN,            .max = I9XX_N_MAX },
+        .m   = { .min = I9XX_M_MIN,            .max = I9XX_M_MAX },
+        .m1  = { .min = I9XX_M1_MIN,           .max = I9XX_M1_MAX },
+        .m2  = { .min = I9XX_M2_MIN,           .max = I9XX_M2_MAX },
+        .p   = { .min = I9XX_P_LVDS_MIN,       .max = I9XX_P_LVDS_MAX },
+        .p1  = { .min = I9XX_P1_MIN,           .max = I9XX_P1_MAX },
+       /* The single-channel range is 25-112 MHz, and dual-channel
+        * is 80-224 MHz.  Prefer single channel as much as possible.
+        */
+       .p2  = { .dot_limit = I9XX_P2_LVDS_SLOW_LIMIT,
+                .p2_slow = I9XX_P2_LVDS_SLOW,  .p2_fast = I9XX_P2_LVDS_FAST },
+    },
+};
+
+static const intel_limit_t *intel_limit(struct drm_crtc *crtc)
+{
+       struct drm_device *dev = crtc->dev;
+       const intel_limit_t *limit;
+
+       if (IS_I9XX(dev)) {
+               if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
+                       limit = &intel_limits[INTEL_LIMIT_I9XX_LVDS];
+               else
+                       limit = &intel_limits[INTEL_LIMIT_I9XX_SDVO_DAC];
+       } else {
+               if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
+                       limit = &intel_limits[INTEL_LIMIT_I8XX_LVDS];
+               else
+                       limit = &intel_limits[INTEL_LIMIT_I8XX_DVO_DAC];
+       }
+       return limit;
+}
+
+/** Derive the pixel clock for the given refclk and divisors for 8xx chips. */
+
+static void i8xx_clock(int refclk, intel_clock_t *clock)
+{
+       clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2);
+       clock->p = clock->p1 * clock->p2;
+       clock->vco = refclk * clock->m / (clock->n + 2);
+       clock->dot = clock->vco / clock->p;
+}
+
+/** Derive the pixel clock for the given refclk and divisors for 9xx chips. */
+
+static void i9xx_clock(int refclk, intel_clock_t *clock)
+{
+       clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2);
+       clock->p = clock->p1 * clock->p2;
+       clock->vco = refclk * clock->m / (clock->n + 2);
+       clock->dot = clock->vco / clock->p;
+}
+
+static void intel_clock(struct drm_device *dev, int refclk,
+                       intel_clock_t *clock)
+{
+       if (IS_I9XX(dev))
+               i9xx_clock (refclk, clock);
+       else
+               i8xx_clock (refclk, clock);
+}
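i8xx_clock() and i9xx_clock() are the same equation: m = 5*(m1 + 2) + (m2 + 2), p = p1*p2, vco = refclk*m/(n + 2), dot = vco/p. One worked example in plain C, with divisor values chosen to land inside the I9XX limits above (the 96 MHz reference clock is just an example value):

#include <stdio.h>

int main(void)
{
        int refclk = 96000;                     /* kHz */
        int n = 4, m1 = 18, m2 = 8, p1 = 2, p2 = 5;

        int m   = 5 * (m1 + 2) + (m2 + 2);      /* 5*20 + 10 = 110 */
        int p   = p1 * p2;                      /* 10 */
        int vco = refclk * m / (n + 2);         /* 96000*110/6 = 1760000 kHz */
        int dot = vco / p;                      /* 176000 kHz, i.e. 176 MHz */

        printf("m=%d p=%d vco=%d dot=%d\n", m, p, vco, dot);
        return 0;
}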
+
+/**
+ * Returns whether any output on the specified pipe is of the specified type
+ */
+bool intel_pipe_has_type (struct drm_crtc *crtc, int type)
+{
+    struct drm_device *dev = crtc->dev;
+    struct drm_mode_config *mode_config = &dev->mode_config;
+    struct drm_connector *l_entry;
+
+    list_for_each_entry(l_entry, &mode_config->connector_list, head) {
+           if (l_entry->encoder &&
+               l_entry->encoder->crtc == crtc) {
+                   struct intel_output *intel_output = to_intel_output(l_entry);
+                   if (intel_output->type == type)
+                           return true;
+           }
+    }
+    return false;
+}
+
+#define INTELPllInvalid(s)   { /* ErrorF (s) */; return false; }
+/**
+ * Returns whether the given set of divisors are valid for a given refclk with
+ * the given connectors.
+ */
+
+static bool intel_PLL_is_valid(struct drm_crtc *crtc, intel_clock_t *clock)
+{
+       const intel_limit_t *limit = intel_limit (crtc);
+
+       if (clock->p1  < limit->p1.min  || limit->p1.max  < clock->p1)
+               INTELPllInvalid ("p1 out of range\n");
+       if (clock->p   < limit->p.min   || limit->p.max   < clock->p)
+               INTELPllInvalid ("p out of range\n");
+       if (clock->m2  < limit->m2.min  || limit->m2.max  < clock->m2)
+               INTELPllInvalid ("m2 out of range\n");
+       if (clock->m1  < limit->m1.min  || limit->m1.max  < clock->m1)
+               INTELPllInvalid ("m1 out of range\n");
+       if (clock->m1 <= clock->m2)
+               INTELPllInvalid ("m1 <= m2\n");
+       if (clock->m   < limit->m.min   || limit->m.max   < clock->m)
+               INTELPllInvalid ("m out of range\n");
+       if (clock->n   < limit->n.min   || limit->n.max   < clock->n)
+               INTELPllInvalid ("n out of range\n");
+       if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
+               INTELPllInvalid ("vco out of range\n");
+       /* XXX: We may need to be checking "Dot clock" depending on the multiplier,
+        * connector, etc., rather than just a single range.
+        */
+       if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
+               INTELPllInvalid ("dot out of range\n");
+
+       return true;
+}
+
+/**
+ * Returns a set of divisors for the desired target clock with the given
+ * refclk, or FALSE.  The returned values represent the clock equation:
+ * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
+ */
+static bool intel_find_best_PLL(struct drm_crtc *crtc, int target,
+                               int refclk, intel_clock_t *best_clock)
+{
+       struct drm_device *dev = crtc->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       intel_clock_t clock;
+       const intel_limit_t *limit = intel_limit(crtc);
+       int err = target;
+
+       if (IS_I9XX(dev) && intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
+           (I915_READ(LVDS) & LVDS_PORT_EN) != 0) {
+               /*
+                * For LVDS, if the panel is on, just rely on its current
+                * settings for dual-channel.  We haven't figured out how to
+                * reliably set up different single/dual channel state, if we
+                * even can.
+                */
+               if ((I915_READ(LVDS) & LVDS_CLKB_POWER_MASK) ==
+                   LVDS_CLKB_POWER_UP)
+                       clock.p2 = limit->p2.p2_fast;
+               else
+                       clock.p2 = limit->p2.p2_slow;
+       } else {
+               if (target < limit->p2.dot_limit)
+                       clock.p2 = limit->p2.p2_slow;
+               else
+                       clock.p2 = limit->p2.p2_fast;
+       }
+
+       memset (best_clock, 0, sizeof (*best_clock));
+
+       for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
+               for (clock.m2 = limit->m2.min; clock.m2 < clock.m1 &&
+                            clock.m2 <= limit->m2.max; clock.m2++) {
+                       for (clock.n = limit->n.min; clock.n <= limit->n.max;
+                            clock.n++) {
+                               for (clock.p1 = limit->p1.min;
+                                    clock.p1 <= limit->p1.max; clock.p1++) {
+                                       int this_err;
+
+                                       intel_clock(dev, refclk, &clock);
+
+                                       if (!intel_PLL_is_valid(crtc, &clock))
+                                               continue;
+
+                                       this_err = abs(clock.dot - target);
+                                       if (this_err < err) {
+                                               *best_clock = clock;
+                                               err = this_err;
+                                       }
+                               }
+                       }
+               }
+       }
+
+       return (err != target);
+}
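intel_find_best_PLL() is a brute-force sweep over (m1, m2, n, p1) within the limit table, keeping the candidate whose computed dot clock lands closest to the target; it reports success only when some candidate beat the initial error of `target` itself. A compact standalone version of the same idea using the 9xx equation; the limit struct is reduced and the per-divisor validity checks of intel_PLL_is_valid() are omitted:

#include <stdbool.h>
#include <stdlib.h>

struct range { int min, max; };

struct limits {
        struct range n, m1, m2, p1;
        int p2;                 /* already chosen from the slow/fast pair */
};

struct clock { int n, m1, m2, p1, p2, dot; };

/* Brute-force the divisors whose dot clock (kHz) is closest to `target`.
 * Returns true if any candidate improved on the initial error. */
static bool find_best_pll(const struct limits *l, int refclk, int target,
                          struct clock *best)
{
        int err = target;
        bool found = false;

        for (int m1 = l->m1.min; m1 <= l->m1.max; m1++)
        for (int m2 = l->m2.min; m2 < m1 && m2 <= l->m2.max; m2++)
        for (int n = l->n.min; n <= l->n.max; n++)
        for (int p1 = l->p1.min; p1 <= l->p1.max; p1++) {
                int m = 5 * (m1 + 2) + (m2 + 2);
                int p = p1 * l->p2;
                int vco = refclk * m / (n + 2);
                int dot = vco / p;
                int this_err = abs(dot - target);

                if (this_err < err) {
                        *best = (struct clock){ n, m1, m2, p1, l->p2, dot };
                        err = this_err;
                        found = true;
                }
        }
        return found;
}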
+
+void
+intel_wait_for_vblank(struct drm_device *dev)
+{
+       /* Wait for 20 ms, i.e. one cycle at 50 Hz. */
+       udelay(20000);
+}
+
+static void
+intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
+                   struct drm_framebuffer *old_fb)
+{
+       struct drm_device *dev = crtc->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_master_private *master_priv;
+       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+       struct intel_framebuffer *intel_fb;
+       struct drm_i915_gem_object *obj_priv;
+       struct drm_gem_object *obj;
+       int pipe = intel_crtc->pipe;
+       unsigned long Start, Offset;
+       int dspbase = (pipe == 0 ? DSPAADDR : DSPBADDR);
+       int dspsurf = (pipe == 0 ? DSPASURF : DSPBSURF);
+       int dspstride = (pipe == 0) ? DSPASTRIDE : DSPBSTRIDE;
+       int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
+       u32 dspcntr, alignment;
+
+       /* no fb bound */
+       if (!crtc->fb) {
+               DRM_DEBUG("No FB bound\n");
+               return;
+       }
+
+       intel_fb = to_intel_framebuffer(crtc->fb);
+       obj = intel_fb->obj;
+       obj_priv = obj->driver_private;
+
+       switch (obj_priv->tiling_mode) {
+       case I915_TILING_NONE:
+               alignment = 64 * 1024;
+               break;
+       case I915_TILING_X:
+               if (IS_I9XX(dev))
+                       alignment = 1024 * 1024;
+               else
+                       alignment = 512 * 1024;
+               break;
+       case I915_TILING_Y:
+               /* FIXME: Is this true? */
+               DRM_ERROR("Y tiled not allowed for scan out buffers\n");
+               return;
+       default:
+               BUG();
+       }
+
+       if (i915_gem_object_pin(intel_fb->obj, alignment))
+               return;
+
+       i915_gem_object_set_to_gtt_domain(intel_fb->obj, 1);
+
+       Start = obj_priv->gtt_offset;
+       Offset = y * crtc->fb->pitch + x * (crtc->fb->bits_per_pixel / 8);
+
+       I915_WRITE(dspstride, crtc->fb->pitch);
+
+       dspcntr = I915_READ(dspcntr_reg);
+       switch (crtc->fb->bits_per_pixel) {
+       case 8:
+               dspcntr |= DISPPLANE_8BPP;
+               break;
+       case 16:
+               if (crtc->fb->depth == 15)
+                       dspcntr |= DISPPLANE_15_16BPP;
+               else
+                       dspcntr |= DISPPLANE_16BPP;
+               break;
+       case 24:
+       case 32:
+               dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
+               break;
+       default:
+               DRM_ERROR("Unknown color depth\n");
+               return;
+       }
+       I915_WRITE(dspcntr_reg, dspcntr);
+
+       DRM_DEBUG("Writing base %08lX %08lX %d %d\n", Start, Offset, x, y);
+       if (IS_I965G(dev)) {
+               I915_WRITE(dspbase, Offset);
+               I915_READ(dspbase);
+               I915_WRITE(dspsurf, Start);
+               I915_READ(dspsurf);
+       } else {
+               I915_WRITE(dspbase, Start + Offset);
+               I915_READ(dspbase);
+       }
+
+       intel_wait_for_vblank(dev);
+
+       if (old_fb) {
+               intel_fb = to_intel_framebuffer(old_fb);
+               i915_gem_object_unpin(intel_fb->obj);
+       }
+
+       if (!dev->primary->master)
+               return;
+
+       master_priv = dev->primary->master->driver_priv;
+       if (!master_priv->sarea_priv)
+               return;
+
+       switch (pipe) {
+       case 0:
+               master_priv->sarea_priv->pipeA_x = x;
+               master_priv->sarea_priv->pipeA_y = y;
+               break;
+       case 1:
+               master_priv->sarea_priv->pipeB_x = x;
+               master_priv->sarea_priv->pipeB_y = y;
+               break;
+       default:
+               DRM_ERROR("Can't update pipe %d in SAREA\n", pipe);
+               break;
+       }
+}
+
+
+
+/**
+ * Sets the power management mode of the pipe and plane.
+ *
+ * This code should probably grow support for turning the cursor off and back
+ * on appropriately at the same time as we're turning the pipe off/on.
+ */
+static void intel_crtc_dpms(struct drm_crtc *crtc, int mode)
+{
+       struct drm_device *dev = crtc->dev;
+       struct drm_i915_master_private *master_priv;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+       int pipe = intel_crtc->pipe;
+       int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
+       int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
+       int dspbase_reg = (pipe == 0) ? DSPAADDR : DSPBADDR;
+       int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
+       u32 temp;
+       bool enabled;
+
+       /* XXX: When our outputs are all unaware of DPMS modes other than off
+        * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
+        */
+       switch (mode) {
+       case DRM_MODE_DPMS_ON:
+       case DRM_MODE_DPMS_STANDBY:
+       case DRM_MODE_DPMS_SUSPEND:
+               /* Enable the DPLL */
+               temp = I915_READ(dpll_reg);
+               if ((temp & DPLL_VCO_ENABLE) == 0) {
+                       I915_WRITE(dpll_reg, temp);
+                       I915_READ(dpll_reg);
+                       /* Wait for the clocks to stabilize. */
+                       udelay(150);
+                       I915_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE);
+                       I915_READ(dpll_reg);
+                       /* Wait for the clocks to stabilize. */
+                       udelay(150);
+                       I915_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE);
+                       I915_READ(dpll_reg);
+                       /* Wait for the clocks to stabilize. */
+                       udelay(150);
+               }
+
+               /* Enable the pipe */
+               temp = I915_READ(pipeconf_reg);
+               if ((temp & PIPEACONF_ENABLE) == 0)
+                       I915_WRITE(pipeconf_reg, temp | PIPEACONF_ENABLE);
+
+               /* Enable the plane */
+               temp = I915_READ(dspcntr_reg);
+               if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
+                       I915_WRITE(dspcntr_reg, temp | DISPLAY_PLANE_ENABLE);
+                       /* Flush the plane changes */
+                       I915_WRITE(dspbase_reg, I915_READ(dspbase_reg));
+               }
+
+               intel_crtc_load_lut(crtc);
+
+               /* Give the overlay scaler a chance to enable if it's on this pipe */
+               //intel_crtc_dpms_video(crtc, true); TODO
+               break;
+       case DRM_MODE_DPMS_OFF:
+               /* Give the overlay scaler a chance to disable if it's on this pipe */
+               //intel_crtc_dpms_video(crtc, FALSE); TODO
+
+               /* Disable the VGA plane that we never use */
+               I915_WRITE(VGACNTRL, VGA_DISP_DISABLE);
+
+               /* Disable display plane */
+               temp = I915_READ(dspcntr_reg);
+               if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
+                       I915_WRITE(dspcntr_reg, temp & ~DISPLAY_PLANE_ENABLE);
+                       /* Flush the plane changes */
+                       I915_WRITE(dspbase_reg, I915_READ(dspbase_reg));
+                       I915_READ(dspbase_reg);
+               }
+
+               if (!IS_I9XX(dev)) {
+                       /* Wait for vblank for the disable to take effect */
+                       intel_wait_for_vblank(dev);
+               }
+
+               /* Next, disable display pipes */
+               temp = I915_READ(pipeconf_reg);
+               if ((temp & PIPEACONF_ENABLE) != 0) {
+                       I915_WRITE(pipeconf_reg, temp & ~PIPEACONF_ENABLE);
+                       I915_READ(pipeconf_reg);
+               }
+
+               /* Wait for vblank for the disable to take effect. */
+               intel_wait_for_vblank(dev);
+
+               temp = I915_READ(dpll_reg);
+               if ((temp & DPLL_VCO_ENABLE) != 0) {
+                       I915_WRITE(dpll_reg, temp & ~DPLL_VCO_ENABLE);
+                       I915_READ(dpll_reg);
+               }
+
+               /* Wait for the clocks to turn off. */
+               udelay(150);
+               break;
+       }
+
+       if (!dev->primary->master)
+               return;
+
+       master_priv = dev->primary->master->driver_priv;
+       if (!master_priv->sarea_priv)
+               return;
+
+       enabled = crtc->enabled && mode != DRM_MODE_DPMS_OFF;
+
+       switch (pipe) {
+       case 0:
+               master_priv->sarea_priv->pipeA_w = enabled ? crtc->mode.hdisplay : 0;
+               master_priv->sarea_priv->pipeA_h = enabled ? crtc->mode.vdisplay : 0;
+               break;
+       case 1:
+               master_priv->sarea_priv->pipeB_w = enabled ? crtc->mode.hdisplay : 0;
+               master_priv->sarea_priv->pipeB_h = enabled ? crtc->mode.vdisplay : 0;
+               break;
+       default:
+               DRM_ERROR("Can't update pipe %d in SAREA\n", pipe);
+               break;
+       }
+
+       intel_crtc->dpms_mode = mode;
+}
+
+static void intel_crtc_prepare (struct drm_crtc *crtc)
+{
+       struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+       crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
+}
+
+static void intel_crtc_commit (struct drm_crtc *crtc)
+{
+       struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+       crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
+}
+
+void intel_encoder_prepare (struct drm_encoder *encoder)
+{
+       struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
+       /* lvds has its own version of prepare see intel_lvds_prepare */
+       encoder_funcs->dpms(encoder, DRM_MODE_DPMS_OFF);
+}
+
+void intel_encoder_commit (struct drm_encoder *encoder)
+{
+       struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
+       /* lvds has its own version of commit see intel_lvds_commit */
+       encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
+}
+
+static bool intel_crtc_mode_fixup(struct drm_crtc *crtc,
+                                 struct drm_display_mode *mode,
+                                 struct drm_display_mode *adjusted_mode)
+{
+       return true;
+}
+
+
+/** Returns the core display clock speed, in kHz, for i830 - i945 chipsets */
+static int intel_get_core_clock_speed(struct drm_device *dev)
+{
+
+       /* Core clock values taken from the published datasheets.
+        * The 830 may go up to 166 MHz, which we should check.
+        */
+       if (IS_I945G(dev))
+               return 400000;
+       else if (IS_I915G(dev))
+               return 333000;
+       else if (IS_I945GM(dev) || IS_845G(dev))
+               return 200000;
+       else if (IS_I915GM(dev)) {
+               u16 gcfgc = 0;
+
+               pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
+
+               if (gcfgc & GC_LOW_FREQUENCY_ENABLE)
+                       return 133000;
+               else {
+                       switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
+                       case GC_DISPLAY_CLOCK_333_MHZ:
+                               return 333000;
+                       default:
+                       case GC_DISPLAY_CLOCK_190_200_MHZ:
+                               return 190000;
+                       }
+               }
+       } else if (IS_I865G(dev))
+               return 266000;
+       else if (IS_I855(dev)) {
+               u16 hpllcc = 0;
+               /* Assume that the hardware is in the high speed state.  This
+                * should be the default.
+                */
+               switch (hpllcc & GC_CLOCK_CONTROL_MASK) {
+               case GC_CLOCK_133_200:
+               case GC_CLOCK_100_200:
+                       return 200000;
+               case GC_CLOCK_166_250:
+                       return 250000;
+               case GC_CLOCK_100_133:
+                       return 133000;
+               }
+       } else /* 852, 830 */
+               return 133000;
+
+       return 0; /* Silence gcc warning */
+}
+
+
+/**
+ * Return the pipe currently connected to the panel fitter,
+ * or -1 if the panel fitter is not present or not in use
+ */
+static int intel_panel_fitter_pipe (struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       u32  pfit_control;
+
+       /* i830 doesn't have a panel fitter */
+       if (IS_I830(dev))
+               return -1;
+
+       pfit_control = I915_READ(PFIT_CONTROL);
+
+       /* See if the panel fitter is in use */
+       if ((pfit_control & PFIT_ENABLE) == 0)
+               return -1;
+
+       /* 965 can place panel fitter on either pipe */
+       if (IS_I965G(dev))
+               return (pfit_control >> 29) & 0x3;
+
+       /* older chips can only use pipe 1 */
+       return 1;
+}
+
+static void intel_crtc_mode_set(struct drm_crtc *crtc,
+                               struct drm_display_mode *mode,
+                               struct drm_display_mode *adjusted_mode,
+                               int x, int y,
+                               struct drm_framebuffer *old_fb)
+{
+       struct drm_device *dev = crtc->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+       int pipe = intel_crtc->pipe;
+       int fp_reg = (pipe == 0) ? FPA0 : FPB0;
+       int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
+       int dpll_md_reg = (intel_crtc->pipe == 0) ? DPLL_A_MD : DPLL_B_MD;
+       int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
+       int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
+       int htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B;
+       int hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B;
+       int hsync_reg = (pipe == 0) ? HSYNC_A : HSYNC_B;
+       int vtot_reg = (pipe == 0) ? VTOTAL_A : VTOTAL_B;
+       int vblank_reg = (pipe == 0) ? VBLANK_A : VBLANK_B;
+       int vsync_reg = (pipe == 0) ? VSYNC_A : VSYNC_B;
+       int dspsize_reg = (pipe == 0) ? DSPASIZE : DSPBSIZE;
+       int dsppos_reg = (pipe == 0) ? DSPAPOS : DSPBPOS;
+       int pipesrc_reg = (pipe == 0) ? PIPEASRC : PIPEBSRC;
+       int refclk;
+       intel_clock_t clock;
+       u32 dpll = 0, fp = 0, dspcntr, pipeconf;
+       bool ok, is_sdvo = false, is_dvo = false;
+       bool is_crt = false, is_lvds = false, is_tv = false;
+       struct drm_mode_config *mode_config = &dev->mode_config;
+       struct drm_connector *connector;
+
+       drm_vblank_pre_modeset(dev, pipe);
+
+       list_for_each_entry(connector, &mode_config->connector_list, head) {
+               struct intel_output *intel_output = to_intel_output(connector);
+
+               if (!connector->encoder || connector->encoder->crtc != crtc)
+                       continue;
+
+               switch (intel_output->type) {
+               case INTEL_OUTPUT_LVDS:
+                       is_lvds = true;
+                       break;
+               case INTEL_OUTPUT_SDVO:
+                       is_sdvo = true;
+                       break;
+               case INTEL_OUTPUT_DVO:
+                       is_dvo = true;
+                       break;
+               case INTEL_OUTPUT_TVOUT:
+                       is_tv = true;
+                       break;
+               case INTEL_OUTPUT_ANALOG:
+                       is_crt = true;
+                       break;
+               }
+       }
+
+       if (IS_I9XX(dev)) {
+               refclk = 96000;
+       } else {
+               refclk = 48000;
+       }
+
+       ok = intel_find_best_PLL(crtc, adjusted_mode->clock, refclk, &clock);
+       if (!ok) {
+               DRM_ERROR("Couldn't find PLL settings for mode!\n");
+               return;
+       }
+
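+       /*
+        * Pack the N/M1/M2 divisors into the FPx register layout; the shifts
+        * here mirror the FP_N_DIV/FP_M1_DIV/FP_M2_DIV fields that
+        * intel_crtc_clock_get() later uses to read the values back out.
+        */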
+       fp = clock.n << 16 | clock.m1 << 8 | clock.m2;
+
+       dpll = DPLL_VGA_MODE_DIS;
+       if (IS_I9XX(dev)) {
+               if (is_lvds)
+                       dpll |= DPLLB_MODE_LVDS;
+               else
+                       dpll |= DPLLB_MODE_DAC_SERIAL;
+               if (is_sdvo) {
+                       dpll |= DPLL_DVO_HIGH_SPEED;
+                       if (IS_I945G(dev) || IS_I945GM(dev)) {
+                               int sdvo_pixel_multiply = adjusted_mode->clock / mode->clock;
+                               dpll |= (sdvo_pixel_multiply - 1) << SDVO_MULTIPLIER_SHIFT_HIRES;
+                       }
+               }
+
+               /* compute bitmask from p1 value */
+               dpll |= (1 << (clock.p1 - 1)) << 16;
+               switch (clock.p2) {
+               case 5:
+                       dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
+                       break;
+               case 7:
+                       dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
+                       break;
+               case 10:
+                       dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
+                       break;
+               case 14:
+                       dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
+                       break;
+               }
+               if (IS_I965G(dev))
+                       dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
+       } else {
+               if (is_lvds) {
+                       dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
+               } else {
+                       if (clock.p1 == 2)
+                               dpll |= PLL_P1_DIVIDE_BY_TWO;
+                       else
+                               dpll |= (clock.p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
+                       if (clock.p2 == 4)
+                               dpll |= PLL_P2_DIVIDE_BY_4;
+               }
+       }
+
+       if (is_tv) {
+               /* XXX: just matching BIOS for now */
+/*     dpll |= PLL_REF_INPUT_TVCLKINBC; */
+               dpll |= 3;
+       } else
+               dpll |= PLL_REF_INPUT_DREFCLK;
+
+       /* setup pipeconf */
+       pipeconf = I915_READ(pipeconf_reg);
+
+       /* Set up the display plane register */
+       dspcntr = DISPPLANE_GAMMA_ENABLE;
+
+       if (pipe == 0)
+               dspcntr |= DISPPLANE_SEL_PIPE_A;
+       else
+               dspcntr |= DISPPLANE_SEL_PIPE_B;
+
+       if (pipe == 0 && !IS_I965G(dev)) {
+               /* Enable pixel doubling when the dot clock is > 90% of the (display)
+                * core speed.
+                *
+                * XXX: No double-wide on 915GM pipe B. Is that the only reason for the
+                * pipe == 0 check?
+                */
+               if (mode->clock > intel_get_core_clock_speed(dev) * 9 / 10)
+                       pipeconf |= PIPEACONF_DOUBLE_WIDE;
+               else
+                       pipeconf &= ~PIPEACONF_DOUBLE_WIDE;
+       }
+
+       dspcntr |= DISPLAY_PLANE_ENABLE;
+       pipeconf |= PIPEACONF_ENABLE;
+       dpll |= DPLL_VCO_ENABLE;
+
+
+       /* Disable the panel fitter if it was on our pipe */
+       if (intel_panel_fitter_pipe(dev) == pipe)
+               I915_WRITE(PFIT_CONTROL, 0);
+
+       DRM_DEBUG("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
+       drm_mode_debug_printmodeline(mode);
+
+
+       if (dpll & DPLL_VCO_ENABLE) {
+               I915_WRITE(fp_reg, fp);
+               I915_WRITE(dpll_reg, dpll & ~DPLL_VCO_ENABLE);
+               I915_READ(dpll_reg);
+               udelay(150);
+       }
+
+       /* The LVDS pin pair needs to be on before the DPLLs are enabled.
+        * This is an exception to the general rule that mode_set doesn't turn
+        * things on.
+        */
+       if (is_lvds) {
+               u32 lvds = I915_READ(LVDS);
+
+               lvds |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP | LVDS_PIPEB_SELECT;
+               /* Set the B0-B3 data pairs corresponding to whether we're going to
+                * set the DPLLs for dual-channel mode or not.
+                */
+               if (clock.p2 == 7)
+                       lvds |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
+               else
+                       lvds &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP);
+
+               /* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP)
+                * appropriately here, but we need to look more thoroughly into how
+                * panels behave in the two modes.
+                */
+
+               I915_WRITE(LVDS, lvds);
+               I915_READ(LVDS);
+       }
+
+       I915_WRITE(fp_reg, fp);
+       I915_WRITE(dpll_reg, dpll);
+       I915_READ(dpll_reg);
+       /* Wait for the clocks to stabilize. */
+       udelay(150);
+
+       if (IS_I965G(dev)) {
+               int sdvo_pixel_multiply = adjusted_mode->clock / mode->clock;
+               I915_WRITE(dpll_md_reg, (0 << DPLL_MD_UDI_DIVIDER_SHIFT) |
+                          ((sdvo_pixel_multiply - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT));
+       } else {
+               /* write it again -- the BIOS does, after all */
+               I915_WRITE(dpll_reg, dpll);
+       }
+       I915_READ(dpll_reg);
+       /* Wait for the clocks to stabilize. */
+       udelay(150);
+
+       I915_WRITE(htot_reg, (adjusted_mode->crtc_hdisplay - 1) |
+                  ((adjusted_mode->crtc_htotal - 1) << 16));
+       I915_WRITE(hblank_reg, (adjusted_mode->crtc_hblank_start - 1) |
+                  ((adjusted_mode->crtc_hblank_end - 1) << 16));
+       I915_WRITE(hsync_reg, (adjusted_mode->crtc_hsync_start - 1) |
+                  ((adjusted_mode->crtc_hsync_end - 1) << 16));
+       I915_WRITE(vtot_reg, (adjusted_mode->crtc_vdisplay - 1) |
+                  ((adjusted_mode->crtc_vtotal - 1) << 16));
+       I915_WRITE(vblank_reg, (adjusted_mode->crtc_vblank_start - 1) |
+                  ((adjusted_mode->crtc_vblank_end - 1) << 16));
+       I915_WRITE(vsync_reg, (adjusted_mode->crtc_vsync_start - 1) |
+                  ((adjusted_mode->crtc_vsync_end - 1) << 16));
+       /* pipesrc and dspsize control the size that is scaled from, which should
+        * always be the user's requested size.
+        */
+       I915_WRITE(dspsize_reg, ((mode->vdisplay - 1) << 16) | (mode->hdisplay - 1));
+       I915_WRITE(dsppos_reg, 0);
+       I915_WRITE(pipesrc_reg, ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
+       I915_WRITE(pipeconf_reg, pipeconf);
+       I915_READ(pipeconf_reg);
+
+       intel_wait_for_vblank(dev);
+
+       I915_WRITE(dspcntr_reg, dspcntr);
+
+       /* Flush the plane changes */
+       intel_pipe_set_base(crtc, x, y, old_fb);
+
+       drm_vblank_post_modeset(dev, pipe);
+}
+
+/** Loads the palette/gamma unit for the CRTC with the prepared values */
+void intel_crtc_load_lut(struct drm_crtc *crtc)
+{
+       struct drm_device *dev = crtc->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+       int palreg = (intel_crtc->pipe == 0) ? PALETTE_A : PALETTE_B;
+       int i;
+
+       /* The clocks have to be on to load the palette. */
+       if (!crtc->enabled)
+               return;
+
+       for (i = 0; i < 256; i++) {
+               I915_WRITE(palreg + 4 * i,
+                          (intel_crtc->lut_r[i] << 16) |
+                          (intel_crtc->lut_g[i] << 8) |
+                          intel_crtc->lut_b[i]);
+       }
+}
+
+static int intel_crtc_cursor_set(struct drm_crtc *crtc,
+                                struct drm_file *file_priv,
+                                uint32_t handle,
+                                uint32_t width, uint32_t height)
+{
+       struct drm_device *dev = crtc->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+       struct drm_gem_object *bo;
+       struct drm_i915_gem_object *obj_priv;
+       int pipe = intel_crtc->pipe;
+       uint32_t control = (pipe == 0) ? CURACNTR : CURBCNTR;
+       uint32_t base = (pipe == 0) ? CURABASE : CURBBASE;
+       uint32_t temp;
+       size_t addr;
+
+       DRM_DEBUG("\n");
+
+       /* if we want to turn off the cursor ignore width and height */
+       if (!handle) {
+               DRM_DEBUG("cursor off\n");
+               /* turn off the cursor */
+               temp = 0;
+               temp |= CURSOR_MODE_DISABLE;
+
+               I915_WRITE(control, temp);
+               I915_WRITE(base, 0);
+               return 0;
+       }
+
+       /* Currently we only support 64x64 cursors */
+       if (width != 64 || height != 64) {
+               DRM_ERROR("we currently only support 64x64 cursors\n");
+               return -EINVAL;
+       }
+
+       bo = drm_gem_object_lookup(dev, file_priv, handle);
+       if (!bo)
+               return -ENOENT;
+
+       obj_priv = bo->driver_private;
+
+       if (bo->size < width * height * 4) {
+               DRM_ERROR("buffer is too small\n");
+               drm_gem_object_unreference(bo);
+               return -ENOMEM;
+       }
+
+       if (dev_priv->cursor_needs_physical) {
+               addr = dev->agp->base + obj_priv->gtt_offset;
+       } else {
+               addr = obj_priv->gtt_offset;
+       }
+
+       intel_crtc->cursor_addr = addr;
+       temp = 0;
+       /* set the pipe for the cursor */
+       temp |= (pipe << 28);
+       temp |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;
+
+       I915_WRITE(control, temp);
+       I915_WRITE(base, addr);
+
+       return 0;
+}
+
+static int intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
+{
+       struct drm_device *dev = crtc->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+       int pipe = intel_crtc->pipe;
+       uint32_t temp = 0;
+       uint32_t addr;
+
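+       /*
+        * CURxPOS takes sign-magnitude coordinates: one sign bit plus the
+        * absolute value per axis, so e.g. x = -10 becomes the X sign bit
+        * with a magnitude of 10.
+        */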
+       if (x < 0) {
+               temp |= (CURSOR_POS_SIGN << CURSOR_X_SHIFT);
+               x = -x;
+       }
+       if (y < 0) {
+               temp |= (CURSOR_POS_SIGN << CURSOR_Y_SHIFT);
+               y = -y;
+       }
+
+       temp |= ((x & CURSOR_POS_MASK) << CURSOR_X_SHIFT);
+       temp |= ((y & CURSOR_POS_MASK) << CURSOR_Y_SHIFT);
+
+       addr = intel_crtc->cursor_addr;
+       I915_WRITE((pipe == 0) ? CURAPOS : CURBPOS, temp);
+       I915_WRITE((pipe == 0) ? CURABASE : CURBBASE, addr);
+
+       return 0;
+}
+
+/** Sets the color ramps on behalf of RandR */
+void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
+                                u16 blue, int regno)
+{
+       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+
+       intel_crtc->lut_r[regno] = red >> 8;
+       intel_crtc->lut_g[regno] = green >> 8;
+       intel_crtc->lut_b[regno] = blue >> 8;
+}
+
+static void intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
+                                u16 *blue, uint32_t size)
+{
+       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+       int i;
+
+       if (size != 256)
+               return;
+
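+       /*
+        * The hardware palette is 8 bits per channel, so only the high byte
+        * of each 16-bit value is kept (see intel_crtc_load_lut()).
+        */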
+       for (i = 0; i < 256; i++) {
+               intel_crtc->lut_r[i] = red[i] >> 8;
+               intel_crtc->lut_g[i] = green[i] >> 8;
+               intel_crtc->lut_b[i] = blue[i] >> 8;
+       }
+
+       intel_crtc_load_lut(crtc);
+}
+
+/**
+ * Get a pipe with a simple mode set on it for doing load-based monitor
+ * detection.
+ *
+ * It will be up to the load-detect code to adjust the pipe as appropriate for
+ * its requirements.  The pipe will be connected to no other outputs.
+ *
+ * Currently this code will only succeed if there is a pipe with no outputs
+ * configured for it.  In the future, it could choose to temporarily disable
+ * some outputs to free up a pipe for its use.
+ *
+ * \return crtc, or NULL if no pipes are available.
+ */
+
+/* VESA 640x480x72Hz mode to set on the pipe */
+static struct drm_display_mode load_detect_mode = {
+       DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
+                704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
+};
+
+struct drm_crtc *intel_get_load_detect_pipe(struct intel_output *intel_output,
+                                           struct drm_display_mode *mode,
+                                           int *dpms_mode)
+{
+       struct intel_crtc *intel_crtc;
+       struct drm_crtc *possible_crtc;
+       struct drm_crtc *supported_crtc = NULL;
+       struct drm_encoder *encoder = &intel_output->enc;
+       struct drm_crtc *crtc = NULL;
+       struct drm_device *dev = encoder->dev;
+       struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
+       struct drm_crtc_helper_funcs *crtc_funcs;
+       int i = -1;
+
+       /*
+        * Algorithm gets a little messy:
+        *   - if the connector already has an assigned crtc, use it (but make
+        *     sure it's on first)
+        *   - try to find the first unused crtc that can drive this connector,
+        *     and use that if we find one
+        *   - if there are no unused crtcs available, try to use the first
+        *     one we found that supports the connector
+        */
+
+       /* See if we already have a CRTC for this connector */
+       if (encoder->crtc) {
+               crtc = encoder->crtc;
+               /* Make sure the crtc and connector are running */
+               intel_crtc = to_intel_crtc(crtc);
+               *dpms_mode = intel_crtc->dpms_mode;
+               if (intel_crtc->dpms_mode != DRM_MODE_DPMS_ON) {
+                       crtc_funcs = crtc->helper_private;
+                       crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
+                       encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
+               }
+               return crtc;
+       }
+
+       /* Find an unused one (if possible) */
+       list_for_each_entry(possible_crtc, &dev->mode_config.crtc_list, head) {
+               i++;
+               if (!(encoder->possible_crtcs & (1 << i)))
+                       continue;
+               if (!possible_crtc->enabled) {
+                       crtc = possible_crtc;
+                       break;
+               }
+               if (!supported_crtc)
+                       supported_crtc = possible_crtc;
+       }
+
+       /*
+        * If we didn't find an unused CRTC, don't use any.
+        */
+       if (!crtc) {
+               return NULL;
+       }
+
+       encoder->crtc = crtc;
+       intel_output->load_detect_temp = true;
+
+       intel_crtc = to_intel_crtc(crtc);
+       *dpms_mode = intel_crtc->dpms_mode;
+
+       if (!crtc->enabled) {
+               if (!mode)
+                       mode = &load_detect_mode;
+               drm_crtc_helper_set_mode(crtc, mode, 0, 0, crtc->fb);
+       } else {
+               if (intel_crtc->dpms_mode != DRM_MODE_DPMS_ON) {
+                       crtc_funcs = crtc->helper_private;
+                       crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
+               }
+
+               /* Add this connector to the crtc */
+               encoder_funcs->mode_set(encoder, &crtc->mode, &crtc->mode);
+               encoder_funcs->commit(encoder);
+       }
+       /* let the connector get through one full cycle before testing */
+       intel_wait_for_vblank(dev);
+
+       return crtc;
+}
+
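+/*
+ * Undo intel_get_load_detect_pipe(): drop a temporarily attached encoder,
+ * let the helper disable anything that is now unused, and restore the DPMS
+ * state the caller saved through the dpms_mode out-parameter.
+ */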
+void intel_release_load_detect_pipe(struct intel_output *intel_output, int dpms_mode)
+{
+       struct drm_encoder *encoder = &intel_output->enc;
+       struct drm_device *dev = encoder->dev;
+       struct drm_crtc *crtc = encoder->crtc;
+       struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
+       struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+
+       if (intel_output->load_detect_temp) {
+               encoder->crtc = NULL;
+               intel_output->load_detect_temp = false;
+               crtc->enabled = drm_helper_crtc_in_use(crtc);
+               drm_helper_disable_unused_functions(dev);
+       }
+
+       /* Switch crtc and output back off if necessary */
+       if (crtc->enabled && dpms_mode != DRM_MODE_DPMS_ON) {
+               if (encoder->crtc == crtc)
+                       encoder_funcs->dpms(encoder, dpms_mode);
+               crtc_funcs->dpms(crtc, dpms_mode);
+       }
+}
+
+/* Returns the clock of the currently programmed mode of the given pipe. */
+static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+       int pipe = intel_crtc->pipe;
+       u32 dpll = I915_READ((pipe == 0) ? DPLL_A : DPLL_B);
+       u32 fp;
+       intel_clock_t clock;
+
+       if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
+               fp = I915_READ((pipe == 0) ? FPA0 : FPB0);
+       else
+               fp = I915_READ((pipe == 0) ? FPA1 : FPB1);
+
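+       /*
+        * Invert the packing done at mode-set time: M1/M2/N come back out of
+        * the FP register, P1/P2 out of the DPLL register, and then the same
+        * clock equation is run forward again.
+        */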
+       clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
+       clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
+       clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
+       if (IS_I9XX(dev)) {
+               clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
+                              DPLL_FPA01_P1_POST_DIV_SHIFT);
+
+               switch (dpll & DPLL_MODE_MASK) {
+               case DPLLB_MODE_DAC_SERIAL:
+                       clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
+                               5 : 10;
+                       break;
+               case DPLLB_MODE_LVDS:
+                       clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
+                               7 : 14;
+                       break;
+               default:
+                       DRM_DEBUG("Unknown DPLL mode %08x in programmed "
+                                 "mode\n", (int)(dpll & DPLL_MODE_MASK));
+                       return 0;
+               }
+
+               /* XXX: Handle the 100MHz refclk */
+               i9xx_clock(96000, &clock);
+       } else {
+               bool is_lvds = (pipe == 1) && (I915_READ(LVDS) & LVDS_PORT_EN);
+
+               if (is_lvds) {
+                       clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
+                                      DPLL_FPA01_P1_POST_DIV_SHIFT);
+                       clock.p2 = 14;
+
+                       if ((dpll & PLL_REF_INPUT_MASK) ==
+                           PLLB_REF_INPUT_SPREADSPECTRUMIN) {
+                               /* XXX: might not be 66MHz */
+                               i8xx_clock(66000, &clock);
+                       } else
+                               i8xx_clock(48000, &clock);
+               } else {
+                       if (dpll & PLL_P1_DIVIDE_BY_TWO)
+                               clock.p1 = 2;
+                       else {
+                               clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
+                                           DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
+                       }
+                       if (dpll & PLL_P2_DIVIDE_BY_4)
+                               clock.p2 = 4;
+                       else
+                               clock.p2 = 2;
+
+                       i8xx_clock(48000, &clock);
+               }
+       }
+
+       /* XXX: It would be nice to validate the clocks, but we can't reuse
+        * i830PllIsValid() because it relies on the xf86_config connector
+        * configuration being accurate, which it isn't necessarily.
+        */
+
+       return clock.dot;
+}
+
+/** Returns the currently programmed mode of the given pipe. */
+struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
+                                            struct drm_crtc *crtc)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+       int pipe = intel_crtc->pipe;
+       struct drm_display_mode *mode;
+       int htot = I915_READ((pipe == 0) ? HTOTAL_A : HTOTAL_B);
+       int hsync = I915_READ((pipe == 0) ? HSYNC_A : HSYNC_B);
+       int vtot = I915_READ((pipe == 0) ? VTOTAL_A : VTOTAL_B);
+       int vsync = I915_READ((pipe == 0) ? VSYNC_A : VSYNC_B);
+
+       mode = kzalloc(sizeof(*mode), GFP_KERNEL);
+       if (!mode)
+               return NULL;
+
+       mode->clock = intel_crtc_clock_get(dev, crtc);
+       mode->hdisplay = (htot & 0xffff) + 1;
+       mode->htotal = ((htot & 0xffff0000) >> 16) + 1;
+       mode->hsync_start = (hsync & 0xffff) + 1;
+       mode->hsync_end = ((hsync & 0xffff0000) >> 16) + 1;
+       mode->vdisplay = (vtot & 0xffff) + 1;
+       mode->vtotal = ((vtot & 0xffff0000) >> 16) + 1;
+       mode->vsync_start = (vsync & 0xffff) + 1;
+       mode->vsync_end = ((vsync & 0xffff0000) >> 16) + 1;
+
+       drm_mode_set_name(mode);
+       drm_mode_set_crtcinfo(mode, 0);
+
+       return mode;
+}
+
+static void intel_crtc_destroy(struct drm_crtc *crtc)
+{
+       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+
+       drm_crtc_cleanup(crtc);
+       kfree(intel_crtc);
+}
+
+static const struct drm_crtc_helper_funcs intel_helper_funcs = {
+       .dpms = intel_crtc_dpms,
+       .mode_fixup = intel_crtc_mode_fixup,
+       .mode_set = intel_crtc_mode_set,
+       .mode_set_base = intel_pipe_set_base,
+       .prepare = intel_crtc_prepare,
+       .commit = intel_crtc_commit,
+};
+
+static const struct drm_crtc_funcs intel_crtc_funcs = {
+       .cursor_set = intel_crtc_cursor_set,
+       .cursor_move = intel_crtc_cursor_move,
+       .gamma_set = intel_crtc_gamma_set,
+       .set_config = drm_crtc_helper_set_config,
+       .destroy = intel_crtc_destroy,
+};
+
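+/*
+ * Of the two vtables above, intel_crtc_funcs holds the entry points DRM
+ * calls directly on behalf of userspace, while intel_helper_funcs holds the
+ * hooks the CRTC helper library invokes while carrying out set_config/dpms.
+ */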
+
+static void intel_crtc_init(struct drm_device *dev, int pipe)
+{
+       struct intel_crtc *intel_crtc;
+       int i;
+
+       intel_crtc = kzalloc(sizeof(struct intel_crtc) + (INTELFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL);
+       if (intel_crtc == NULL)
+               return;
+
+       drm_crtc_init(dev, &intel_crtc->base, &intel_crtc_funcs);
+
+       drm_mode_crtc_set_gamma_size(&intel_crtc->base, 256);
+       intel_crtc->pipe = pipe;
+       for (i = 0; i < 256; i++) {
+               intel_crtc->lut_r[i] = i;
+               intel_crtc->lut_g[i] = i;
+               intel_crtc->lut_b[i] = i;
+       }
+
+       intel_crtc->cursor_addr = 0;
+       intel_crtc->dpms_mode = DRM_MODE_DPMS_OFF;
+       drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);
+
+       intel_crtc->mode_set.crtc = &intel_crtc->base;
+       intel_crtc->mode_set.connectors = (struct drm_connector **)(intel_crtc + 1);
+       intel_crtc->mode_set.num_connectors = 0;
+
+       if (i915_fbpercrtc) {
+               /* TODO: per-CRTC framebuffer setup would go here */
+       }
+}
+
+struct drm_crtc *intel_get_crtc_from_pipe(struct drm_device *dev, int pipe)
+{
+       struct drm_crtc *crtc = NULL;
+
+       list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+               struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+               if (intel_crtc->pipe == pipe)
+                       break;
+       }
+       return crtc;
+}
+
+static int intel_connector_clones(struct drm_device *dev, int type_mask)
+{
+       int index_mask = 0;
+       struct drm_connector *connector;
+       int entry = 0;
+
+        list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+               struct intel_output *intel_output = to_intel_output(connector);
+               if (type_mask & (1 << intel_output->type))
+                       index_mask |= (1 << entry);
+               entry++;
+       }
+       return index_mask;
+}
+
+
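+/*
+ * Register the outputs this chip provides and fill in each encoder's
+ * possible_crtcs (bitmask of pipes it can drive, bit 0 = pipe A) and
+ * possible_clones (bitmask of connector indices that may share its CRTC,
+ * built by intel_connector_clones() above).
+ */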
+static void intel_setup_outputs(struct drm_device *dev)
+{
+       struct drm_connector *connector;
+
+       intel_crt_init(dev);
+
+       /* Set up integrated LVDS */
+       if (IS_MOBILE(dev) && !IS_I830(dev))
+               intel_lvds_init(dev);
+
+       if (IS_I9XX(dev)) {
+               intel_sdvo_init(dev, SDVOB);
+               intel_sdvo_init(dev, SDVOC);
+       } else
+               intel_dvo_init(dev);
+
+       if (IS_I9XX(dev) && !IS_I915G(dev))
+               intel_tv_init(dev);
+
+       list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+               struct intel_output *intel_output = to_intel_output(connector);
+               struct drm_encoder *encoder = &intel_output->enc;
+               int crtc_mask = 0, clone_mask = 0;
+
+               /* valid crtcs */
+               switch(intel_output->type) {
+               case INTEL_OUTPUT_DVO:
+               case INTEL_OUTPUT_SDVO:
+                       crtc_mask = ((1 << 0)|
+                                    (1 << 1));
+                       clone_mask = ((1 << INTEL_OUTPUT_ANALOG) |
+                                     (1 << INTEL_OUTPUT_DVO) |
+                                     (1 << INTEL_OUTPUT_SDVO));
+                       break;
+               case INTEL_OUTPUT_ANALOG:
+                       crtc_mask = ((1 << 0)|
+                                    (1 << 1));
+                       clone_mask = ((1 << INTEL_OUTPUT_ANALOG) |
+                                     (1 << INTEL_OUTPUT_DVO) |
+                                     (1 << INTEL_OUTPUT_SDVO));
+                       break;
+               case INTEL_OUTPUT_LVDS:
+                       crtc_mask = (1 << 1);
+                       clone_mask = (1 << INTEL_OUTPUT_LVDS);
+                       break;
+               case INTEL_OUTPUT_TVOUT:
+                       crtc_mask = ((1 << 0) |
+                                    (1 << 1));
+                       clone_mask = (1 << INTEL_OUTPUT_TVOUT);
+                       break;
+               }
+               encoder->possible_crtcs = crtc_mask;
+               encoder->possible_clones = intel_connector_clones(dev, clone_mask);
+       }
+}
+
+static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
+{
+       struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
+       struct drm_device *dev = fb->dev;
+
+       if (fb->fbdev)
+               intelfb_remove(dev, fb);
+
+       drm_framebuffer_cleanup(fb);
+       mutex_lock(&dev->struct_mutex);
+       drm_gem_object_unreference(intel_fb->obj);
+       mutex_unlock(&dev->struct_mutex);
+
+       kfree(intel_fb);
+}
+
+static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb,
+                                               struct drm_file *file_priv,
+                                               unsigned int *handle)
+{
+       struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
+       struct drm_gem_object *object = intel_fb->obj;
+
+       return drm_gem_handle_create(file_priv, object, handle);
+}
+
+static const struct drm_framebuffer_funcs intel_fb_funcs = {
+       .destroy = intel_user_framebuffer_destroy,
+       .create_handle = intel_user_framebuffer_create_handle,
+};
+
+int intel_framebuffer_create(struct drm_device *dev,
+                            struct drm_mode_fb_cmd *mode_cmd,
+                            struct drm_framebuffer **fb,
+                            struct drm_gem_object *obj)
+{
+       struct intel_framebuffer *intel_fb;
+       int ret;
+
+       intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
+       if (!intel_fb)
+               return -ENOMEM;
+
+       ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs);
+       if (ret) {
+               DRM_ERROR("framebuffer init failed %d\n", ret);
+               kfree(intel_fb);
+               return ret;
+       }
+
+       drm_helper_mode_fill_fb_struct(&intel_fb->base, mode_cmd);
+
+       intel_fb->obj = obj;
+
+       *fb = &intel_fb->base;
+
+       return 0;
+}
+
+
+static struct drm_framebuffer *
+intel_user_framebuffer_create(struct drm_device *dev,
+                             struct drm_file *filp,
+                             struct drm_mode_fb_cmd *mode_cmd)
+{
+       struct drm_gem_object *obj;
+       struct drm_framebuffer *fb;
+       int ret;
+
+       obj = drm_gem_object_lookup(dev, filp, mode_cmd->handle);
+       if (!obj)
+               return NULL;
+
+       ret = intel_framebuffer_create(dev, mode_cmd, &fb, obj);
+       if (ret) {
+               drm_gem_object_unreference(obj);
+               return NULL;
+       }
+
+       return fb;
+}
+
+static const struct drm_mode_config_funcs intel_mode_funcs = {
+       .fb_create = intel_user_framebuffer_create,
+       .fb_changed = intelfb_probe,
+};
+
+void intel_modeset_init(struct drm_device *dev)
+{
+       int num_pipe;
+       int i;
+
+       drm_mode_config_init(dev);
+
+       dev->mode_config.min_width = 0;
+       dev->mode_config.min_height = 0;
+
+       dev->mode_config.funcs = (void *)&intel_mode_funcs;
+
+       if (IS_I965G(dev)) {
+               dev->mode_config.max_width = 8192;
+               dev->mode_config.max_height = 8192;
+       } else {
+               dev->mode_config.max_width = 2048;
+               dev->mode_config.max_height = 2048;
+       }
+
+       /* Set the framebuffer aperture base: BAR 2 on 9xx chips, BAR 0 otherwise. */
+       if (IS_I9XX(dev))
+               dev->mode_config.fb_base = pci_resource_start(dev->pdev, 2);
+       else
+               dev->mode_config.fb_base = pci_resource_start(dev->pdev, 0);
+
+       if (IS_MOBILE(dev) || IS_I9XX(dev))
+               num_pipe = 2;
+       else
+               num_pipe = 1;
+       DRM_DEBUG("%d display pipe%s available.\n",
+                 num_pipe, num_pipe > 1 ? "s" : "");
+
+       for (i = 0; i < num_pipe; i++) {
+               intel_crtc_init(dev, i);
+       }
+
+       intel_setup_outputs(dev);
+}
+
+void intel_modeset_cleanup(struct drm_device *dev)
+{
+       drm_mode_config_cleanup(dev);
+}
+
+
+/*
+ * The current intel driver doesn't make use of multiple encoders per
+ * connector, so always hand back the connector's own encoder.
+ */
+struct drm_encoder *intel_best_encoder(struct drm_connector *connector)
+{
+       struct intel_output *intel_output = to_intel_output(connector);
+
+       return &intel_output->enc;
+}
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
new file mode 100644 (file)
index 0000000..407edd5
--- /dev/null
@@ -0,0 +1,146 @@
+/*
+ * Copyright (c) 2006 Dave Airlie <airlied@linux.ie>
+ * Copyright (c) 2007-2008 Intel Corporation
+ *   Jesse Barnes <jesse.barnes@intel.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+#ifndef __INTEL_DRV_H__
+#define __INTEL_DRV_H__
+
+#include <linux/i2c.h>
+#include <linux/i2c-id.h>
+#include <linux/i2c-algo-bit.h>
+#include "drm_crtc.h"
+
+#include "drm_crtc_helper.h"
+/*
+ * Display related stuff
+ */
+
+/*
+ * Store information about an Ixxx DVO.
+ * i830 through i865 use multiple DVO ports, each with its own i2c bus;
+ * i915/i945 instead expose a single SDVO i2c bus, which works differently.
+ */
+#define MAX_OUTPUTS 6
+/* maximum number of connectors per CRTC in a mode set */
+#define INTELFB_CONN_LIMIT 4
+
+#define INTEL_I2C_BUS_DVO 1
+#define INTEL_I2C_BUS_SDVO 2
+
+/* Integrated output types only; external chips hang off the DVO or SDVO ports. */
+#define INTEL_OUTPUT_UNUSED 0
+#define INTEL_OUTPUT_ANALOG 1
+#define INTEL_OUTPUT_DVO 2
+#define INTEL_OUTPUT_SDVO 3
+#define INTEL_OUTPUT_LVDS 4
+#define INTEL_OUTPUT_TVOUT 5
+
+#define INTEL_DVO_CHIP_NONE 0
+#define INTEL_DVO_CHIP_LVDS 1
+#define INTEL_DVO_CHIP_TMDS 2
+#define INTEL_DVO_CHIP_TVOUT 4
+
+struct intel_i2c_chan {
+       struct drm_device *drm_dev; /* for getting at dev. private (mmio etc.) */
+       u32 reg; /* GPIO reg */
+       struct i2c_adapter adapter;
+       struct i2c_algo_bit_data algo;
+        u8 slave_addr;
+};
+
+struct intel_framebuffer {
+       struct drm_framebuffer base;
+       struct drm_gem_object *obj;
+};
+
+
+struct intel_output {
+       struct drm_connector base;
+
+       struct drm_encoder enc;
+       int type;
+       struct intel_i2c_chan *i2c_bus; /* for control functions */
+       struct intel_i2c_chan *ddc_bus; /* for DDC only stuff */
+       bool load_detect_temp;
+       void *dev_priv;
+};
+
+struct intel_crtc {
+       struct drm_crtc base;
+       int pipe;
+       int plane;
+       uint32_t cursor_addr;
+       u8 lut_r[256], lut_g[256], lut_b[256];
+       int dpms_mode;
+       struct intel_framebuffer *fbdev_fb;
+       /* a mode_set for fbdev users on this crtc */
+       struct drm_mode_set mode_set;
+};
+
+#define to_intel_crtc(x) container_of(x, struct intel_crtc, base)
+#define to_intel_output(x) container_of(x, struct intel_output, base)
+#define enc_to_intel_output(x) container_of(x, struct intel_output, enc)
+#define to_intel_framebuffer(x) container_of(x, struct intel_framebuffer, base)
+
+struct intel_i2c_chan *intel_i2c_create(struct drm_device *dev, const u32 reg,
+                                       const char *name);
+void intel_i2c_destroy(struct intel_i2c_chan *chan);
+int intel_ddc_get_modes(struct intel_output *intel_output);
+extern bool intel_ddc_probe(struct intel_output *intel_output);
+
+extern void intel_crt_init(struct drm_device *dev);
+extern void intel_sdvo_init(struct drm_device *dev, int output_device);
+extern void intel_dvo_init(struct drm_device *dev);
+extern void intel_tv_init(struct drm_device *dev);
+extern void intel_lvds_init(struct drm_device *dev);
+
+extern void intel_crtc_load_lut(struct drm_crtc *crtc);
+extern void intel_encoder_prepare (struct drm_encoder *encoder);
+extern void intel_encoder_commit (struct drm_encoder *encoder);
+
+extern struct drm_encoder *intel_best_encoder(struct drm_connector *connector);
+
+extern struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
+                                                   struct drm_crtc *crtc);
+extern void intel_wait_for_vblank(struct drm_device *dev);
+extern struct drm_crtc *intel_get_crtc_from_pipe(struct drm_device *dev, int pipe);
+extern struct drm_crtc *intel_get_load_detect_pipe(struct intel_output *intel_output,
+                                                  struct drm_display_mode *mode,
+                                                  int *dpms_mode);
+extern void intel_release_load_detect_pipe(struct intel_output *intel_output,
+                                          int dpms_mode);
+
+extern struct drm_connector* intel_sdvo_find(struct drm_device *dev, int sdvoB);
+extern int intel_sdvo_supports_hotplug(struct drm_connector *connector);
+extern void intel_sdvo_set_hotplug(struct drm_connector *connector, int enable);
+extern int intelfb_probe(struct drm_device *dev);
+extern int intelfb_remove(struct drm_device *dev, struct drm_framebuffer *fb);
+extern int intelfb_resize(struct drm_device *dev, struct drm_crtc *crtc);
+extern void intelfb_restore(void);
+extern void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
+                                   u16 blue, int regno);
+
+extern int intel_framebuffer_create(struct drm_device *dev,
+                                   struct drm_mode_fb_cmd *mode_cmd,
+                                   struct drm_framebuffer **fb,
+                                   struct drm_gem_object *obj);
+#endif /* __INTEL_DRV_H__ */
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
new file mode 100644 (file)
index 0000000..8b8d6e6
--- /dev/null
@@ -0,0 +1,495 @@
+/*
+ * Copyright 2006 Dave Airlie <airlied@linux.ie>
+ * Copyright © 2006-2007 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *     Eric Anholt <eric@anholt.net>
+ */
+#include <linux/i2c.h>
+#include "drmP.h"
+#include "drm.h"
+#include "drm_crtc.h"
+#include "intel_drv.h"
+#include "i915_drm.h"
+#include "i915_drv.h"
+#include "dvo.h"
+
+#define SIL164_ADDR    0x38
+#define CH7xxx_ADDR    0x76
+#define TFP410_ADDR    0x38
+
+static struct intel_dvo_device intel_dvo_devices[] = {
+       {
+               .type = INTEL_DVO_CHIP_TMDS,
+               .name = "sil164",
+               .dvo_reg = DVOC,
+               .slave_addr = SIL164_ADDR,
+               .dev_ops = &sil164_ops,
+       },
+       {
+               .type = INTEL_DVO_CHIP_TMDS,
+               .name = "ch7xxx",
+               .dvo_reg = DVOC,
+               .slave_addr = CH7xxx_ADDR,
+               .dev_ops = &ch7xxx_ops,
+       },
+       {
+               .type = INTEL_DVO_CHIP_LVDS,
+               .name = "ivch",
+               .dvo_reg = DVOA,
+               .slave_addr = 0x02, /* Might also be 0x44, 0x84, 0xc4 */
+               .dev_ops = &ivch_ops,
+       },
+       {
+               .type = INTEL_DVO_CHIP_TMDS,
+               .name = "tfp410",
+               .dvo_reg = DVOC,
+               .slave_addr = TFP410_ADDR,
+               .dev_ops = &tfp410_ops,
+       },
+       {
+               .type = INTEL_DVO_CHIP_LVDS,
+               .name = "ch7017",
+               .dvo_reg = DVOC,
+               .slave_addr = 0x75,
+               .gpio = GPIOE,
+               .dev_ops = &ch7017_ops,
+       }
+};
+
+static void intel_dvo_dpms(struct drm_encoder *encoder, int mode)
+{
+       struct drm_i915_private *dev_priv = encoder->dev->dev_private;
+       struct intel_output *intel_output = enc_to_intel_output(encoder);
+       struct intel_dvo_device *dvo = intel_output->dev_priv;
+       u32 dvo_reg = dvo->dvo_reg;
+       u32 temp = I915_READ(dvo_reg);
+
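+       /*
+        * Ordering matters here: power the DVO port up before waking the
+        * external chip, and quiesce the chip before cutting the port on the
+        * way back down.
+        */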
+       if (mode == DRM_MODE_DPMS_ON) {
+               I915_WRITE(dvo_reg, temp | DVO_ENABLE);
+               I915_READ(dvo_reg);
+               dvo->dev_ops->dpms(dvo, mode);
+       } else {
+               dvo->dev_ops->dpms(dvo, mode);
+               I915_WRITE(dvo_reg, temp & ~DVO_ENABLE);
+               I915_READ(dvo_reg);
+       }
+}
+
+static void intel_dvo_save(struct drm_connector *connector)
+{
+       struct drm_i915_private *dev_priv = connector->dev->dev_private;
+       struct intel_output *intel_output = to_intel_output(connector);
+       struct intel_dvo_device *dvo = intel_output->dev_priv;
+
+       /* Each output should probably just save the registers it touches,
+        * but for now just save all three DVO registers (overkill, but simple).
+        */
+       dev_priv->saveDVOA = I915_READ(DVOA);
+       dev_priv->saveDVOB = I915_READ(DVOB);
+       dev_priv->saveDVOC = I915_READ(DVOC);
+
+       dvo->dev_ops->save(dvo);
+}
+
+static void intel_dvo_restore(struct drm_connector *connector)
+{
+       struct drm_i915_private *dev_priv = connector->dev->dev_private;
+       struct intel_output *intel_output = to_intel_output(connector);
+       struct intel_dvo_device *dvo = intel_output->dev_priv;
+
+       dvo->dev_ops->restore(dvo);
+
+       I915_WRITE(DVOA, dev_priv->saveDVOA);
+       I915_WRITE(DVOB, dev_priv->saveDVOB);
+       I915_WRITE(DVOC, dev_priv->saveDVOC);
+}
+
+static int intel_dvo_mode_valid(struct drm_connector *connector,
+                               struct drm_display_mode *mode)
+{
+       struct intel_output *intel_output = to_intel_output(connector);
+       struct intel_dvo_device *dvo = intel_output->dev_priv;
+
+       if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+               return MODE_NO_DBLESCAN;
+
+       /* XXX: Validate clock range */
+
+       if (dvo->panel_fixed_mode) {
+               if (mode->hdisplay > dvo->panel_fixed_mode->hdisplay)
+                       return MODE_PANEL;
+               if (mode->vdisplay > dvo->panel_fixed_mode->vdisplay)
+                       return MODE_PANEL;
+       }
+
+       return dvo->dev_ops->mode_valid(dvo, mode);
+}
+
+static bool intel_dvo_mode_fixup(struct drm_encoder *encoder,
+                                struct drm_display_mode *mode,
+                                struct drm_display_mode *adjusted_mode)
+{
+       struct intel_output *intel_output = enc_to_intel_output(encoder);
+       struct intel_dvo_device *dvo = intel_output->dev_priv;
+
+       /* If we have timings from the BIOS for the panel, put them in
+        * to the adjusted mode.  The CRTC will be set up for this mode,
+        * with the panel scaling set up to source from the H/VDisplay
+        * of the original mode.
+        */
+       if (dvo->panel_fixed_mode != NULL) {
+#define C(x) adjusted_mode->x = dvo->panel_fixed_mode->x
+               C(hdisplay);
+               C(hsync_start);
+               C(hsync_end);
+               C(htotal);
+               C(vdisplay);
+               C(vsync_start);
+               C(vsync_end);
+               C(vtotal);
+               C(clock);
+               drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V);
+#undef C
+       }
+
+       if (dvo->dev_ops->mode_fixup)
+               return dvo->dev_ops->mode_fixup(dvo, mode, adjusted_mode);
+
+       return true;
+}
+
+static void intel_dvo_mode_set(struct drm_encoder *encoder,
+                              struct drm_display_mode *mode,
+                              struct drm_display_mode *adjusted_mode)
+{
+       struct drm_device *dev = encoder->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+       struct intel_output *intel_output = enc_to_intel_output(encoder);
+       struct intel_dvo_device *dvo = intel_output->dev_priv;
+       int pipe = intel_crtc->pipe;
+       u32 dvo_val;
+       u32 dvo_reg = dvo->dvo_reg, dvo_srcdim_reg;
+       int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
+
+       switch (dvo_reg) {
+       case DVOA:
+       default:
+               dvo_srcdim_reg = DVOA_SRCDIM;
+               break;
+       case DVOB:
+               dvo_srcdim_reg = DVOB_SRCDIM;
+               break;
+       case DVOC:
+               dvo_srcdim_reg = DVOC_SRCDIM;
+               break;
+       }
+
+       dvo->dev_ops->mode_set(dvo, mode, adjusted_mode);
+
+       /* Save the data order, since I don't know what it should be set to. */
+       dvo_val = I915_READ(dvo_reg) &
+                 (DVO_PRESERVE_MASK | DVO_DATA_ORDER_GBRG);
+       dvo_val |= DVO_DATA_ORDER_FP | DVO_BORDER_ENABLE |
+                  DVO_BLANK_ACTIVE_HIGH;
+
+       if (pipe == 1)
+               dvo_val |= DVO_PIPE_B_SELECT;
+       dvo_val |= DVO_PIPE_STALL;
+       if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
+               dvo_val |= DVO_HSYNC_ACTIVE_HIGH;
+       if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
+               dvo_val |= DVO_VSYNC_ACTIVE_HIGH;
+
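+       /* The DVO port needs the high-speed clock enabled in its pipe's DPLL. */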
+       I915_WRITE(dpll_reg, I915_READ(dpll_reg) | DPLL_DVO_HIGH_SPEED);
+
+       /*I915_WRITE(DVOB_SRCDIM,
+         (adjusted_mode->hdisplay << DVO_SRCDIM_HORIZONTAL_SHIFT) |
+         (adjusted_mode->VDisplay << DVO_SRCDIM_VERTICAL_SHIFT));*/
+       I915_WRITE(dvo_srcdim_reg,
+                  (adjusted_mode->hdisplay << DVO_SRCDIM_HORIZONTAL_SHIFT) |
+                  (adjusted_mode->vdisplay << DVO_SRCDIM_VERTICAL_SHIFT));
+       /*I915_WRITE(DVOB, dvo_val);*/
+       I915_WRITE(dvo_reg, dvo_val);
+}
+
+/**
+ * Detect the output connection on our DVO device.
+ *
+ * Unimplemented.
+ */
+static enum drm_connector_status intel_dvo_detect(struct drm_connector *connector)
+{
+       struct intel_output *intel_output = to_intel_output(connector);
+       struct intel_dvo_device *dvo = intel_output->dev_priv;
+
+       return dvo->dev_ops->detect(dvo);
+}
+
+static int intel_dvo_get_modes(struct drm_connector *connector)
+{
+       struct intel_output *intel_output = to_intel_output(connector);
+       struct intel_dvo_device *dvo = intel_output->dev_priv;
+
+       /* We should probably have an i2c driver get_modes function for those
+        * devices which will have a fixed set of modes determined by the chip
+        * (TV-out, for example), but for now with just TMDS and LVDS,
+        * that's not the case.
+        */
+       intel_ddc_get_modes(intel_output);
+       if (!list_empty(&connector->probed_modes))
+               return 1;
+
+       if (dvo->panel_fixed_mode != NULL) {
+               struct drm_display_mode *mode;
+               mode = drm_mode_duplicate(connector->dev, dvo->panel_fixed_mode);
+               if (mode) {
+                       drm_mode_probed_add(connector, mode);
+                       return 1;
+               }
+       }
+       return 0;
+}
+
+static void intel_dvo_destroy(struct drm_connector *connector)
+{
+       struct intel_output *intel_output = to_intel_output(connector);
+       struct intel_dvo_device *dvo = intel_output->dev_priv;
+
+       if (dvo) {
+               if (dvo->dev_ops->destroy)
+                       dvo->dev_ops->destroy(dvo);
+               if (dvo->panel_fixed_mode)
+                       kfree(dvo->panel_fixed_mode);
+               /* dvo points into the static intel_dvo_devices[] table,
+                * so it must not be freed here.
+                */
+       }
+       if (intel_output->i2c_bus)
+               intel_i2c_destroy(intel_output->i2c_bus);
+       if (intel_output->ddc_bus)
+               intel_i2c_destroy(intel_output->ddc_bus);
+       drm_sysfs_connector_remove(connector);
+       drm_connector_cleanup(connector);
+       kfree(intel_output);
+}
+
+#ifdef RANDR_GET_CRTC_INTERFACE
+static struct drm_crtc *intel_dvo_get_crtc(struct drm_connector *connector)
+{
+       struct drm_device *dev = connector->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_output *intel_output = to_intel_output(connector);
+       struct intel_dvo_device *dvo = intel_output->dev_priv;
+       int pipe = !!(I915_READ(dvo->dvo_reg) & SDVO_PIPE_B_SELECT);
+
+       return intel_pipe_to_crtc(pScrn, pipe);
+}
+#endif
+
+static const struct drm_encoder_helper_funcs intel_dvo_helper_funcs = {
+       .dpms = intel_dvo_dpms,
+       .mode_fixup = intel_dvo_mode_fixup,
+       .prepare = intel_encoder_prepare,
+       .mode_set = intel_dvo_mode_set,
+       .commit = intel_encoder_commit,
+};
+
+static const struct drm_connector_funcs intel_dvo_connector_funcs = {
+       .save = intel_dvo_save,
+       .restore = intel_dvo_restore,
+       .detect = intel_dvo_detect,
+       .destroy = intel_dvo_destroy,
+       .fill_modes = drm_helper_probe_single_connector_modes,
+};
+
+static const struct drm_connector_helper_funcs intel_dvo_connector_helper_funcs = {
+       .mode_valid = intel_dvo_mode_valid,
+       .get_modes = intel_dvo_get_modes,
+       .best_encoder = intel_best_encoder,
+};
+
+static void intel_dvo_enc_destroy(struct drm_encoder *encoder)
+{
+       drm_encoder_cleanup(encoder);
+}
+
+static const struct drm_encoder_funcs intel_dvo_enc_funcs = {
+       .destroy = intel_dvo_enc_destroy,
+};
+
+
+/**
+ * Attempts to get a fixed panel timing for LVDS (currently only the i830).
+ *
+ * Other chips with DVO LVDS will need to extend this to deal with the LVDS
+ * chip being on DVOB/C and having multiple pipes.
+ */
+static struct drm_display_mode *
+intel_dvo_get_current_mode(struct drm_connector *connector)
+{
+       struct drm_device *dev = connector->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_output *intel_output = to_intel_output(connector);
+       struct intel_dvo_device *dvo = intel_output->dev_priv;
+       uint32_t dvo_reg = dvo->dvo_reg;
+       uint32_t dvo_val = I915_READ(dvo_reg);
+       struct drm_display_mode *mode = NULL;
+
+       /* If the DVO port is active, that'll be the LVDS, so we can pull out
+        * its timings to get how the BIOS set up the panel.
+        */
+       if (dvo_val & DVO_ENABLE) {
+               struct drm_crtc *crtc;
+               int pipe = (dvo_val & DVO_PIPE_B_SELECT) ? 1 : 0;
+
+               crtc = intel_get_crtc_from_pipe(dev, pipe);
+               if (crtc) {
+                       mode = intel_crtc_mode_get(dev, crtc);
+
+                       if (mode) {
+                               mode->type |= DRM_MODE_TYPE_PREFERRED;
+                               if (dvo_val & DVO_HSYNC_ACTIVE_HIGH)
+                                       mode->flags |= DRM_MODE_FLAG_PHSYNC;
+                               if (dvo_val & DVO_VSYNC_ACTIVE_HIGH)
+                                       mode->flags |= DRM_MODE_FLAG_PVSYNC;
+                       }
+               }
+       }
+       return mode;
+}
+
+void intel_dvo_init(struct drm_device *dev)
+{
+       struct intel_output *intel_output;
+       struct intel_dvo_device *dvo;
+       struct intel_i2c_chan *i2cbus = NULL;
+       int ret = 0;
+       int i;
+       int gpio_inited = 0;
+       int encoder_type = DRM_MODE_ENCODER_NONE;
+
+       intel_output = kzalloc(sizeof(struct intel_output), GFP_KERNEL);
+       if (!intel_output)
+               return;
+
+       /* Set up the DDC bus */
+       intel_output->ddc_bus = intel_i2c_create(dev, GPIOD, "DVODDC_D");
+       if (!intel_output->ddc_bus)
+               goto free_intel;
+
+       /* Now, try to find a controller */
+       for (i = 0; i < ARRAY_SIZE(intel_dvo_devices); i++) {
+               struct drm_connector *connector = &intel_output->base;
+               int gpio;
+
+               dvo = &intel_dvo_devices[i];
+
+               /* Allow the I2C driver info to specify the GPIO to be used in
+                * special cases, but otherwise default to what's defined
+                * in the spec.
+                */
+               if (dvo->gpio != 0)
+                       gpio = dvo->gpio;
+               else if (dvo->type == INTEL_DVO_CHIP_LVDS)
+                       gpio = GPIOB;
+               else
+                       gpio = GPIOE;
+
+               /* Set up the I2C bus necessary for the chip we're probing.
+                * It appears that everything is on GPIOE except for panels
+                * on i830 laptops, which are on GPIOB (DVOA).
+                */
+               if (gpio_inited != gpio) {
+                       if (i2cbus != NULL)
+                               intel_i2c_destroy(i2cbus);
+                       if (!(i2cbus = intel_i2c_create(dev, gpio,
+                               gpio == GPIOB ? "DVOI2C_B" : "DVOI2C_E"))) {
+                               continue;
+                       }
+                       gpio_inited = gpio;
+               }
+
+               if (dvo->dev_ops != NULL)
+                       ret = dvo->dev_ops->init(dvo, i2cbus);
+               else
+                       ret = false;
+
+               if (!ret)
+                       continue;
+
+               intel_output->type = INTEL_OUTPUT_DVO;
+               switch (dvo->type) {
+               case INTEL_DVO_CHIP_TMDS:
+                       drm_connector_init(dev, connector,
+                                          &intel_dvo_connector_funcs,
+                                          DRM_MODE_CONNECTOR_DVII);
+                       encoder_type = DRM_MODE_ENCODER_TMDS;
+                       break;
+               case INTEL_DVO_CHIP_LVDS:
+                       drm_connector_init(dev, connector,
+                                          &intel_dvo_connector_funcs,
+                                          DRM_MODE_CONNECTOR_LVDS);
+                       encoder_type = DRM_MODE_ENCODER_LVDS;
+                       break;
+               }
+
+               drm_connector_helper_add(connector,
+                                        &intel_dvo_connector_helper_funcs);
+               connector->display_info.subpixel_order = SubPixelHorizontalRGB;
+               connector->interlace_allowed = false;
+               connector->doublescan_allowed = false;
+
+               intel_output->dev_priv = dvo;
+               intel_output->i2c_bus = i2cbus;
+
+               drm_encoder_init(dev, &intel_output->enc,
+                                &intel_dvo_enc_funcs, encoder_type);
+               drm_encoder_helper_add(&intel_output->enc,
+                                      &intel_dvo_helper_funcs);
+
+               drm_mode_connector_attach_encoder(&intel_output->base,
+                                                 &intel_output->enc);
+               if (dvo->type == INTEL_DVO_CHIP_LVDS) {
+                       /* For our LVDS chipsets, we should hopefully be able
+                        * to dig the fixed panel mode out of the BIOS data.
+                        * However, it's in a different format from the BIOS
+                        * data on chipsets with integrated LVDS (stored in AIM
+                        * headers, likely), so for now, just get the current
+                        * mode being output through DVO.
+                        */
+                       dvo->panel_fixed_mode =
+                               intel_dvo_get_current_mode(connector);
+                       dvo->panel_wants_dither = true;
+               }
+
+               drm_sysfs_connector_add(connector);
+               return;
+       }
+
+       /* Didn't find a chip, so tear down. */
+       intel_i2c_destroy(intel_output->ddc_bus);
+       if (i2cbus != NULL)
+               intel_i2c_destroy(i2cbus);
+free_intel:
+       kfree(intel_output);
+}
diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c
new file mode 100644 (file)
index 0000000..afd1217
--- /dev/null
@@ -0,0 +1,925 @@
+/*
+ * Copyright © 2007 David Airlie
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *     David Airlie
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/tty.h>
+#include <linux/slab.h>
+#include <linux/sysrq.h>
+#include <linux/delay.h>
+#include <linux/fb.h>
+#include <linux/init.h>
+
+#include "drmP.h"
+#include "drm.h"
+#include "drm_crtc.h"
+#include "intel_drv.h"
+#include "i915_drm.h"
+#include "i915_drv.h"
+
+struct intelfb_par {
+       struct drm_device *dev;
+       struct drm_display_mode *our_mode;
+       struct intel_framebuffer *intel_fb;
+       int crtc_count;
+       /* crtc currently bound to this */
+       uint32_t crtc_ids[2];
+};
+
+static int intelfb_setcolreg(unsigned regno, unsigned red, unsigned green,
+                       unsigned blue, unsigned transp,
+                       struct fb_info *info)
+{
+       struct intelfb_par *par = info->par;
+       struct drm_device *dev = par->dev;
+       struct drm_crtc *crtc;
+       int i;
+
+       list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+               struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+               struct drm_mode_set *modeset = &intel_crtc->mode_set;
+               struct drm_framebuffer *fb = modeset->fb;
+
+               for (i = 0; i < par->crtc_count; i++)
+                       if (crtc->base.id == par->crtc_ids[i])
+                               break;
+
+               if (i == par->crtc_count)
+                       continue;
+
+               if (regno > 255)
+                       return 1;
+
+               if (fb->depth == 8) {
+                       intel_crtc_fb_gamma_set(crtc, red, green, blue, regno);
+                       return 0;
+               }
+
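+               /* Truecolor: pack the color into the 16-entry pseudo
+                * palette using the framebuffer's native bit layout.
+                */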
+               if (regno < 16) {
+                       switch (fb->depth) {
+                       case 15:
+                               fb->pseudo_palette[regno] = ((red & 0xf800) >> 1) |
+                                       ((green & 0xf800) >>  6) |
+                                       ((blue & 0xf800) >> 11);
+                               break;
+                       case 16:
+                               fb->pseudo_palette[regno] = (red & 0xf800) |
+                                       ((green & 0xfc00) >>  5) |
+                                       ((blue  & 0xf800) >> 11);
+                               break;
+                       case 24:
+                       case 32:
+                               fb->pseudo_palette[regno] = ((red & 0xff00) << 8) |
+                                       (green & 0xff00) |
+                                       ((blue  & 0xff00) >> 8);
+                               break;
+                       }
+               }
+       }
+       return 0;
+}
+
+static int intelfb_check_var(struct fb_var_screeninfo *var,
+                       struct fb_info *info)
+{
+       struct intelfb_par *par = info->par;
+       struct intel_framebuffer *intel_fb = par->intel_fb;
+       struct drm_framebuffer *fb = &intel_fb->base;
+       int depth;
+
+       if (var->pixclock == -1 || !var->pixclock)
+               return -EINVAL;
+
+       /* Need to resize the fb object !!! */
+       if (var->xres > fb->width || var->yres > fb->height) {
+               DRM_ERROR("Requested width/height is greater than current fb "
+                         "object %dx%d > %dx%d\n", var->xres, var->yres,
+                         fb->width, fb->height);
+               DRM_ERROR("Need resizing code.\n");
+               return -EINVAL;
+       }
+
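+       /* Work out the effective depth from the requested bpp and the
+        * component layout, then hand back the matching bitfields.
+        */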
+       switch (var->bits_per_pixel) {
+       case 16:
+               depth = (var->green.length == 6) ? 16 : 15;
+               break;
+       case 32:
+               depth = (var->transp.length > 0) ? 32 : 24;
+               break;
+       default:
+               depth = var->bits_per_pixel;
+               break;
+       }
+
+       switch (depth) {
+       case 8:
+               var->red.offset = 0;
+               var->green.offset = 0;
+               var->blue.offset = 0;
+               var->red.length = 8;
+               var->green.length = 8;
+               var->blue.length = 8;
+               var->transp.length = 0;
+               var->transp.offset = 0;
+               break;
+       case 15:
+               var->red.offset = 10;
+               var->green.offset = 5;
+               var->blue.offset = 0;
+               var->red.length = 5;
+               var->green.length = 5;
+               var->blue.length = 5;
+               var->transp.length = 1;
+               var->transp.offset = 15;
+               break;
+       case 16:
+               var->red.offset = 11;
+               var->green.offset = 5;
+               var->blue.offset = 0;
+               var->red.length = 5;
+               var->green.length = 6;
+               var->blue.length = 5;
+               var->transp.length = 0;
+               var->transp.offset = 0;
+               break;
+       case 24:
+               var->red.offset = 16;
+               var->green.offset = 8;
+               var->blue.offset = 0;
+               var->red.length = 8;
+               var->green.length = 8;
+               var->blue.length = 8;
+               var->transp.length = 0;
+               var->transp.offset = 0;
+               break;
+       case 32:
+               var->red.offset = 16;
+               var->green.offset = 8;
+               var->blue.offset = 0;
+               var->red.length = 8;
+               var->green.length = 8;
+               var->blue.length = 8;
+               var->transp.length = 8;
+               var->transp.offset = 24;
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+/* this will let fbcon do the mode init */
+/* FIXME: take mode config lock? */
+static int intelfb_set_par(struct fb_info *info)
+{
+       struct intelfb_par *par = info->par;
+       struct drm_device *dev = par->dev;
+       struct fb_var_screeninfo *var = &info->var;
+       int i;
+
+       DRM_DEBUG("%d %d\n", var->xres, var->pixclock);
+
+       if (var->pixclock != -1) {
+               DRM_ERROR("PIXEL CLOCK SET\n");
+               return -EINVAL;
+       } else {
+               struct drm_crtc *crtc;
+               int ret;
+
+               list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+                       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+
+                       for (i = 0; i < par->crtc_count; i++)
+                               if (crtc->base.id == par->crtc_ids[i])
+                                       break;
+
+                       if (i == par->crtc_count)
+                               continue;
+
+                       if (crtc->fb == intel_crtc->mode_set.fb) {
+                               mutex_lock(&dev->mode_config.mutex);
+                               ret = crtc->funcs->set_config(&intel_crtc->mode_set);
+                               mutex_unlock(&dev->mode_config.mutex);
+                               if (ret)
+                                       return ret;
+                       }
+               }
+               return 0;
+       }
+}
+
+static int intelfb_pan_display(struct fb_var_screeninfo *var,
+                               struct fb_info *info)
+{
+       struct intelfb_par *par = info->par;
+       struct drm_device *dev = par->dev;
+       struct drm_mode_set *modeset;
+       struct drm_crtc *crtc;
+       struct intel_crtc *intel_crtc;
+       int ret = 0;
+       int i;
+
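+       /* Panning is done by re-running each bound CRTC's mode set with the
+        * new x/y offsets.
+        */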
+       list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+               for (i = 0; i < par->crtc_count; i++)
+                       if (crtc->base.id == par->crtc_ids[i])
+                               break;
+
+               if (i == par->crtc_count)
+                       continue;
+
+               intel_crtc = to_intel_crtc(crtc);
+               modeset = &intel_crtc->mode_set;
+
+               modeset->x = var->xoffset;
+               modeset->y = var->yoffset;
+
+               if (modeset->num_connectors) {
+                       mutex_lock(&dev->mode_config.mutex);
+                       ret = crtc->funcs->set_config(modeset);
+                       mutex_unlock(&dev->mode_config.mutex);
+                       if (!ret) {
+                               info->var.xoffset = var->xoffset;
+                               info->var.yoffset = var->yoffset;
+                       }
+               }
+       }
+
+       return ret;
+}
+
+static void intelfb_on(struct fb_info *info)
+{
+       struct intelfb_par *par = info->par;
+       struct drm_device *dev = par->dev;
+       struct drm_crtc *crtc;
+       struct drm_encoder *encoder;
+       int i;
+
+       /*
+        * For each CRTC bound to this fb, turn the CRTC back on and then
+        * re-enable its associated encoders.
+        */
+       list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+               struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+
+               for (i = 0; i < par->crtc_count; i++)
+                       if (crtc->base.id == par->crtc_ids[i])
+                               break;
+
+               crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
+
+               /* Found a CRTC on this fb, now find encoders */
+               list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+                       if (encoder->crtc == crtc) {
+                               struct drm_encoder_helper_funcs *encoder_funcs;
+                               encoder_funcs = encoder->helper_private;
+                               encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
+                       }
+               }
+       }
+}
+
+static void intelfb_off(struct fb_info *info, int dpms_mode)
+{
+       struct intelfb_par *par = info->par;
+       struct drm_device *dev = par->dev;
+       struct drm_crtc *crtc;
+       struct drm_encoder *encoder;
+       int i;
+
+       /*
+        * For each CRTC in this fb, find all associated encoders
+        * and turn them off, then turn off the CRTC.
+        */
+       list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+               struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+
+               for (i = 0; i < par->crtc_count; i++)
+                       if (crtc->base.id == par->crtc_ids[i])
+                               break;
+
+               /* Found a CRTC on this fb, now find encoders */
+               list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+                       if (encoder->crtc == crtc) {
+                               struct drm_encoder_helper_funcs *encoder_funcs;
+                               encoder_funcs = encoder->helper_private;
+                               encoder_funcs->dpms(encoder, dpms_mode);
+                       }
+               }
+               if (dpms_mode == DRM_MODE_DPMS_OFF)
+                       crtc_funcs->dpms(crtc, dpms_mode);
+       }
+}
+
+static int intelfb_blank(int blank, struct fb_info *info)
+{
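+       /* Map the fbcon blanking levels onto DRM DPMS states. */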
+       switch (blank) {
+       case FB_BLANK_UNBLANK:
+               intelfb_on(info);
+               break;
+       case FB_BLANK_NORMAL:
+               intelfb_off(info, DRM_MODE_DPMS_STANDBY);
+               break;
+       case FB_BLANK_HSYNC_SUSPEND:
+               intelfb_off(info, DRM_MODE_DPMS_STANDBY);
+               break;
+       case FB_BLANK_VSYNC_SUSPEND:
+               intelfb_off(info, DRM_MODE_DPMS_SUSPEND);
+               break;
+       case FB_BLANK_POWERDOWN:
+               intelfb_off(info, DRM_MODE_DPMS_OFF);
+               break;
+       }
+       return 0;
+}
+
+static struct fb_ops intelfb_ops = {
+       .owner = THIS_MODULE,
+       .fb_check_var = intelfb_check_var,
+       .fb_set_par = intelfb_set_par,
+       .fb_setcolreg = intelfb_setcolreg,
+       .fb_fillrect = cfb_fillrect,
+       .fb_copyarea = cfb_copyarea,
+       .fb_imageblit = cfb_imageblit,
+       .fb_pan_display = intelfb_pan_display,
+       .fb_blank = intelfb_blank,
+};
+
+/**
+ * Currently it is assumed that the old framebuffer is reused.
+ *
+ * LOCKING
+ * Caller should hold the mode config lock.
+ */
+int intelfb_resize(struct drm_device *dev, struct drm_crtc *crtc)
+{
+       struct fb_info *info;
+       struct drm_framebuffer *fb;
+       struct drm_display_mode *mode = crtc->desired_mode;
+
+       fb = crtc->fb;
+       if (!fb)
+               return 1;
+
+       info = fb->fbdev;
+       if (!info)
+               return 1;
+
+       if (!mode)
+               return 1;
+
+       info->var.xres = mode->hdisplay;
+       info->var.right_margin = mode->hsync_start - mode->hdisplay;
+       info->var.hsync_len = mode->hsync_end - mode->hsync_start;
+       info->var.left_margin = mode->htotal - mode->hsync_end;
+       info->var.yres = mode->vdisplay;
+       info->var.lower_margin = mode->vsync_start - mode->vdisplay;
+       info->var.vsync_len = mode->vsync_end - mode->vsync_start;
+       info->var.upper_margin = mode->vtotal - mode->vsync_end;
+       info->var.pixclock = 10000000 / mode->htotal * 1000 / mode->vtotal * 100;
+       /* avoid overflow */
+       info->var.pixclock = info->var.pixclock * 1000 / mode->vrefresh;
+
+       return 0;
+}
+EXPORT_SYMBOL(intelfb_resize);
+
+static struct drm_mode_set kernelfb_mode;
+
+static int intelfb_panic(struct notifier_block *n, unsigned long unused,
+                        void *panic_str)
+{
+       DRM_ERROR("panic occurred, switching back to text console\n");
+
+       intelfb_restore();
+       return 0;
+}
+
+static struct notifier_block paniced = {
+       .notifier_call = intelfb_panic,
+};
+
+static int intelfb_create(struct drm_device *dev, uint32_t fb_width,
+                         uint32_t fb_height, uint32_t surface_width,
+                         uint32_t surface_height,
+                         struct intel_framebuffer **intel_fb_p)
+{
+       struct fb_info *info;
+       struct intelfb_par *par;
+       struct drm_framebuffer *fb;
+       struct intel_framebuffer *intel_fb;
+       struct drm_mode_fb_cmd mode_cmd;
+       struct drm_gem_object *fbo = NULL;
+       struct drm_i915_gem_object *obj_priv;
+       struct device *device = &dev->pdev->dev;
+       int size, ret, mmio_bar = IS_I9XX(dev) ? 0 : 1;
+
+       mode_cmd.width = surface_width;
+       mode_cmd.height = surface_height;
+
+       mode_cmd.bpp = 32;
+       mode_cmd.pitch = ALIGN(mode_cmd.width * ((mode_cmd.bpp + 1) / 8), 64);
+       mode_cmd.depth = 24;
+
+       size = mode_cmd.pitch * mode_cmd.height;
+       size = ALIGN(size, PAGE_SIZE);
+       fbo = drm_gem_object_alloc(dev, size);
+       if (!fbo) {
+               printk(KERN_ERR "failed to allocate framebuffer\n");
+               ret = -ENOMEM;
+               goto out;
+       }
+       obj_priv = fbo->driver_private;
+
+       mutex_lock(&dev->struct_mutex);
+
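+       /* Pin the object so it keeps a fixed offset in the aperture for scanout. */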
+       ret = i915_gem_object_pin(fbo, PAGE_SIZE);
+       if (ret) {
+               DRM_ERROR("failed to pin fb: %d\n", ret);
+               goto out_unref;
+       }
+
+       /* Flush everything out, we'll be doing GTT only from now on */
+       i915_gem_object_set_to_gtt_domain(fbo, 1);
+
+       ret = intel_framebuffer_create(dev, &mode_cmd, &fb, fbo);
+       if (ret) {
+               DRM_ERROR("failed to allocate fb.\n");
+               goto out_unref;
+       }
+
+       list_add(&fb->filp_head, &dev->mode_config.fb_kernel_list);
+
+       intel_fb = to_intel_framebuffer(fb);
+       *intel_fb_p = intel_fb;
+
+       info = framebuffer_alloc(sizeof(struct intelfb_par), device);
+       if (!info) {
+               ret = -ENOMEM;
+               goto out_unref;
+       }
+
+       par = info->par;
+
+       strcpy(info->fix.id, "inteldrmfb");
+       info->fix.type = FB_TYPE_PACKED_PIXELS;
+       info->fix.visual = FB_VISUAL_TRUECOLOR;
+       info->fix.type_aux = 0;
+       info->fix.xpanstep = 1; /* doing it in hw */
+       info->fix.ypanstep = 1; /* doing it in hw */
+       info->fix.ywrapstep = 0;
+       info->fix.accel = FB_ACCEL_I830;
+
+       info->flags = FBINFO_DEFAULT;
+
+       info->fbops = &intelfb_ops;
+
+       info->fix.line_length = fb->pitch;
+       info->fix.smem_start = dev->mode_config.fb_base + obj_priv->gtt_offset;
+       info->fix.smem_len = size;
+
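+       /* Map the framebuffer through the graphics aperture with write-combining. */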
+       info->screen_base = ioremap_wc(dev->agp->base + obj_priv->gtt_offset,
+                                      size);
+       if (!info->screen_base) {
+               ret = -ENOSPC;
+               goto out_unref;
+       }
+       info->screen_size = size;
+
+//     memset(info->screen_base, 0, size);
+
+       info->pseudo_palette = fb->pseudo_palette;
+       info->var.xres_virtual = fb->width;
+       info->var.yres_virtual = fb->height;
+       info->var.bits_per_pixel = fb->bits_per_pixel;
+       info->var.xoffset = 0;
+       info->var.yoffset = 0;
+       info->var.activate = FB_ACTIVATE_NOW;
+       info->var.height = -1;
+       info->var.width = -1;
+
+       info->var.xres = fb_width;
+       info->var.yres = fb_height;
+
+       /* FIXME: we really shouldn't expose mmio space at all */
+       info->fix.mmio_start = pci_resource_start(dev->pdev, mmio_bar);
+       info->fix.mmio_len = pci_resource_len(dev->pdev, mmio_bar);
+
+       info->pixmap.size = 64*1024;
+       info->pixmap.buf_align = 8;
+       info->pixmap.access_align = 32;
+       info->pixmap.flags = FB_PIXMAP_SYSTEM;
+       info->pixmap.scan_align = 1;
+
+       switch (fb->depth) {
+       case 8:
+               info->var.red.offset = 0;
+               info->var.green.offset = 0;
+               info->var.blue.offset = 0;
+               info->var.red.length = 8; /* 8bit DAC */
+               info->var.green.length = 8;
+               info->var.blue.length = 8;
+               info->var.transp.offset = 0;
+               info->var.transp.length = 0;
+               break;
+       case 15:
+               info->var.red.offset = 10;
+               info->var.green.offset = 5;
+               info->var.blue.offset = 0;
+               info->var.red.length = 5;
+               info->var.green.length = 5;
+               info->var.blue.length = 5;
+               info->var.transp.offset = 15;
+               info->var.transp.length = 1;
+               break;
+       case 16:
+               info->var.red.offset = 11;
+               info->var.green.offset = 5;
+               info->var.blue.offset = 0;
+               info->var.red.length = 5;
+               info->var.green.length = 6;
+               info->var.blue.length = 5;
+               info->var.transp.offset = 0;
+               break;
+       case 24:
+               info->var.red.offset = 16;
+               info->var.green.offset = 8;
+               info->var.blue.offset = 0;
+               info->var.red.length = 8;
+               info->var.green.length = 8;
+               info->var.blue.length = 8;
+               info->var.transp.offset = 0;
+               info->var.transp.length = 0;
+               break;
+       case 32:
+               info->var.red.offset = 16;
+               info->var.green.offset = 8;
+               info->var.blue.offset = 0;
+               info->var.red.length = 8;
+               info->var.green.length = 8;
+               info->var.blue.length = 8;
+               info->var.transp.offset = 24;
+               info->var.transp.length = 8;
+               break;
+       default:
+               break;
+       }
+
+       fb->fbdev = info;
+
+       par->intel_fb = intel_fb;
+       par->dev = dev;
+
+       /* To allow resizing without swapping buffers */
+       printk(KERN_INFO "allocated %dx%d fb: 0x%08x, bo %p\n",
+              intel_fb->base.width, intel_fb->base.height,
+              obj_priv->gtt_offset, fbo);
+
+       mutex_unlock(&dev->struct_mutex);
+       return 0;
+
+out_unref:
+       drm_gem_object_unreference(fbo);
+       mutex_unlock(&dev->struct_mutex);
+out:
+       return ret;
+}
+
+static int intelfb_multi_fb_probe_crtc(struct drm_device *dev, struct drm_crtc *crtc)
+{
+       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+       struct intel_framebuffer *intel_fb;
+       struct drm_framebuffer *fb;
+       struct drm_connector *connector;
+       struct fb_info *info;
+       struct intelfb_par *par;
+       struct drm_mode_set *modeset;
+       unsigned int width, height;
+       int new_fb = 0;
+       int ret, i, conn_count;
+
+       if (!drm_helper_crtc_in_use(crtc))
+               return 0;
+
+       if (!crtc->desired_mode)
+               return 0;
+
+       width = crtc->desired_mode->hdisplay;
+       height = crtc->desired_mode->vdisplay;
+
+       /* is there an fb bound to this crtc already */
+       if (!intel_crtc->mode_set.fb) {
+               ret = intelfb_create(dev, width, height, width, height, &intel_fb);
+               if (ret)
+                       return -EINVAL;
+               new_fb = 1;
+       } else {
+               fb = intel_crtc->mode_set.fb;
+               intel_fb = to_intel_framebuffer(fb);
+               if ((intel_fb->base.width < width) || (intel_fb->base.height < height))
+                       return -EINVAL;
+       }
+
+       info = intel_fb->base.fbdev;
+       par = info->par;
+
+       modeset = &intel_crtc->mode_set;
+       modeset->fb = &intel_fb->base;
+       conn_count = 0;
+       list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+               if (connector->encoder &&
+                   connector->encoder->crtc == modeset->crtc) {
+                       modeset->connectors[conn_count] = connector;
+                       conn_count++;
+                       if (conn_count > INTELFB_CONN_LIMIT)
+                               BUG();
+               }
+       }
+
+       for (i = conn_count; i < INTELFB_CONN_LIMIT; i++)
+               modeset->connectors[i] = NULL;
+
+       par->crtc_ids[0] = crtc->base.id;
+
+       modeset->num_connectors = conn_count;
+       if (modeset->mode != modeset->crtc->desired_mode)
+               modeset->mode = modeset->crtc->desired_mode;
+
+       par->crtc_count = 1;
+
+       if (new_fb) {
+               info->var.pixclock = -1;
+               if (register_framebuffer(info) < 0)
+                       return -EINVAL;
+       } else
+               intelfb_set_par(info);
+
+       printk(KERN_INFO "fb%d: %s frame buffer device\n", info->node,
+              info->fix.id);
+
+       /* Switch back to kernel console on panic */
+       kernelfb_mode = *modeset;
+       atomic_notifier_chain_register(&panic_notifier_list, &paniced);
+       printk(KERN_INFO "registered panic notifier\n");
+
+       return 0;
+}
+
+static int intelfb_multi_fb_probe(struct drm_device *dev)
+{
+       struct drm_crtc *crtc;
+       int ret = 0;
+
+       list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+               ret = intelfb_multi_fb_probe_crtc(dev, crtc);
+               if (ret)
+                       return ret;
+       }
+       return ret;
+}
+
+static int intelfb_single_fb_probe(struct drm_device *dev)
+{
+       struct drm_crtc *crtc;
+       struct drm_connector *connector;
+       unsigned int fb_width = (unsigned)-1, fb_height = (unsigned)-1;
+       unsigned int surface_width = 0, surface_height = 0;
+       int new_fb = 0;
+       int crtc_count = 0;
+       int ret, i, conn_count = 0;
+       struct intel_framebuffer *intel_fb;
+       struct fb_info *info;
+       struct intelfb_par *par;
+       struct drm_mode_set *modeset = NULL;
+
+       DRM_DEBUG("\n");
+
+       /* Get a count of crtcs now in use and new min/maxes width/heights */
+       list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+               if (!drm_helper_crtc_in_use(crtc))
+                       continue;
+
+               crtc_count++;
+               if (!crtc->desired_mode)
+                       continue;
+
+               /* Smallest mode determines console size... */
+               if (crtc->desired_mode->hdisplay < fb_width)
+                       fb_width = crtc->desired_mode->hdisplay;
+
+               if (crtc->desired_mode->vdisplay < fb_height)
+                       fb_height = crtc->desired_mode->vdisplay;
+
+               /* ... but largest for memory allocation dimensions */
+               if (crtc->desired_mode->hdisplay > surface_width)
+                       surface_width = crtc->desired_mode->hdisplay;
+
+               if (crtc->desired_mode->vdisplay > surface_height)
+                       surface_height = crtc->desired_mode->vdisplay;
+       }
+
+       if (crtc_count == 0 || fb_width == -1 || fb_height == -1) {
+               /* hmm everyone went away - assume VGA cable just fell out
+                  and will come back later. */
+               DRM_DEBUG("no CRTCs available?\n");
+               return 0;
+       }
+
+       /* Find the fb for our new config */
+       if (list_empty(&dev->mode_config.fb_kernel_list)) {
+               DRM_DEBUG("creating new fb (console size %dx%d, "
+                         "buffer size %dx%d)\n", fb_width, fb_height,
+                         surface_width, surface_height);
+               ret = intelfb_create(dev, fb_width, fb_height, surface_width,
+                                    surface_height, &intel_fb);
+               if (ret)
+                       return -EINVAL;
+               new_fb = 1;
+       } else {
+               struct drm_framebuffer *fb;
+
+               fb = list_first_entry(&dev->mode_config.fb_kernel_list,
+                                     struct drm_framebuffer, filp_head);
+               intel_fb = to_intel_framebuffer(fb);
+
+               /* if someone hotplugs something bigger than we have already
+                * allocated, we are pwned.  As really we can't resize an
+                * fbdev that is in the wild currently due to fbdev not really
+                * being designed for the lower layers moving stuff around
+                * under it.
+                * - so in the grand style of things - punt.
+                */
+               if ((fb->width < surface_width) ||
+                   (fb->height < surface_height)) {
+                       DRM_ERROR("fb not large enough for console\n");
+                       return -EINVAL;
+               }
+       }
+
+       info = intel_fb->base.fbdev;
+       par = info->par;
+
+       crtc_count = 0;
+       /*
+        * For each CRTC, set up the connector list for the CRTC's mode
+        * set configuration.
+        */
+       list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+               struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+
+               modeset = &intel_crtc->mode_set;
+               modeset->fb = &intel_fb->base;
+               conn_count = 0;
+               list_for_each_entry(connector, &dev->mode_config.connector_list,
+                                   head) {
+                       if (!connector->encoder)
+                               continue;
+
+                       if (connector->encoder->crtc == modeset->crtc) {
+                               modeset->connectors[conn_count++] = connector;
+                               if (conn_count > INTELFB_CONN_LIMIT)
+                                       BUG();
+                       }
+               }
+
+               /* Zero out remaining connector pointers */
+               for (i = conn_count; i < INTELFB_CONN_LIMIT; i++)
+                       modeset->connectors[i] = NULL;
+
+               par->crtc_ids[crtc_count++] = crtc->base.id;
+
+               modeset->num_connectors = conn_count;
+               if (modeset->mode != modeset->crtc->desired_mode)
+                       modeset->mode = modeset->crtc->desired_mode;
+       }
+       par->crtc_count = crtc_count;
+
+       if (new_fb) {
+               info->var.pixclock = -1;
+               if (register_framebuffer(info) < 0)
+                       return -EINVAL;
+       } else
+               intelfb_set_par(info);
+
+       printk(KERN_INFO "fb%d: %s frame buffer device\n", info->node,
+              info->fix.id);
+
+       /* Switch back to kernel console on panic */
+       kernelfb_mode = *modeset;
+       atomic_notifier_chain_register(&panic_notifier_list, &paniced);
+       printk(KERN_INFO "registered panic notifier\n");
+
+       return 0;
+}
+
+/**
+ * intelfb_restore - restore the framebuffer console (kernel) config
+ *
+ * Restores the kernel's fbcon mode, used for lastclose & panic paths.
+ */
+void intelfb_restore(void)
+{
+       drm_crtc_helper_set_config(&kernelfb_mode);
+}
+
+static void intelfb_sysrq(int dummy1, struct tty_struct *dummy3)
+{
+        intelfb_restore();
+}
+
+static struct sysrq_key_op sysrq_intelfb_restore_op = {
+        .handler = intelfb_sysrq,
+        .help_msg = "force fb",
+        .action_msg = "force restore of fb console",
+};
+
+int intelfb_probe(struct drm_device *dev)
+{
+       int ret;
+
+       DRM_DEBUG("\n");
+
+       /* something has changed in the lower levels of hell - deal with it
+          here */
+
+       /* two modes : a) 1 fb to rule all crtcs.
+                      b) one fb per crtc.
+          two actions 1) new connected device
+                      2) device removed.
+          case a/1 : if the fb surface isn't big enough - resize the surface fb.
+                     if the fb size isn't big enough - resize fb into surface.
+                     if everything big enough configure the new crtc/etc.
+          case a/2 : undo the configuration
+                     possibly resize down the fb to fit the new configuration.
+           case b/1 : see if it is on a new crtc - setup a new fb and add it.
+          case b/2 : teardown the new fb.
+       */
+
+       /* mode a first */
+       /* search for an fb */
+       if (i915_fbpercrtc == 1) {
+               ret = intelfb_multi_fb_probe(dev);
+       } else {
+               ret = intelfb_single_fb_probe(dev);
+       }
+
+       register_sysrq_key('g', &sysrq_intelfb_restore_op);
+
+       return ret;
+}
+EXPORT_SYMBOL(intelfb_probe);
+
+int intelfb_remove(struct drm_device *dev, struct drm_framebuffer *fb)
+{
+       struct fb_info *info;
+
+       if (!fb)
+               return -EINVAL;
+
+       info = fb->fbdev;
+
+       if (info) {
+               unregister_framebuffer(info);
+               iounmap(info->screen_base);
+               framebuffer_release(info);
+       }
+
+       atomic_notifier_chain_unregister(&panic_notifier_list, &paniced);
+       memset(&kernelfb_mode, 0, sizeof(struct drm_mode_set));
+       return 0;
+}
+EXPORT_SYMBOL(intelfb_remove);
+MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
new file mode 100644 (file)
index 0000000..a5a2f53
--- /dev/null
@@ -0,0 +1,184 @@
+/*
+ * Copyright (c) 2006 Dave Airlie <airlied@linux.ie>
+ * Copyright © 2006-2008 Intel Corporation
+ *   Jesse Barnes <jesse.barnes@intel.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *     Eric Anholt <eric@anholt.net>
+ */
+#include <linux/i2c.h>
+#include <linux/i2c-id.h>
+#include <linux/i2c-algo-bit.h>
+#include "drmP.h"
+#include "drm.h"
+#include "intel_drv.h"
+#include "i915_drm.h"
+#include "i915_drv.h"
+
+/*
+ * Intel GPIO access functions
+ */
+
+#define I2C_RISEFALL_TIME 20
+
+static int get_clock(void *data)
+{
+       struct intel_i2c_chan *chan = data;
+       struct drm_i915_private *dev_priv = chan->drm_dev->dev_private;
+       u32 val;
+
+       val = I915_READ(chan->reg);
+       return ((val & GPIO_CLOCK_VAL_IN) != 0);
+}
+
+static int get_data(void *data)
+{
+       struct intel_i2c_chan *chan = data;
+       struct drm_i915_private *dev_priv = chan->drm_dev->dev_private;
+       u32 val;
+
+       val = I915_READ(chan->reg);
+       return ((val & GPIO_DATA_VAL_IN) != 0);
+}
+
+static void set_clock(void *data, int state_high)
+{
+       struct intel_i2c_chan *chan = data;
+       struct drm_device *dev = chan->drm_dev;
+       struct drm_i915_private *dev_priv = chan->drm_dev->dev_private;
+       u32 reserved = 0, clock_bits;
+
+       /* On most chips, these bits must be preserved in software. */
+       if (!IS_I830(dev) && !IS_845G(dev))
+               reserved = I915_READ(chan->reg) & (GPIO_DATA_PULLUP_DISABLE |
+                                                  GPIO_CLOCK_PULLUP_DISABLE);
+
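+       /* The bus is open-drain: release the line (input) for a high,
+        * actively drive it low for a low.
+        */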
+       if (state_high)
+               clock_bits = GPIO_CLOCK_DIR_IN | GPIO_CLOCK_DIR_MASK;
+       else
+               clock_bits = GPIO_CLOCK_DIR_OUT | GPIO_CLOCK_DIR_MASK |
+                       GPIO_CLOCK_VAL_MASK;
+       I915_WRITE(chan->reg, reserved | clock_bits);
+       udelay(I2C_RISEFALL_TIME); /* wait for the line to change state */
+}
+
+static void set_data(void *data, int state_high)
+{
+       struct intel_i2c_chan *chan = data;
+       struct drm_device *dev = chan->drm_dev;
+       struct drm_i915_private *dev_priv = chan->drm_dev->dev_private;
+       u32 reserved = 0, data_bits;
+
+       /* On most chips, these bits must be preserved in software. */
+       if (!IS_I830(dev) && !IS_845G(dev))
+               reserved = I915_READ(chan->reg) & (GPIO_DATA_PULLUP_DISABLE |
+                                                  GPIO_CLOCK_PULLUP_DISABLE);
+
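+       /* Same open-drain convention as set_clock(), applied to SDA. */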
+       if (state_high)
+               data_bits = GPIO_DATA_DIR_IN | GPIO_DATA_DIR_MASK;
+       else
+               data_bits = GPIO_DATA_DIR_OUT | GPIO_DATA_DIR_MASK |
+                       GPIO_DATA_VAL_MASK;
+
+       I915_WRITE(chan->reg, reserved | data_bits);
+       udelay(I2C_RISEFALL_TIME); /* wait for the line to change state */
+}
+
+/**
+ * intel_i2c_create - instantiate an Intel i2c bus using the specified GPIO reg
+ * @dev: DRM device
+ * @output: driver specific output device
+ * @reg: GPIO reg to use
+ * @name: name for this bus
+ *
+ * Creates and registers a new i2c bus with the Linux i2c layer, for use
+ * in output probing and control (e.g. DDC or SDVO control functions).
+ *
+ * Possible values for @reg include:
+ *   %GPIOA
+ *   %GPIOB
+ *   %GPIOC
+ *   %GPIOD
+ *   %GPIOE
+ *   %GPIOF
+ *   %GPIOG
+ *   %GPIOH
+ * see PRM for details on how these different busses are used.
+ */
+struct intel_i2c_chan *intel_i2c_create(struct drm_device *dev, const u32 reg,
+                                       const char *name)
+{
+       struct intel_i2c_chan *chan;
+
+       chan = kzalloc(sizeof(struct intel_i2c_chan), GFP_KERNEL);
+       if (!chan)
+               goto out_free;
+
+       chan->drm_dev = dev;
+       chan->reg = reg;
+       snprintf(chan->adapter.name, I2C_NAME_SIZE, "intel drm %s", name);
+       chan->adapter.owner = THIS_MODULE;
+#ifndef I2C_HW_B_INTELFB
+#define I2C_HW_B_INTELFB I2C_HW_B_I810
+#endif
+       chan->adapter.id = I2C_HW_B_INTELFB;
+       chan->adapter.algo_data = &chan->algo;
+       chan->adapter.dev.parent = &dev->pdev->dev;
+       chan->algo.setsda = set_data;
+       chan->algo.setscl = set_clock;
+       chan->algo.getsda = get_data;
+       chan->algo.getscl = get_clock;
+       chan->algo.udelay = 20;
+       chan->algo.timeout = usecs_to_jiffies(2200);
+       chan->algo.data = chan;
+
+       i2c_set_adapdata(&chan->adapter, chan);
+
+       if (i2c_bit_add_bus(&chan->adapter))
+               goto out_free;
+
+       /* JJJ:  raise SCL and SDA? */
+       set_data(chan, 1);
+       set_clock(chan, 1);
+       udelay(20);
+
+       return chan;
+
+out_free:
+       kfree(chan);
+       return NULL;
+}
+
+/**
+ * intel_i2c_destroy - unregister and free i2c bus resources
+ * @output: channel to free
+ *
+ * Unregister the adapter from the i2c layer, then free the structure.
+ */
+void intel_i2c_destroy(struct intel_i2c_chan *chan)
+{
+       if (!chan)
+               return;
+
+       i2c_del_adapter(&chan->adapter);
+       kfree(chan);
+}
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
new file mode 100644 (file)
index 0000000..ccecfaf
--- /dev/null
@@ -0,0 +1,525 @@
+/*
+ * Copyright © 2006-2007 Intel Corporation
+ * Copyright (c) 2006 Dave Airlie <airlied@linux.ie>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *     Eric Anholt <eric@anholt.net>
+ *      Dave Airlie <airlied@linux.ie>
+ *      Jesse Barnes <jesse.barnes@intel.com>
+ */
+
+#include <linux/i2c.h>
+#include "drmP.h"
+#include "drm.h"
+#include "drm_crtc.h"
+#include "drm_edid.h"
+#include "intel_drv.h"
+#include "i915_drm.h"
+#include "i915_drv.h"
+
+/**
+ * Sets the backlight level.
+ *
+ * \param level backlight level, from 0 to intel_lvds_get_max_backlight().
+ */
+static void intel_lvds_set_backlight(struct drm_device *dev, int level)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       u32 blc_pwm_ctl;
+
+       blc_pwm_ctl = I915_READ(BLC_PWM_CTL) & ~BACKLIGHT_DUTY_CYCLE_MASK;
+       I915_WRITE(BLC_PWM_CTL, (blc_pwm_ctl |
+                                (level << BACKLIGHT_DUTY_CYCLE_SHIFT)));
+}
+
+/**
+ * Returns the maximum level of the backlight duty cycle field.
+ */
+static u32 intel_lvds_get_max_backlight(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+
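+       /* The maximum duty cycle value is derived from the PWM modulation
+        * frequency field of BLC_PWM_CTL.
+        */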
+       return ((I915_READ(BLC_PWM_CTL) & BACKLIGHT_MODULATION_FREQ_MASK) >>
+               BACKLIGHT_MODULATION_FREQ_SHIFT) * 2;
+}
+
+/**
+ * Sets the power state for the panel.
+ */
+static void intel_lvds_set_power(struct drm_device *dev, bool on)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       u32 pp_status;
+
+       if (on) {
+               I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) |
+                          POWER_TARGET_ON);
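+               /* Wait for the panel power sequencer to report power-on. */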
+               do {
+                       pp_status = I915_READ(PP_STATUS);
+               } while ((pp_status & PP_ON) == 0);
+
+               intel_lvds_set_backlight(dev, dev_priv->backlight_duty_cycle);
+       } else {
+               intel_lvds_set_backlight(dev, 0);
+
+               I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) &
+                          ~POWER_TARGET_ON);
+               do {
+                       pp_status = I915_READ(PP_STATUS);
+               } while (pp_status & PP_ON);
+       }
+}
+
+static void intel_lvds_dpms(struct drm_encoder *encoder, int mode)
+{
+       struct drm_device *dev = encoder->dev;
+
+       if (mode == DRM_MODE_DPMS_ON)
+               intel_lvds_set_power(dev, true);
+       else
+               intel_lvds_set_power(dev, false);
+
+       /* XXX: We never power down the LVDS pairs. */
+}
+
+static void intel_lvds_save(struct drm_connector *connector)
+{
+       struct drm_device *dev = connector->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+
+       dev_priv->savePP_ON = I915_READ(PP_ON_DELAYS);
+       dev_priv->savePP_OFF = I915_READ(PP_OFF_DELAYS);
+       dev_priv->savePP_CONTROL = I915_READ(PP_CONTROL);
+       dev_priv->savePP_DIVISOR = I915_READ(PP_DIVISOR);
+       dev_priv->saveBLC_PWM_CTL = I915_READ(BLC_PWM_CTL);
+       dev_priv->backlight_duty_cycle = (dev_priv->saveBLC_PWM_CTL &
+                                      BACKLIGHT_DUTY_CYCLE_MASK);
+
+       /*
+        * If the light is off at server startup, just make it full brightness
+        */
+       if (dev_priv->backlight_duty_cycle == 0)
+               dev_priv->backlight_duty_cycle =
+                       intel_lvds_get_max_backlight(dev);
+}
+
+static void intel_lvds_restore(struct drm_connector *connector)
+{
+       struct drm_device *dev = connector->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+
+       I915_WRITE(BLC_PWM_CTL, dev_priv->saveBLC_PWM_CTL);
+       I915_WRITE(PP_ON_DELAYS, dev_priv->savePP_ON);
+       I915_WRITE(PP_OFF_DELAYS, dev_priv->savePP_OFF);
+       I915_WRITE(PP_DIVISOR, dev_priv->savePP_DIVISOR);
+       I915_WRITE(PP_CONTROL, dev_priv->savePP_CONTROL);
+       if (dev_priv->savePP_CONTROL & POWER_TARGET_ON)
+               intel_lvds_set_power(dev, true);
+       else
+               intel_lvds_set_power(dev, false);
+}
+
+static int intel_lvds_mode_valid(struct drm_connector *connector,
+                                struct drm_display_mode *mode)
+{
+       struct drm_device *dev = connector->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_display_mode *fixed_mode = dev_priv->panel_fixed_mode;
+
+       if (fixed_mode) {
+               if (mode->hdisplay > fixed_mode->hdisplay)
+                       return MODE_PANEL;
+               if (mode->vdisplay > fixed_mode->vdisplay)
+                       return MODE_PANEL;
+       }
+
+       return MODE_OK;
+}
+
+static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
+                                 struct drm_display_mode *mode,
+                                 struct drm_display_mode *adjusted_mode)
+{
+       struct drm_device *dev = encoder->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+       struct drm_encoder *tmp_encoder;
+
+       /* Should never happen!! */
+       if (!IS_I965G(dev) && intel_crtc->pipe == 0) {
+               printk(KERN_ERR "Can't support LVDS on pipe A\n");
+               return false;
+       }
+
+       /* Should never happen!! */
+       list_for_each_entry(tmp_encoder, &dev->mode_config.encoder_list, head) {
+               if (tmp_encoder != encoder && tmp_encoder->crtc == encoder->crtc) {
+                       printk(KERN_ERR "Can't enable LVDS and another "
+                              "encoder on the same pipe\n");
+                       return false;
+               }
+       }
+
+       /*
+        * If we have timings from the BIOS for the panel, put them in
+        * to the adjusted mode.  The CRTC will be set up for this mode,
+        * with the panel scaling set up to source from the H/VDisplay
+        * of the original mode.
+        */
+       if (dev_priv->panel_fixed_mode != NULL) {
+               adjusted_mode->hdisplay = dev_priv->panel_fixed_mode->hdisplay;
+               adjusted_mode->hsync_start =
+                       dev_priv->panel_fixed_mode->hsync_start;
+               adjusted_mode->hsync_end =
+                       dev_priv->panel_fixed_mode->hsync_end;
+               adjusted_mode->htotal = dev_priv->panel_fixed_mode->htotal;
+               adjusted_mode->vdisplay = dev_priv->panel_fixed_mode->vdisplay;
+               adjusted_mode->vsync_start =
+                       dev_priv->panel_fixed_mode->vsync_start;
+               adjusted_mode->vsync_end =
+                       dev_priv->panel_fixed_mode->vsync_end;
+               adjusted_mode->vtotal = dev_priv->panel_fixed_mode->vtotal;
+               adjusted_mode->clock = dev_priv->panel_fixed_mode->clock;
+               drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V);
+       }
+
+       /*
+        * XXX: It would be nice to support lower refresh rates on the
+        * panels to reduce power consumption, and perhaps match the
+        * user's requested refresh rate.
+        */
+
+       return true;
+}
+
+static void intel_lvds_prepare(struct drm_encoder *encoder)
+{
+       struct drm_device *dev = encoder->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+
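+       /*
+        * Remember the current backlight level before the panel goes down
+        * so that intel_lvds_commit() can bring it back up.
+        */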
+       dev_priv->saveBLC_PWM_CTL = I915_READ(BLC_PWM_CTL);
+       dev_priv->backlight_duty_cycle = (dev_priv->saveBLC_PWM_CTL &
+                                      BACKLIGHT_DUTY_CYCLE_MASK);
+
+       intel_lvds_set_power(dev, false);
+}
+
+static void intel_lvds_commit(struct drm_encoder *encoder)
+{
+       struct drm_device *dev = encoder->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+
+       if (dev_priv->backlight_duty_cycle == 0)
+               dev_priv->backlight_duty_cycle =
+                       intel_lvds_get_max_backlight(dev);
+
+       intel_lvds_set_power(dev, true);
+}
+
+static void intel_lvds_mode_set(struct drm_encoder *encoder,
+                               struct drm_display_mode *mode,
+                               struct drm_display_mode *adjusted_mode)
+{
+       struct drm_device *dev = encoder->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+       u32 pfit_control;
+
+       /*
+        * The LVDS pin pair will already have been turned on in the
+        * intel_crtc_mode_set since it has a large impact on the DPLL
+        * settings.
+        */
+
+       /*
+        * Enable automatic panel scaling so that non-native modes fill the
+        * screen.  Should be enabled before the pipe is enabled, according to
+        * register description and PRM.
+        */
+       if (mode->hdisplay != adjusted_mode->hdisplay ||
+           mode->vdisplay != adjusted_mode->vdisplay)
+               pfit_control = (PFIT_ENABLE | VERT_AUTO_SCALE |
+                               HORIZ_AUTO_SCALE | VERT_INTERP_BILINEAR |
+                               HORIZ_INTERP_BILINEAR);
+       else
+               pfit_control = 0;
+
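+       /*
+        * Pre-965 parts take the dither enable here; on 965 the panel
+        * fitter is instead bound to the pipe driving the LVDS.
+        */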
+       if (!IS_I965G(dev)) {
+               if (dev_priv->panel_wants_dither)
+                       pfit_control |= PANEL_8TO6_DITHER_ENABLE;
+       } else {
+               pfit_control |= intel_crtc->pipe << PFIT_PIPE_SHIFT;
+       }
+
+       I915_WRITE(PFIT_CONTROL, pfit_control);
+}
+
+/**
+ * Detect the LVDS connection.
+ *
+ * This always returns connector_status_connected; the LVDS connector is only
+ * set up at init time if a panel was actually found.
+ */
+static enum drm_connector_status intel_lvds_detect(struct drm_connector *connector)
+{
+       return connector_status_connected;
+}
+
+/**
+ * Return the list of DDC modes if available, or the BIOS fixed mode otherwise.
+ */
+static int intel_lvds_get_modes(struct drm_connector *connector)
+{
+       struct drm_device *dev = connector->dev;
+       struct intel_output *intel_output = to_intel_output(connector);
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       int ret = 0;
+
+       ret = intel_ddc_get_modes(intel_output);
+
+       if (ret)
+               return ret;
+
+       /*
+        * We didn't get an EDID, so set wide sync ranges to make sure every
+        * mode is handed to mode_valid() for checking.
+        */
+       connector->display_info.min_vfreq = 0;
+       connector->display_info.max_vfreq = 200;
+       connector->display_info.min_hfreq = 0;
+       connector->display_info.max_hfreq = 200;
+
+       if (dev_priv->panel_fixed_mode != NULL) {
+               struct drm_display_mode *mode;
+
+               mutex_lock(&dev->mode_config.mutex);
+               mode = drm_mode_duplicate(dev, dev_priv->panel_fixed_mode);
+               drm_mode_probed_add(connector, mode);
+               mutex_unlock(&dev->mode_config.mutex);
+
+               return 1;
+       }
+
+       return 0;
+}
+
+/**
+ * intel_lvds_destroy - unregister and free LVDS structures
+ * @connector: connector to free
+ *
+ * Unregister the DDC bus for this connector then free the driver private
+ * structure.
+ */
+static void intel_lvds_destroy(struct drm_connector *connector)
+{
+       struct intel_output *intel_output = to_intel_output(connector);
+
+       if (intel_output->ddc_bus)
+               intel_i2c_destroy(intel_output->ddc_bus);
+       drm_sysfs_connector_remove(connector);
+       drm_connector_cleanup(connector);
+       kfree(connector);
+}
+
+static const struct drm_encoder_helper_funcs intel_lvds_helper_funcs = {
+       .dpms = intel_lvds_dpms,
+       .mode_fixup = intel_lvds_mode_fixup,
+       .prepare = intel_lvds_prepare,
+       .mode_set = intel_lvds_mode_set,
+       .commit = intel_lvds_commit,
+};
+
+static const struct drm_connector_helper_funcs intel_lvds_connector_helper_funcs = {
+       .get_modes = intel_lvds_get_modes,
+       .mode_valid = intel_lvds_mode_valid,
+       .best_encoder = intel_best_encoder,
+};
+
+static const struct drm_connector_funcs intel_lvds_connector_funcs = {
+       .save = intel_lvds_save,
+       .restore = intel_lvds_restore,
+       .detect = intel_lvds_detect,
+       .fill_modes = drm_helper_probe_single_connector_modes,
+       .destroy = intel_lvds_destroy,
+};
+
+
+static void intel_lvds_enc_destroy(struct drm_encoder *encoder)
+{
+       drm_encoder_cleanup(encoder);
+}
+
+static const struct drm_encoder_funcs intel_lvds_enc_funcs = {
+       .destroy = intel_lvds_enc_destroy,
+};
+
+
+
+/**
+ * intel_lvds_init - setup LVDS connectors on this device
+ * @dev: drm device
+ *
+ * Create the connector, register the LVDS DDC bus, and try to figure out what
+ * modes we can display on the LVDS panel (if present).
+ */
+void intel_lvds_init(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_output *intel_output;
+       struct drm_connector *connector;
+       struct drm_encoder *encoder;
+       struct drm_display_mode *scan; /* *modes, *bios_mode; */
+       struct drm_crtc *crtc;
+       u32 lvds;
+       int pipe;
+
+       intel_output = kzalloc(sizeof(struct intel_output), GFP_KERNEL);
+       if (!intel_output) {
+               return;
+       }
+
+       connector = &intel_output->base;
+       encoder = &intel_output->enc;
+       drm_connector_init(dev, &intel_output->base, &intel_lvds_connector_funcs,
+                          DRM_MODE_CONNECTOR_LVDS);
+
+       drm_encoder_init(dev, &intel_output->enc, &intel_lvds_enc_funcs,
+                        DRM_MODE_ENCODER_LVDS);
+
+       drm_mode_connector_attach_encoder(&intel_output->base, &intel_output->enc);
+       intel_output->type = INTEL_OUTPUT_LVDS;
+
+       drm_encoder_helper_add(encoder, &intel_lvds_helper_funcs);
+       drm_connector_helper_add(connector, &intel_lvds_connector_helper_funcs);
+       connector->display_info.subpixel_order = SubPixelHorizontalRGB;
+       connector->interlace_allowed = false;
+       connector->doublescan_allowed = false;
+
+
+       /*
+        * LVDS discovery:
+        * 1) check for EDID on DDC
+        * 2) check for VBT data
+        * 3) check to see if LVDS is already on
+        *    if none of the above, no panel
+        * 4) make sure lid is open
+        *    if closed, act like it's not there for now
+        */
+
+       /* Set up the DDC bus. */
+       intel_output->ddc_bus = intel_i2c_create(dev, GPIOC, "LVDSDDC_C");
+       if (!intel_output->ddc_bus) {
+               dev_printk(KERN_ERR, &dev->pdev->dev, "DDC bus registration "
+                          "failed.\n");
+               goto failed;
+       }
+
+       /*
+        * Attempt to get the fixed panel mode from DDC.  Assume that the
+        * preferred mode is the right one.
+        */
+       intel_ddc_get_modes(intel_output);
+
+       list_for_each_entry(scan, &connector->probed_modes, head) {
+               mutex_lock(&dev->mode_config.mutex);
+               if (scan->type & DRM_MODE_TYPE_PREFERRED) {
+                       dev_priv->panel_fixed_mode =
+                               drm_mode_duplicate(dev, scan);
+                       mutex_unlock(&dev->mode_config.mutex);
+                       goto out; /* FIXME: check for quirks */
+               }
+               mutex_unlock(&dev->mode_config.mutex);
+       }
+
+       /* Failed to get EDID, what about VBT? */
+       if (dev_priv->vbt_mode) {
+               mutex_lock(&dev->mode_config.mutex);
+               dev_priv->panel_fixed_mode =
+                       drm_mode_duplicate(dev, dev_priv->vbt_mode);
+               mutex_unlock(&dev->mode_config.mutex);
+       }
+
+       /*
+        * If we didn't get EDID, try checking if the panel is already turned
+        * on.  If so, assume that whatever is currently programmed is the
+        * correct mode.
+        */
+       lvds = I915_READ(LVDS);
+       pipe = (lvds & LVDS_PIPEB_SELECT) ? 1 : 0;
+       crtc = intel_get_crtc_from_pipe(dev, pipe);
+
+       if (crtc && (lvds & LVDS_PORT_EN)) {
+               dev_priv->panel_fixed_mode = intel_crtc_mode_get(dev, crtc);
+               if (dev_priv->panel_fixed_mode) {
+                       dev_priv->panel_fixed_mode->type |=
+                               DRM_MODE_TYPE_PREFERRED;
+                       goto out; /* FIXME: check for quirks */
+               }
+       }
+
+       /* If we still don't have a mode after all that, give up. */
+       if (!dev_priv->panel_fixed_mode)
+               goto failed;
+
+       /* FIXME: detect aopen & mac mini type stuff automatically? */
+       /*
+        * Blacklist machines with BIOSes that list an LVDS panel without
+        * actually having one.
+        */
+       if (IS_I945GM(dev)) {
+               /* aopen mini pc */
+               if (dev->pdev->subsystem_vendor == 0xa0a0)
+                       goto failed;
+
+               if ((dev->pdev->subsystem_vendor == 0x8086) &&
+                   (dev->pdev->subsystem_device == 0x7270)) {
+                       /* It's a Mac Mini or Macbook Pro.
+                        *
+                        * Apple hardware is out to get us.  The macbook pro
+                        * has a real LVDS panel, but the mac mini does not,
+                        * and they have the same device IDs.  We'll
+                        * distinguish by panel size, on the assumption
+                        * that Apple isn't about to make any machines with an
+                        * 800x600 display.
+                        */
+
+                       if (dev_priv->panel_fixed_mode != NULL &&
+                           dev_priv->panel_fixed_mode->hdisplay == 800 &&
+                           dev_priv->panel_fixed_mode->vdisplay == 600) {
+                               DRM_DEBUG("Suspected Mac Mini, ignoring the LVDS\n");
+                               goto failed;
+                       }
+               }
+       }
+
+
+out:
+       drm_sysfs_connector_add(connector);
+       return;
+
+failed:
+       DRM_DEBUG("No LVDS modes found, disabling.\n");
+       if (intel_output->ddc_bus)
+               intel_i2c_destroy(intel_output->ddc_bus);
+       drm_connector_cleanup(connector);
+       kfree(connector);
+}
diff --git a/drivers/gpu/drm/i915/intel_modes.c b/drivers/gpu/drm/i915/intel_modes.c
new file mode 100644 (file)
index 0000000..e42019e
--- /dev/null
@@ -0,0 +1,83 @@
+/*
+ * Copyright (c) 2007 Dave Airlie <airlied@linux.ie>
+ * Copyright (c) 2007 Intel Corporation
+ *   Jesse Barnes <jesse.barnes@intel.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/i2c.h>
+#include <linux/fb.h>
+#include "drmP.h"
+#include "intel_drv.h"
+
+/**
+ * intel_ddc_probe - check for a monitor on the DDC bus
+ * @intel_output: output whose DDC bus should be probed
+ *
+ * Attempt a one-byte EDID read at address 0x50 and report whether a device
+ * responded.
+ */
+bool intel_ddc_probe(struct intel_output *intel_output)
+{
+       u8 out_buf[] = { 0x0, 0x0};
+       u8 buf[2];
+       int ret;
+       struct i2c_msg msgs[] = {
+               {
+                       .addr = 0x50,
+                       .flags = 0,
+                       .len = 1,
+                       .buf = out_buf,
+               },
+               {
+                       .addr = 0x50,
+                       .flags = I2C_M_RD,
+                       .len = 1,
+                       .buf = buf,
+               }
+       };
+
+       ret = i2c_transfer(&intel_output->ddc_bus->adapter, msgs, 2);
+       if (ret == 2)
+               return true;
+
+       return false;
+}
+
+/**
+ * intel_ddc_get_modes - get modelist from monitor
+ * @intel_output: output whose DDC bus carries the EDID
+ *
+ * Fetch the EDID over DDC, attach it to the connector's EDID property and add
+ * the advertised modes to the probed mode list.
+ */
+int intel_ddc_get_modes(struct intel_output *intel_output)
+{
+       struct edid *edid;
+       int ret = 0;
+
+       edid = drm_get_edid(&intel_output->base,
+                           &intel_output->ddc_bus->adapter);
+       if (edid) {
+               drm_mode_connector_update_edid_property(&intel_output->base,
+                                                       edid);
+               ret = drm_add_edid_modes(&intel_output->base, edid);
+               kfree(edid);
+       }
+
+       return ret;
+}
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
new file mode 100644 (file)
index 0000000..fbbaa4f
--- /dev/null
@@ -0,0 +1,1128 @@
+/*
+ * Copyright 2006 Dave Airlie <airlied@linux.ie>
+ * Copyright © 2006-2007 Intel Corporation
+ *   Jesse Barnes <jesse.barnes@intel.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *     Eric Anholt <eric@anholt.net>
+ */
+#include <linux/i2c.h>
+#include <linux/delay.h>
+#include "drmP.h"
+#include "drm.h"
+#include "drm_crtc.h"
+#include "intel_drv.h"
+#include "i915_drm.h"
+#include "i915_drv.h"
+#include "intel_sdvo_regs.h"
+
+#undef SDVO_DEBUG
+
+struct intel_sdvo_priv {
+       struct intel_i2c_chan *i2c_bus;
+       int slaveaddr;
+       int output_device;
+
+       u16 active_outputs;
+
+       struct intel_sdvo_caps caps;
+       int pixel_clock_min, pixel_clock_max;
+
+       int save_sdvo_mult;
+       u16 save_active_outputs;
+       struct intel_sdvo_dtd save_input_dtd_1, save_input_dtd_2;
+       struct intel_sdvo_dtd save_output_dtd[16];
+       u32 save_SDVOX;
+};
+
+/**
+ * Writes the SDVOB or SDVOC with the given value, but always writes both
+ * SDVOB and SDVOC to work around apparent hardware issues (according to
+ * comments in the BIOS).
+ */
+static void intel_sdvo_write_sdvox(struct intel_output *intel_output, u32 val)
+{
+       struct drm_device *dev = intel_output->base.dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_sdvo_priv   *sdvo_priv = intel_output->dev_priv;
+       u32 bval = val, cval = val;
+       int i;
+
+       if (sdvo_priv->output_device == SDVOB) {
+               cval = I915_READ(SDVOC);
+       } else {
+               bval = I915_READ(SDVOB);
+       }
+       /*
+        * Write the registers twice for luck. Sometimes,
+        * writing them only once doesn't appear to 'stick'.
+        * The BIOS does this too. Yay, magic
+        */
+       for (i = 0; i < 2; i++) {
+               I915_WRITE(SDVOB, bval);
+               I915_READ(SDVOB);
+               I915_WRITE(SDVOC, cval);
+               I915_READ(SDVOC);
+       }
+}
+
+static bool intel_sdvo_read_byte(struct intel_output *intel_output, u8 addr,
+                                u8 *ch)
+{
+       struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
+       u8 out_buf[2];
+       u8 buf[2];
+       int ret;
+
+       struct i2c_msg msgs[] = {
+               {
+                       .addr = sdvo_priv->i2c_bus->slave_addr,
+                       .flags = 0,
+                       .len = 1,
+                       .buf = out_buf,
+               },
+               {
+                       .addr = sdvo_priv->i2c_bus->slave_addr,
+                       .flags = I2C_M_RD,
+                       .len = 1,
+                       .buf = buf,
+               }
+       };
+
+       out_buf[0] = addr;
+       out_buf[1] = 0;
+
+       ret = i2c_transfer(&sdvo_priv->i2c_bus->adapter, msgs, 2);
+       if (ret == 2) {
+               *ch = buf[0];
+               return true;
+       }
+
+       DRM_DEBUG("i2c transfer returned %d\n", ret);
+       return false;
+}
+
+static bool intel_sdvo_write_byte(struct intel_output *intel_output, int addr,
+                                 u8 ch)
+{
+       u8 out_buf[2];
+       struct i2c_msg msgs[] = {
+               {
+                       .addr = intel_output->i2c_bus->slave_addr,
+                       .flags = 0,
+                       .len = 2,
+                       .buf = out_buf,
+               }
+       };
+
+       out_buf[0] = addr;
+       out_buf[1] = ch;
+
+       if (i2c_transfer(&intel_output->i2c_bus->adapter, msgs, 1) == 1)
+               return true;
+
+       return false;
+}
+
+#define SDVO_CMD_NAME_ENTRY(cmd) {cmd, #cmd}
+/** Mapping of command numbers to names, for debug output */
+static const struct _sdvo_cmd_name {
+    u8 cmd;
+    char *name;
+} sdvo_cmd_names[] = {
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_RESET),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_DEVICE_CAPS),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FIRMWARE_REV),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TRAINED_INPUTS),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ACTIVE_OUTPUTS),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ACTIVE_OUTPUTS),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_IN_OUT_MAP),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_IN_OUT_MAP),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ATTACHED_DISPLAYS),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HOT_PLUG_SUPPORT),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ACTIVE_HOT_PLUG),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ACTIVE_HOT_PLUG),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INTERRUPT_EVENT_SOURCE),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TARGET_INPUT),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TARGET_OUTPUT),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INPUT_TIMINGS_PART1),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INPUT_TIMINGS_PART2),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_INPUT_TIMINGS_PART1),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_INPUT_TIMINGS_PART2),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OUTPUT_TIMINGS_PART1),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OUTPUT_TIMINGS_PART2),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OUTPUT_TIMINGS_PART1),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OUTPUT_TIMINGS_PART2),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OUTPUT_PIXEL_CLOCK_RANGE),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_CLOCK_RATE_MULTS),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_CLOCK_RATE_MULT),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_CLOCK_RATE_MULT),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_TV_FORMATS),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TV_FORMAT),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TV_FORMAT),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TV_RESOLUTION_SUPPORT),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_CONTROL_BUS_SWITCH),
+};
+
+#define SDVO_NAME(dev_priv) ((dev_priv)->output_device == SDVOB ? "SDVOB" : "SDVOC")
+#define SDVO_PRIV(output)   ((struct intel_sdvo_priv *) (output)->dev_priv)
+
+#ifdef SDVO_DEBUG
+static void intel_sdvo_debug_write(struct intel_output *intel_output, u8 cmd,
+                                  void *args, int args_len)
+{
+       struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
+       int i;
+
+       DRM_DEBUG("%s: W: %02X ", SDVO_NAME(sdvo_priv), cmd);
+       for (i = 0; i < args_len; i++)
+               printk("%02X ", ((u8 *)args)[i]);
+       for (; i < 8; i++)
+               printk("   ");
+       for (i = 0; i < ARRAY_SIZE(sdvo_cmd_names); i++) {
+               if (cmd == sdvo_cmd_names[i].cmd) {
+                       printk("(%s)", sdvo_cmd_names[i].name);
+                       break;
+               }
+       }
+       if (i == ARRAY_SIZE(sdvo_cmd_names))
+               printk("(%02X)", cmd);
+       printk("\n");
+}
+#else
+#define intel_sdvo_debug_write(o, c, a, l)
+#endif
+
+static void intel_sdvo_write_cmd(struct intel_output *intel_output, u8 cmd,
+                                void *args, int args_len)
+{
+       int i;
+
+       intel_sdvo_debug_write(intel_output, cmd, args, args_len);
+
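+       /*
+        * Load the argument bytes into the ARG registers (ARG_0 downwards),
+        * then write the opcode, which starts command execution.
+        */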
+       for (i = 0; i < args_len; i++) {
+               intel_sdvo_write_byte(intel_output, SDVO_I2C_ARG_0 - i,
+                                     ((u8*)args)[i]);
+       }
+
+       intel_sdvo_write_byte(intel_output, SDVO_I2C_OPCODE, cmd);
+}
+
+#ifdef SDVO_DEBUG
+static const char *cmd_status_names[] = {
+       "Power on",
+       "Success",
+       "Not supported",
+       "Invalid arg",
+       "Pending",
+       "Target not specified",
+       "Scaling not supported"
+};
+
+static void intel_sdvo_debug_response(struct intel_output *intel_output,
+                                     void *response, int response_len,
+                                     u8 status)
+{
+       struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
+       int i;
+
+       DRM_DEBUG("%s: R: ", SDVO_NAME(sdvo_priv));
+       for (i = 0; i < response_len; i++)
+               printk("%02X ", ((u8 *)response)[i]);
+       for (; i < 8; i++)
+               printk("   ");
+       if (status <= SDVO_CMD_STATUS_SCALING_NOT_SUPP)
+               printk("(%s)", cmd_status_names[status]);
+       else
+               printk("(??? %d)", status);
+       printk("\n");
+}
+#else
+#define intel_sdvo_debug_response(o, r, l, s)
+#endif
+
+static u8 intel_sdvo_read_response(struct intel_output *intel_output,
+                                  void *response, int response_len)
+{
+       int i;
+       u8 status;
+       u8 retry = 50;
+
+       while (retry--) {
+               /* Read the command response */
+               for (i = 0; i < response_len; i++) {
+                       intel_sdvo_read_byte(intel_output,
+                                            SDVO_I2C_RETURN_0 + i,
+                                            &((u8 *)response)[i]);
+               }
+
+               /* read the return status */
+               intel_sdvo_read_byte(intel_output, SDVO_I2C_CMD_STATUS,
+                                    &status);
+
+               intel_sdvo_debug_response(intel_output, response, response_len,
+                                         status);
+               if (status != SDVO_CMD_STATUS_PENDING)
+                       return status;
+
+               mdelay(50);
+       }
+
+       return status;
+}
+
+static int intel_sdvo_get_pixel_multiplier(struct drm_display_mode *mode)
+{
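+       /*
+        * Pick a multiplier based on the dot clock: 1x at or above 100 MHz,
+        * 2x at or above 50 MHz, 4x below that.
+        */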
+       if (mode->clock >= 100000)
+               return 1;
+       else if (mode->clock >= 50000)
+               return 2;
+       else
+               return 4;
+}
+
+/**
+ * Don't check status code from this as it switches the bus back to the
+ * SDVO chips which defeats the purpose of doing a bus switch in the first
+ * place.
+ */
+static void intel_sdvo_set_control_bus_switch(struct intel_output *intel_output,
+                                             u8 target)
+{
+       intel_sdvo_write_cmd(intel_output, SDVO_CMD_SET_CONTROL_BUS_SWITCH, &target, 1);
+}
+
+static bool intel_sdvo_set_target_input(struct intel_output *intel_output, bool target_0, bool target_1)
+{
+       struct intel_sdvo_set_target_input_args targets = {0};
+       u8 status;
+
+       if (target_0 && target_1)
+               return false;
+
+       if (target_1)
+               targets.target_1 = 1;
+
+       intel_sdvo_write_cmd(intel_output, SDVO_CMD_SET_TARGET_INPUT, &targets,
+                            sizeof(targets));
+
+       status = intel_sdvo_read_response(intel_output, NULL, 0);
+
+       return (status == SDVO_CMD_STATUS_SUCCESS);
+}
+
+/**
+ * Return whether each input is trained.
+ *
+ * This function is making an assumption about the layout of the response,
+ * which should be checked against the docs.
+ */
+static bool intel_sdvo_get_trained_inputs(struct intel_output *intel_output, bool *input_1, bool *input_2)
+{
+       struct intel_sdvo_get_trained_inputs_response response;
+       u8 status;
+
+       intel_sdvo_write_cmd(intel_output, SDVO_CMD_GET_TRAINED_INPUTS, NULL, 0);
+       status = intel_sdvo_read_response(intel_output, &response, sizeof(response));
+       if (status != SDVO_CMD_STATUS_SUCCESS)
+               return false;
+
+       *input_1 = response.input0_trained;
+       *input_2 = response.input1_trained;
+       return true;
+}
+
+static bool intel_sdvo_get_active_outputs(struct intel_output *intel_output,
+                                         u16 *outputs)
+{
+       u8 status;
+
+       intel_sdvo_write_cmd(intel_output, SDVO_CMD_GET_ACTIVE_OUTPUTS, NULL, 0);
+       status = intel_sdvo_read_response(intel_output, outputs, sizeof(*outputs));
+
+       return (status == SDVO_CMD_STATUS_SUCCESS);
+}
+
+static bool intel_sdvo_set_active_outputs(struct intel_output *intel_output,
+                                         u16 outputs)
+{
+       u8 status;
+
+       intel_sdvo_write_cmd(intel_output, SDVO_CMD_SET_ACTIVE_OUTPUTS, &outputs,
+                            sizeof(outputs));
+       status = intel_sdvo_read_response(intel_output, NULL, 0);
+       return (status == SDVO_CMD_STATUS_SUCCESS);
+}
+
+static bool intel_sdvo_set_encoder_power_state(struct intel_output *intel_output,
+                                              int mode)
+{
+       u8 status, state = SDVO_ENCODER_STATE_ON;
+
+       switch (mode) {
+       case DRM_MODE_DPMS_ON:
+               state = SDVO_ENCODER_STATE_ON;
+               break;
+       case DRM_MODE_DPMS_STANDBY:
+               state = SDVO_ENCODER_STATE_STANDBY;
+               break;
+       case DRM_MODE_DPMS_SUSPEND:
+               state = SDVO_ENCODER_STATE_SUSPEND;
+               break;
+       case DRM_MODE_DPMS_OFF:
+               state = SDVO_ENCODER_STATE_OFF;
+               break;
+       }
+
+       intel_sdvo_write_cmd(intel_output, SDVO_CMD_SET_ENCODER_POWER_STATE, &state,
+                            sizeof(state));
+       status = intel_sdvo_read_response(intel_output, NULL, 0);
+
+       return (status == SDVO_CMD_STATUS_SUCCESS);
+}
+
+static bool intel_sdvo_get_input_pixel_clock_range(struct intel_output *intel_output,
+                                                  int *clock_min,
+                                                  int *clock_max)
+{
+       struct intel_sdvo_pixel_clock_range clocks;
+       u8 status;
+
+       intel_sdvo_write_cmd(intel_output, SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE,
+                            NULL, 0);
+
+       status = intel_sdvo_read_response(intel_output, &clocks, sizeof(clocks));
+
+       if (status != SDVO_CMD_STATUS_SUCCESS)
+               return false;
+
+       /* Convert the values from units of 10 kHz to kHz. */
+       *clock_min = clocks.min * 10;
+       *clock_max = clocks.max * 10;
+
+       return true;
+}
+
+static bool intel_sdvo_set_target_output(struct intel_output *intel_output,
+                                        u16 outputs)
+{
+       u8 status;
+
+       intel_sdvo_write_cmd(intel_output, SDVO_CMD_SET_TARGET_OUTPUT, &outputs,
+                            sizeof(outputs));
+
+       status = intel_sdvo_read_response(intel_output, NULL, 0);
+       return (status == SDVO_CMD_STATUS_SUCCESS);
+}
+
+static bool intel_sdvo_get_timing(struct intel_output *intel_output, u8 cmd,
+                                 struct intel_sdvo_dtd *dtd)
+{
+       u8 status;
+
+       intel_sdvo_write_cmd(intel_output, cmd, NULL, 0);
+       status = intel_sdvo_read_response(intel_output, &dtd->part1,
+                                         sizeof(dtd->part1));
+       if (status != SDVO_CMD_STATUS_SUCCESS)
+               return false;
+
+       intel_sdvo_write_cmd(intel_output, cmd + 1, NULL, 0);
+       status = intel_sdvo_read_response(intel_output, &dtd->part2,
+                                         sizeof(dtd->part2));
+       if (status != SDVO_CMD_STATUS_SUCCESS)
+               return false;
+
+       return true;
+}
+
+static bool intel_sdvo_get_input_timing(struct intel_output *intel_output,
+                                        struct intel_sdvo_dtd *dtd)
+{
+       return intel_sdvo_get_timing(intel_output,
+                                    SDVO_CMD_GET_INPUT_TIMINGS_PART1, dtd);
+}
+
+static bool intel_sdvo_get_output_timing(struct intel_output *intel_output,
+                                        struct intel_sdvo_dtd *dtd)
+{
+       return intel_sdvo_get_timing(intel_output,
+                                    SDVO_CMD_GET_OUTPUT_TIMINGS_PART1, dtd);
+}
+
+static bool intel_sdvo_set_timing(struct intel_output *intel_output, u8 cmd,
+                                 struct intel_sdvo_dtd *dtd)
+{
+       u8 status;
+
+       intel_sdvo_write_cmd(intel_output, cmd, &dtd->part1, sizeof(dtd->part1));
+       status = intel_sdvo_read_response(intel_output, NULL, 0);
+       if (status != SDVO_CMD_STATUS_SUCCESS)
+               return false;
+
+       intel_sdvo_write_cmd(intel_output, cmd + 1, &dtd->part2, sizeof(dtd->part2));
+       status = intel_sdvo_read_response(intel_output, NULL, 0);
+       if (status != SDVO_CMD_STATUS_SUCCESS)
+               return false;
+
+       return true;
+}
+
+static bool intel_sdvo_set_input_timing(struct intel_output *intel_output,
+                                        struct intel_sdvo_dtd *dtd)
+{
+       return intel_sdvo_set_timing(intel_output,
+                                    SDVO_CMD_SET_INPUT_TIMINGS_PART1, dtd);
+}
+
+static bool intel_sdvo_set_output_timing(struct intel_output *intel_output,
+                                        struct intel_sdvo_dtd *dtd)
+{
+       return intel_sdvo_set_timing(intel_output,
+                                    SDVO_CMD_SET_OUTPUT_TIMINGS_PART1, dtd);
+}
+
+
+static int intel_sdvo_get_clock_rate_mult(struct intel_output *intel_output)
+{
+       u8 response, status;
+
+       intel_sdvo_write_cmd(intel_output, SDVO_CMD_GET_CLOCK_RATE_MULT, NULL, 0);
+       status = intel_sdvo_read_response(intel_output, &response, 1);
+
+       if (status != SDVO_CMD_STATUS_SUCCESS) {
+               DRM_DEBUG("Couldn't get SDVO clock rate multiplier\n");
+               return SDVO_CLOCK_RATE_MULT_1X;
+       } else {
+               DRM_DEBUG("Current clock rate multiplier: %d\n", response);
+       }
+
+       return response;
+}
+
+static bool intel_sdvo_set_clock_rate_mult(struct intel_output *intel_output, u8 val)
+{
+       u8 status;
+
+       intel_sdvo_write_cmd(intel_output, SDVO_CMD_SET_CLOCK_RATE_MULT, &val, 1);
+       status = intel_sdvo_read_response(intel_output, NULL, 0);
+       if (status != SDVO_CMD_STATUS_SUCCESS)
+               return false;
+
+       return true;
+}
+
+static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder,
+                                 struct drm_display_mode *mode,
+                                 struct drm_display_mode *adjusted_mode)
+{
+       /* Make the CRTC code factor in the SDVO pixel multiplier.  The SDVO
+        * device will be told of the multiplier during mode_set.
+        */
+       adjusted_mode->clock *= intel_sdvo_get_pixel_multiplier(mode);
+       return true;
+}
+
+static void intel_sdvo_mode_set(struct drm_encoder *encoder,
+                               struct drm_display_mode *mode,
+                               struct drm_display_mode *adjusted_mode)
+{
+       struct drm_device *dev = encoder->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_crtc *crtc = encoder->crtc;
+       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+       struct intel_output *intel_output = enc_to_intel_output(encoder);
+       struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
+       u16 width, height;
+       u16 h_blank_len, h_sync_len, v_blank_len, v_sync_len;
+       u16 h_sync_offset, v_sync_offset;
+       u32 sdvox;
+       struct intel_sdvo_dtd output_dtd;
+       int sdvo_pixel_multiply;
+
+       if (!mode)
+               return;
+
+       width = mode->crtc_hdisplay;
+       height = mode->crtc_vdisplay;
+
+       /* do some mode translations */
+       h_blank_len = mode->crtc_hblank_end - mode->crtc_hblank_start;
+       h_sync_len = mode->crtc_hsync_end - mode->crtc_hsync_start;
+
+       v_blank_len = mode->crtc_vblank_end - mode->crtc_vblank_start;
+       v_sync_len = mode->crtc_vsync_end - mode->crtc_vsync_start;
+
+       h_sync_offset = mode->crtc_hsync_start - mode->crtc_hblank_start;
+       v_sync_offset = mode->crtc_vsync_start - mode->crtc_vblank_start;
+
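+       /*
+        * Pack the timings into the SDVO DTD layout: the low 8 bits go in
+        * the part1/part2 byte fields, the upper bits in the *_high fields.
+        */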
+       output_dtd.part1.clock = mode->clock / 10;
+       output_dtd.part1.h_active = width & 0xff;
+       output_dtd.part1.h_blank = h_blank_len & 0xff;
+       output_dtd.part1.h_high = (((width >> 8) & 0xf) << 4) |
+               ((h_blank_len >> 8) & 0xf);
+       output_dtd.part1.v_active = height & 0xff;
+       output_dtd.part1.v_blank = v_blank_len & 0xff;
+       output_dtd.part1.v_high = (((height >> 8) & 0xf) << 4) |
+               ((v_blank_len >> 8) & 0xf);
+
+       output_dtd.part2.h_sync_off = h_sync_offset;
+       output_dtd.part2.h_sync_width = h_sync_len & 0xff;
+       output_dtd.part2.v_sync_off_width = (v_sync_offset & 0xf) << 4 |
+               (v_sync_len & 0xf);
+       output_dtd.part2.sync_off_width_high = ((h_sync_offset & 0x300) >> 2) |
+               ((h_sync_len & 0x300) >> 4) | ((v_sync_offset & 0x30) >> 2) |
+               ((v_sync_len & 0x30) >> 4);
+
+       output_dtd.part2.dtd_flags = 0x18;
+       if (mode->flags & DRM_MODE_FLAG_PHSYNC)
+               output_dtd.part2.dtd_flags |= 0x2;
+       if (mode->flags & DRM_MODE_FLAG_PVSYNC)
+               output_dtd.part2.dtd_flags |= 0x4;
+
+       output_dtd.part2.sdvo_flags = 0;
+       output_dtd.part2.v_sync_off_high = v_sync_offset & 0xc0;
+       output_dtd.part2.reserved = 0;
+
+       /* Set the output timing to the screen */
+       intel_sdvo_set_target_output(intel_output, sdvo_priv->active_outputs);
+       intel_sdvo_set_output_timing(intel_output, &output_dtd);
+
+       /* Set the input timing to the screen. Assume always input 0. */
+       intel_sdvo_set_target_input(intel_output, true, false);
+
+       /* We would like to use i830_sdvo_create_preferred_input_timing() to
+        * provide the device with a timing it can support, if it supports that
+        * feature.  However, presumably we would need to adjust the CRTC to
+        * output the preferred timing, and we don't support that currently.
+        */
+       intel_sdvo_set_input_timing(intel_output, &output_dtd);
+
+       switch (intel_sdvo_get_pixel_multiplier(mode)) {
+       case 1:
+               intel_sdvo_set_clock_rate_mult(intel_output,
+                                              SDVO_CLOCK_RATE_MULT_1X);
+               break;
+       case 2:
+               intel_sdvo_set_clock_rate_mult(intel_output,
+                                              SDVO_CLOCK_RATE_MULT_2X);
+               break;
+       case 4:
+               intel_sdvo_set_clock_rate_mult(intel_output,
+                                              SDVO_CLOCK_RATE_MULT_4X);
+               break;
+       }
+
+       /* Set the SDVO control regs. */
+        if (0/*IS_I965GM(dev)*/) {
+                sdvox = SDVO_BORDER_ENABLE;
+        } else {
+                sdvox = I915_READ(sdvo_priv->output_device);
+                switch (sdvo_priv->output_device) {
+                case SDVOB:
+                        sdvox &= SDVOB_PRESERVE_MASK;
+                        break;
+                case SDVOC:
+                        sdvox &= SDVOC_PRESERVE_MASK;
+                        break;
+                }
+                sdvox |= (9 << 19) | SDVO_BORDER_ENABLE;
+        }
+       if (intel_crtc->pipe == 1)
+               sdvox |= SDVO_PIPE_B_SELECT;
+
+       sdvo_pixel_multiply = intel_sdvo_get_pixel_multiplier(mode);
+       if (IS_I965G(dev)) {
+               /* done in crtc_mode_set as the dpll_md reg must be written
+                  early */
+       } else if (IS_I945G(dev) || IS_I945GM(dev)) {
+               /* done in crtc_mode_set as it lives inside the
+                  dpll register */
+       } else {
+               sdvox |= (sdvo_pixel_multiply - 1) << SDVO_PORT_MULTIPLY_SHIFT;
+       }
+
+       intel_sdvo_write_sdvox(intel_output, sdvox);
+}
+
+static void intel_sdvo_dpms(struct drm_encoder *encoder, int mode)
+{
+       struct drm_device *dev = encoder->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_output *intel_output = enc_to_intel_output(encoder);
+       struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
+       u32 temp;
+
+       if (mode != DRM_MODE_DPMS_ON) {
+               intel_sdvo_set_active_outputs(intel_output, 0);
+               if (0)
+                       intel_sdvo_set_encoder_power_state(intel_output, mode);
+
+               if (mode == DRM_MODE_DPMS_OFF) {
+                       temp = I915_READ(sdvo_priv->output_device);
+                       if ((temp & SDVO_ENABLE) != 0) {
+                               intel_sdvo_write_sdvox(intel_output, temp & ~SDVO_ENABLE);
+                       }
+               }
+       } else {
+               bool input1, input2;
+               int i;
+               u8 status;
+
+               temp = I915_READ(sdvo_priv->output_device);
+               if ((temp & SDVO_ENABLE) == 0)
+                       intel_sdvo_write_sdvox(intel_output, temp | SDVO_ENABLE);
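+               /*
+                * Give the encoder a couple of vblanks to train its inputs
+                * before checking the sync status below.
+                */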
+               for (i = 0; i < 2; i++)
+                       intel_wait_for_vblank(dev);
+
+               status = intel_sdvo_get_trained_inputs(intel_output, &input1,
+                                                      &input2);
+
+
+               /*
+                * Warn if the device reported failure to sync.  A lot of
+                * SDVO devices fail to report sync, but if the status is a
+                * success we assume the output came up.
+                */
+               if (status == SDVO_CMD_STATUS_SUCCESS && !input1) {
+                       DRM_DEBUG("First %s output reported failure to sync\n",
+                                  SDVO_NAME(sdvo_priv));
+               }
+
+               if (0)
+                       intel_sdvo_set_encoder_power_state(intel_output, mode);
+               intel_sdvo_set_active_outputs(intel_output, sdvo_priv->active_outputs);
+       }
+       return;
+}
+
+static void intel_sdvo_save(struct drm_connector *connector)
+{
+       struct drm_device *dev = connector->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_output *intel_output = to_intel_output(connector);
+       struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
+       int o;
+
+       sdvo_priv->save_sdvo_mult = intel_sdvo_get_clock_rate_mult(intel_output);
+       intel_sdvo_get_active_outputs(intel_output, &sdvo_priv->save_active_outputs);
+
+       if (sdvo_priv->caps.sdvo_inputs_mask & 0x1) {
+               intel_sdvo_set_target_input(intel_output, true, false);
+               intel_sdvo_get_input_timing(intel_output,
+                                           &sdvo_priv->save_input_dtd_1);
+       }
+
+       if (sdvo_priv->caps.sdvo_inputs_mask & 0x2) {
+               intel_sdvo_set_target_input(intel_output, false, true);
+               intel_sdvo_get_input_timing(intel_output,
+                                           &sdvo_priv->save_input_dtd_2);
+       }
+
+       for (o = SDVO_OUTPUT_FIRST; o <= SDVO_OUTPUT_LAST; o++) {
+               u16 this_output = (1 << o);
+
+               if (sdvo_priv->caps.output_flags & this_output) {
+                       intel_sdvo_set_target_output(intel_output, this_output);
+                       intel_sdvo_get_output_timing(intel_output,
+                                                    &sdvo_priv->save_output_dtd[o]);
+               }
+       }
+
+       sdvo_priv->save_SDVOX = I915_READ(sdvo_priv->output_device);
+}
+
+static void intel_sdvo_restore(struct drm_connector *connector)
+{
+       struct drm_device *dev = connector->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_output *intel_output = to_intel_output(connector);
+       struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
+       int o;
+       int i;
+       bool input1, input2;
+       u8 status;
+
+       intel_sdvo_set_active_outputs(intel_output, 0);
+
+       for (o = SDVO_OUTPUT_FIRST; o <= SDVO_OUTPUT_LAST; o++) {
+               u16 this_output = (1 << o);
+
+               if (sdvo_priv->caps.output_flags & this_output) {
+                       intel_sdvo_set_target_output(intel_output, this_output);
+                       intel_sdvo_set_output_timing(intel_output, &sdvo_priv->save_output_dtd[o]);
+               }
+       }
+
+       if (sdvo_priv->caps.sdvo_inputs_mask & 0x1) {
+               intel_sdvo_set_target_input(intel_output, true, false);
+               intel_sdvo_set_input_timing(intel_output, &sdvo_priv->save_input_dtd_1);
+       }
+
+       if (sdvo_priv->caps.sdvo_inputs_mask & 0x2) {
+               intel_sdvo_set_target_input(intel_output, false, true);
+               intel_sdvo_set_input_timing(intel_output, &sdvo_priv->save_input_dtd_2);
+       }
+
+       intel_sdvo_set_clock_rate_mult(intel_output, sdvo_priv->save_sdvo_mult);
+
+       I915_WRITE(sdvo_priv->output_device, sdvo_priv->save_SDVOX);
+
+       if (sdvo_priv->save_SDVOX & SDVO_ENABLE) {
+               for (i = 0; i < 2; i++)
+                       intel_wait_for_vblank(dev);
+               status = intel_sdvo_get_trained_inputs(intel_output, &input1, &input2);
+               if (status == SDVO_CMD_STATUS_SUCCESS && !input1)
+                       DRM_DEBUG("First %s output reported failure to sync\n",
+                                  SDVO_NAME(sdvo_priv));
+       }
+
+       intel_sdvo_set_active_outputs(intel_output, sdvo_priv->save_active_outputs);
+}
+
+static int intel_sdvo_mode_valid(struct drm_connector *connector,
+                                struct drm_display_mode *mode)
+{
+       struct intel_output *intel_output = to_intel_output(connector);
+       struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
+
+       if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+               return MODE_NO_DBLESCAN;
+
+       if (sdvo_priv->pixel_clock_min > mode->clock)
+               return MODE_CLOCK_LOW;
+
+       if (sdvo_priv->pixel_clock_max < mode->clock)
+               return MODE_CLOCK_HIGH;
+
+       return MODE_OK;
+}
+
+static bool intel_sdvo_get_capabilities(struct intel_output *intel_output, struct intel_sdvo_caps *caps)
+{
+       u8 status;
+
+       intel_sdvo_write_cmd(intel_output, SDVO_CMD_GET_DEVICE_CAPS, NULL, 0);
+       status = intel_sdvo_read_response(intel_output, caps, sizeof(*caps));
+       if (status != SDVO_CMD_STATUS_SUCCESS)
+               return false;
+
+       return true;
+}
+
+struct drm_connector* intel_sdvo_find(struct drm_device *dev, int sdvoB)
+{
+       struct drm_connector *connector = NULL;
+       struct intel_output *iout = NULL;
+       struct intel_sdvo_priv *sdvo;
+
+       /* find the sdvo connector */
+       list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+               iout = to_intel_output(connector);
+
+               if (iout->type != INTEL_OUTPUT_SDVO)
+                       continue;
+
+               sdvo = iout->dev_priv;
+
+               if (sdvo->output_device == SDVOB && sdvoB)
+                       return connector;
+
+               if (sdvo->output_device == SDVOC && !sdvoB)
+                       return connector;
+
+       }
+
+       return NULL;
+}
+
+int intel_sdvo_supports_hotplug(struct drm_connector *connector)
+{
+       u8 response[2];
+       u8 status;
+       struct intel_output *intel_output;
+       DRM_DEBUG("\n");
+
+       if (!connector)
+               return 0;
+
+       intel_output = to_intel_output(connector);
+
+       intel_sdvo_write_cmd(intel_output, SDVO_CMD_GET_HOT_PLUG_SUPPORT, NULL, 0);
+       status = intel_sdvo_read_response(intel_output, &response, 2);
+
+       if (response[0] != 0)
+               return 1;
+
+       return 0;
+}
+
+void intel_sdvo_set_hotplug(struct drm_connector *connector, int on)
+{
+       u8 response[2];
+       u8 status;
+       struct intel_output *intel_output = to_intel_output(connector);
+
+       intel_sdvo_write_cmd(intel_output, SDVO_CMD_GET_ACTIVE_HOT_PLUG, NULL, 0);
+       intel_sdvo_read_response(intel_output, &response, 2);
+
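+       /*
+        * Enable hot plug by echoing the supported-source mask back as the
+        * active set; disable by writing an empty mask.
+        */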
+       if (on) {
+               intel_sdvo_write_cmd(intel_output, SDVO_CMD_GET_HOT_PLUG_SUPPORT, NULL, 0);
+               status = intel_sdvo_read_response(intel_output, &response, 2);
+
+               intel_sdvo_write_cmd(intel_output, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &response, 2);
+       } else {
+               response[0] = 0;
+               response[1] = 0;
+               intel_sdvo_write_cmd(intel_output, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &response, 2);
+       }
+
+       intel_sdvo_write_cmd(intel_output, SDVO_CMD_GET_ACTIVE_HOT_PLUG, NULL, 0);
+       intel_sdvo_read_response(intel_output, &response, 2);
+}
+
+static enum drm_connector_status intel_sdvo_detect(struct drm_connector *connector)
+{
+       u8 response[2];
+       u8 status;
+       struct intel_output *intel_output = to_intel_output(connector);
+
+       intel_sdvo_write_cmd(intel_output, SDVO_CMD_GET_ATTACHED_DISPLAYS, NULL, 0);
+       status = intel_sdvo_read_response(intel_output, &response, 2);
+
+       DRM_DEBUG("SDVO response %d %d\n", response[0], response[1]);
+       if ((response[0] != 0) || (response[1] != 0))
+               return connector_status_connected;
+       else
+               return connector_status_disconnected;
+}
+
+static int intel_sdvo_get_modes(struct drm_connector *connector)
+{
+       struct intel_output *intel_output = to_intel_output(connector);
+
+       /* set the bus switch and get the modes */
+       intel_sdvo_set_control_bus_switch(intel_output, SDVO_CONTROL_BUS_DDC2);
+       intel_ddc_get_modes(intel_output);
+
+       if (list_empty(&connector->probed_modes))
+               return 0;
+       return 1;
+}
+
+static void intel_sdvo_destroy(struct drm_connector *connector)
+{
+       struct intel_output *intel_output = to_intel_output(connector);
+
+       if (intel_output->i2c_bus)
+               intel_i2c_destroy(intel_output->i2c_bus);
+       drm_sysfs_connector_remove(connector);
+       drm_connector_cleanup(connector);
+       kfree(intel_output);
+}
+
+static const struct drm_encoder_helper_funcs intel_sdvo_helper_funcs = {
+       .dpms = intel_sdvo_dpms,
+       .mode_fixup = intel_sdvo_mode_fixup,
+       .prepare = intel_encoder_prepare,
+       .mode_set = intel_sdvo_mode_set,
+       .commit = intel_encoder_commit,
+};
+
+static const struct drm_connector_funcs intel_sdvo_connector_funcs = {
+       .save = intel_sdvo_save,
+       .restore = intel_sdvo_restore,
+       .detect = intel_sdvo_detect,
+       .fill_modes = drm_helper_probe_single_connector_modes,
+       .destroy = intel_sdvo_destroy,
+};
+
+static const struct drm_connector_helper_funcs intel_sdvo_connector_helper_funcs = {
+       .get_modes = intel_sdvo_get_modes,
+       .mode_valid = intel_sdvo_mode_valid,
+       .best_encoder = intel_best_encoder,
+};
+
+static void intel_sdvo_enc_destroy(struct drm_encoder *encoder)
+{
+       drm_encoder_cleanup(encoder);
+}
+
+static const struct drm_encoder_funcs intel_sdvo_enc_funcs = {
+       .destroy = intel_sdvo_enc_destroy,
+};
+
+
+void intel_sdvo_init(struct drm_device *dev, int output_device)
+{
+       struct drm_connector *connector;
+       struct intel_output *intel_output;
+       struct intel_sdvo_priv *sdvo_priv;
+       struct intel_i2c_chan *i2cbus = NULL;
+       int connector_type;
+       u8 ch[0x40];
+       int i;
+       int encoder_type, output_id;
+
+       intel_output = kzalloc(sizeof(struct intel_output) +
+                              sizeof(struct intel_sdvo_priv), GFP_KERNEL);
+       if (!intel_output) {
+               return;
+       }
+
+       connector = &intel_output->base;
+
+       drm_connector_init(dev, connector, &intel_sdvo_connector_funcs,
+                          DRM_MODE_CONNECTOR_Unknown);
+       drm_connector_helper_add(connector, &intel_sdvo_connector_helper_funcs);
+       sdvo_priv = (struct intel_sdvo_priv *)(intel_output + 1);
+       intel_output->type = INTEL_OUTPUT_SDVO;
+
+       connector->interlace_allowed = 0;
+       connector->doublescan_allowed = 0;
+
+       /* setup the DDC bus. */
+       if (output_device == SDVOB)
+               i2cbus = intel_i2c_create(dev, GPIOE, "SDVOCTRL_E for SDVOB");
+       else
+               i2cbus = intel_i2c_create(dev, GPIOE, "SDVOCTRL_E for SDVOC");
+
+       if (!i2cbus)
+               goto err_connector;
+
+       sdvo_priv->i2c_bus = i2cbus;
+
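+       /* SDVOB devices answer at 0x38 on the control bus, SDVOC at 0x39. */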
+       if (output_device == SDVOB) {
+               output_id = 1;
+               sdvo_priv->i2c_bus->slave_addr = 0x38;
+       } else {
+               output_id = 2;
+               sdvo_priv->i2c_bus->slave_addr = 0x39;
+       }
+
+       sdvo_priv->output_device = output_device;
+       intel_output->i2c_bus = i2cbus;
+       intel_output->dev_priv = sdvo_priv;
+
+
+       /* Read the regs to test if we can talk to the device */
+       for (i = 0; i < 0x40; i++) {
+               if (!intel_sdvo_read_byte(intel_output, i, &ch[i])) {
+                       DRM_DEBUG("No SDVO device found on SDVO%c\n",
+                                 output_device == SDVOB ? 'B' : 'C');
+                       goto err_i2c;
+               }
+       }
+
+       intel_sdvo_get_capabilities(intel_output, &sdvo_priv->caps);
+
+       memset(&sdvo_priv->active_outputs, 0, sizeof(sdvo_priv->active_outputs));
+
+       /* TODO, CVBS, SVID, YPRPB & SCART outputs. */
+       if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_RGB0) {
+               sdvo_priv->active_outputs = SDVO_OUTPUT_RGB0;
+               connector->display_info.subpixel_order = SubPixelHorizontalRGB;
+               encoder_type = DRM_MODE_ENCODER_DAC;
+               connector_type = DRM_MODE_CONNECTOR_VGA;
+       } else if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_RGB1) {
+               sdvo_priv->active_outputs = SDVO_OUTPUT_RGB1;
+               connector->display_info.subpixel_order = SubPixelHorizontalRGB;
+               encoder_type = DRM_MODE_ENCODER_DAC;
+               connector_type = DRM_MODE_CONNECTOR_VGA;
+       } else if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_TMDS0) {
+               sdvo_priv->active_outputs = SDVO_OUTPUT_TMDS0;
+               connector->display_info.subpixel_order = SubPixelHorizontalRGB;
+               encoder_type = DRM_MODE_ENCODER_TMDS;
+               connector_type = DRM_MODE_CONNECTOR_DVID;
+       } else if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_TMDS1) {
+               sdvo_priv->active_outputs = SDVO_OUTPUT_TMDS1;
+               connector->display_info.subpixel_order = SubPixelHorizontalRGB;
+               encoder_type = DRM_MODE_ENCODER_TMDS;
+               connector_type = DRM_MODE_CONNECTOR_DVID;
+       } else {
+               unsigned char bytes[2];
+
+               memcpy(bytes, &sdvo_priv->caps.output_flags, 2);
+               DRM_DEBUG("%s: No active RGB or TMDS outputs (0x%02x%02x)\n",
+                         SDVO_NAME(sdvo_priv),
+                         bytes[0], bytes[1]);
+               goto err_i2c;
+       }
+
+       drm_encoder_init(dev, &intel_output->enc, &intel_sdvo_enc_funcs, encoder_type);
+       drm_encoder_helper_add(&intel_output->enc, &intel_sdvo_helper_funcs);
+       connector->connector_type = connector_type;
+
+       drm_mode_connector_attach_encoder(&intel_output->base, &intel_output->enc);
+       drm_sysfs_connector_add(connector);
+
+       /* Set the input timing to the screen. Assume always input 0. */
+       intel_sdvo_set_target_input(intel_output, true, false);
+
+       intel_sdvo_get_input_pixel_clock_range(intel_output,
+                                              &sdvo_priv->pixel_clock_min,
+                                              &sdvo_priv->pixel_clock_max);
+
+
+       DRM_DEBUG("%s device VID/DID: %02X:%02X.%02X, "
+                 "clock range %dMHz - %dMHz, "
+                 "input 1: %c, input 2: %c, "
+                 "output 1: %c, output 2: %c\n",
+                 SDVO_NAME(sdvo_priv),
+                 sdvo_priv->caps.vendor_id, sdvo_priv->caps.device_id,
+                 sdvo_priv->caps.device_rev_id,
+                 sdvo_priv->pixel_clock_min / 1000,
+                 sdvo_priv->pixel_clock_max / 1000,
+                 (sdvo_priv->caps.sdvo_inputs_mask & 0x1) ? 'Y' : 'N',
+                 (sdvo_priv->caps.sdvo_inputs_mask & 0x2) ? 'Y' : 'N',
+                 /* check currently supported outputs */
+                 sdvo_priv->caps.output_flags &
+                       (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_RGB0) ? 'Y' : 'N',
+                 sdvo_priv->caps.output_flags &
+                       (SDVO_OUTPUT_TMDS1 | SDVO_OUTPUT_RGB1) ? 'Y' : 'N');
+
+       intel_output->ddc_bus = i2cbus;
+
+       return;
+
+err_i2c:
+       intel_i2c_destroy(intel_output->i2c_bus);
+err_connector:
+       drm_connector_cleanup(connector);
+       kfree(intel_output);
+
+       return;
+}
diff --git a/drivers/gpu/drm/i915/intel_sdvo_regs.h b/drivers/gpu/drm/i915/intel_sdvo_regs.h
new file mode 100644 (file)
index 0000000..861a43f
--- /dev/null
@@ -0,0 +1,327 @@
+/*
+ * Copyright © 2006-2007 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *     Eric Anholt <eric@anholt.net>
+ */
+
+/**
+ * @file SDVO command definitions and structures.
+ */
+
+#define SDVO_OUTPUT_FIRST   (0)
+#define SDVO_OUTPUT_TMDS0   (1 << 0)
+#define SDVO_OUTPUT_RGB0    (1 << 1)
+#define SDVO_OUTPUT_CVBS0   (1 << 2)
+#define SDVO_OUTPUT_SVID0   (1 << 3)
+#define SDVO_OUTPUT_YPRPB0  (1 << 4)
+#define SDVO_OUTPUT_SCART0  (1 << 5)
+#define SDVO_OUTPUT_LVDS0   (1 << 6)
+#define SDVO_OUTPUT_TMDS1   (1 << 8)
+#define SDVO_OUTPUT_RGB1    (1 << 9)
+#define SDVO_OUTPUT_CVBS1   (1 << 10)
+#define SDVO_OUTPUT_SVID1   (1 << 11)
+#define SDVO_OUTPUT_YPRPB1  (1 << 12)
+#define SDVO_OUTPUT_SCART1  (1 << 13)
+#define SDVO_OUTPUT_LVDS1   (1 << 14)
+#define SDVO_OUTPUT_LAST    (14)
+
+struct intel_sdvo_caps {
+    u8 vendor_id;
+    u8 device_id;
+    u8 device_rev_id;
+    u8 sdvo_version_major;
+    u8 sdvo_version_minor;
+    unsigned int sdvo_inputs_mask:2;
+    unsigned int smooth_scaling:1;
+    unsigned int sharp_scaling:1;
+    unsigned int up_scaling:1;
+    unsigned int down_scaling:1;
+    unsigned int stall_support:1;
+    unsigned int pad:1;
+    u16 output_flags;
+} __attribute__((packed));
+
+/** This matches the EDID DTD structure, more or less */
+struct intel_sdvo_dtd {
+    struct {
+       u16 clock;              /**< pixel clock, in 10kHz units */
+       u8 h_active;            /**< lower 8 bits (pixels) */
+       u8 h_blank;             /**< lower 8 bits (pixels) */
+       u8 h_high;              /**< upper 4 bits each h_active, h_blank */
+       u8 v_active;            /**< lower 8 bits (lines) */
+       u8 v_blank;             /**< lower 8 bits (lines) */
+       u8 v_high;              /**< upper 4 bits each v_active, v_blank */
+    } part1;
+
+    struct {
+       u8 h_sync_off;  /**< lower 8 bits, from hblank start */
+       u8 h_sync_width;        /**< lower 8 bits (pixels) */
+       /** lower 4 bits each vsync offset, vsync width */
+       u8 v_sync_off_width;
+       /**
+        * 2 high bits of hsync offset, 2 high bits of hsync width,
+        * bits 4-5 of vsync offset, and 2 high bits of vsync width.
+        */
+       u8 sync_off_width_high;
+       u8 dtd_flags;
+       u8 sdvo_flags;
+       /** bits 6-7 of vsync offset, stored at bits 6-7 of this byte */
+       u8 v_sync_off_high;
+       u8 reserved;
+    } part2;
+} __attribute__((packed));
+
+struct intel_sdvo_pixel_clock_range {
+    u16 min;                   /**< pixel clock, in 10kHz units */
+    u16 max;                   /**< pixel clock, in 10kHz units */
+} __attribute__((packed));
+
+struct intel_sdvo_preferred_input_timing_args {
+    u16 clock;
+    u16 width;
+    u16 height;
+} __attribute__((packed));
+
+/* I2C registers for SDVO */
+#define SDVO_I2C_ARG_0                         0x07
+#define SDVO_I2C_ARG_1                         0x06
+#define SDVO_I2C_ARG_2                         0x05
+#define SDVO_I2C_ARG_3                         0x04
+#define SDVO_I2C_ARG_4                         0x03
+#define SDVO_I2C_ARG_5                         0x02
+#define SDVO_I2C_ARG_6                         0x01
+#define SDVO_I2C_ARG_7                         0x00
+#define SDVO_I2C_OPCODE                                0x08
+#define SDVO_I2C_CMD_STATUS                    0x09
+#define SDVO_I2C_RETURN_0                      0x0a
+#define SDVO_I2C_RETURN_1                      0x0b
+#define SDVO_I2C_RETURN_2                      0x0c
+#define SDVO_I2C_RETURN_3                      0x0d
+#define SDVO_I2C_RETURN_4                      0x0e
+#define SDVO_I2C_RETURN_5                      0x0f
+#define SDVO_I2C_RETURN_6                      0x10
+#define SDVO_I2C_RETURN_7                      0x11
+#define SDVO_I2C_VENDOR_BEGIN                  0x20
+
+/* Status results */
+#define SDVO_CMD_STATUS_POWER_ON               0x0
+#define SDVO_CMD_STATUS_SUCCESS                        0x1
+#define SDVO_CMD_STATUS_NOTSUPP                        0x2
+#define SDVO_CMD_STATUS_INVALID_ARG            0x3
+#define SDVO_CMD_STATUS_PENDING                        0x4
+#define SDVO_CMD_STATUS_TARGET_NOT_SPECIFIED   0x5
+#define SDVO_CMD_STATUS_SCALING_NOT_SUPP       0x6
+
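+/*
+ * Informal sketch of the command flow implied by the register map above:
+ * the caller writes any arguments to the SDVO_I2C_ARG_* registers, writes
+ * the command byte to SDVO_I2C_OPCODE, polls SDVO_I2C_CMD_STATUS until it
+ * is no longer SDVO_CMD_STATUS_PENDING, and then reads any result bytes
+ * back from the SDVO_I2C_RETURN_* registers.
+ */
+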
+/* SDVO commands, argument/result registers */
+
+#define SDVO_CMD_RESET                                 0x01
+
+/** Returns a struct intel_sdvo_caps */
+#define SDVO_CMD_GET_DEVICE_CAPS                       0x02
+
+#define SDVO_CMD_GET_FIRMWARE_REV                      0x86
+# define SDVO_DEVICE_FIRMWARE_MINOR                    SDVO_I2C_RETURN_0
+# define SDVO_DEVICE_FIRMWARE_MAJOR                    SDVO_I2C_RETURN_1
+# define SDVO_DEVICE_FIRMWARE_PATCH                    SDVO_I2C_RETURN_2
+
+/**
+ * Reports which inputs are trained (managed to sync).
+ *
+ * Devices must have trained within 2 vsyncs of a mode change.
+ */
+#define SDVO_CMD_GET_TRAINED_INPUTS                    0x03
+struct intel_sdvo_get_trained_inputs_response {
+    unsigned int input0_trained:1;
+    unsigned int input1_trained:1;
+    unsigned int pad:6;
+} __attribute__((packed));
+
+/** Returns a struct intel_sdvo_output_flags of active outputs. */
+#define SDVO_CMD_GET_ACTIVE_OUTPUTS                    0x04
+
+/**
+ * Sets the current set of active outputs.
+ *
+ * Takes a struct intel_sdvo_output_flags.  Must be preceded by a SET_IN_OUT_MAP
+ * on multi-output devices.
+ */
+#define SDVO_CMD_SET_ACTIVE_OUTPUTS                    0x05
+
+/**
+ * Returns the current mapping of SDVO inputs to outputs on the device.
+ *
+ * Returns two struct intel_sdvo_output_flags structures.
+ */
+#define SDVO_CMD_GET_IN_OUT_MAP                                0x06
+
+/**
+ * Sets the current mapping of SDVO inputs to outputs on the device.
+ *
+ * Takes two struct intel_sdvo_output_flags structures.
+ */
+#define SDVO_CMD_SET_IN_OUT_MAP                                0x07
+
+/**
+ * Returns a struct intel_sdvo_output_flags of attached displays.
+ */
+#define SDVO_CMD_GET_ATTACHED_DISPLAYS                 0x0b
+
+/**
+ * Returns a struct intel_sdvo_output_flags of displays supporting hot plugging.
+ */
+#define SDVO_CMD_GET_HOT_PLUG_SUPPORT                  0x0c
+
+/**
+ * Takes a struct intel_sdvo_output_flags.
+ */
+#define SDVO_CMD_SET_ACTIVE_HOT_PLUG                   0x0d
+
+/**
+ * Returns a struct intel_sdvo_output_flags of displays with hot plug
+ * interrupts enabled.
+ */
+#define SDVO_CMD_GET_ACTIVE_HOT_PLUG                   0x0e
+
+#define SDVO_CMD_GET_INTERRUPT_EVENT_SOURCE            0x0f
+struct intel_sdvo_get_interrupt_event_source_response {
+    u16 interrupt_status;
+    unsigned int ambient_light_interrupt:1;
+    unsigned int pad:7;
+} __attribute__((packed));
+
+/**
+ * Selects which input is affected by future input commands.
+ *
+ * Commands affected include SET_INPUT_TIMINGS_PART[12],
+ * GET_INPUT_TIMINGS_PART[12], GET_PREFERRED_INPUT_TIMINGS_PART[12],
+ * GET_INPUT_PIXEL_CLOCK_RANGE, and CREATE_PREFERRED_INPUT_TIMINGS.
+ */
+#define SDVO_CMD_SET_TARGET_INPUT                      0x10
+struct intel_sdvo_set_target_input_args {
+    unsigned int target_1:1;
+    unsigned int pad:7;
+} __attribute__((packed));
+
+/**
+ * Takes a struct intel_sdvo_output_flags of which outputs are targeted by
+ * future output commands.
+ *
+ * Affected commands include SET_OUTPUT_TIMINGS_PART[12],
+ * GET_OUTPUT_TIMINGS_PART[12], and GET_OUTPUT_PIXEL_CLOCK_RANGE.
+ */
+#define SDVO_CMD_SET_TARGET_OUTPUT                     0x11
+
+#define SDVO_CMD_GET_INPUT_TIMINGS_PART1               0x12
+#define SDVO_CMD_GET_INPUT_TIMINGS_PART2               0x13
+#define SDVO_CMD_SET_INPUT_TIMINGS_PART1               0x14
+#define SDVO_CMD_SET_INPUT_TIMINGS_PART2               0x15
+#define SDVO_CMD_SET_OUTPUT_TIMINGS_PART1              0x16
+#define SDVO_CMD_SET_OUTPUT_TIMINGS_PART2              0x17
+#define SDVO_CMD_GET_OUTPUT_TIMINGS_PART1              0x18
+#define SDVO_CMD_GET_OUTPUT_TIMINGS_PART2              0x19
+/* Part 1 */
+# define SDVO_DTD_CLOCK_LOW                            SDVO_I2C_ARG_0
+# define SDVO_DTD_CLOCK_HIGH                           SDVO_I2C_ARG_1
+# define SDVO_DTD_H_ACTIVE                             SDVO_I2C_ARG_2
+# define SDVO_DTD_H_BLANK                              SDVO_I2C_ARG_3
+# define SDVO_DTD_H_HIGH                               SDVO_I2C_ARG_4
+# define SDVO_DTD_V_ACTIVE                             SDVO_I2C_ARG_5
+# define SDVO_DTD_V_BLANK                              SDVO_I2C_ARG_6
+# define SDVO_DTD_V_HIGH                               SDVO_I2C_ARG_7
+/* Part 2 */
+# define SDVO_DTD_HSYNC_OFF                            SDVO_I2C_ARG_0
+# define SDVO_DTD_HSYNC_WIDTH                          SDVO_I2C_ARG_1
+# define SDVO_DTD_VSYNC_OFF_WIDTH                      SDVO_I2C_ARG_2
+# define SDVO_DTD_SYNC_OFF_WIDTH_HIGH                  SDVO_I2C_ARG_3
+# define SDVO_DTD_DTD_FLAGS                            SDVO_I2C_ARG_4
+# define SDVO_DTD_DTD_FLAG_INTERLACED                          (1 << 7)
+# define SDVO_DTD_DTD_FLAG_STEREO_MASK                         (3 << 5)
+# define SDVO_DTD_DTD_FLAG_INPUT_MASK                          (3 << 3)
+# define SDVO_DTD_DTD_FLAG_SYNC_MASK                           (3 << 1)
+# define SDVO_DTD_SDVO_FLAGS                           SDVO_I2C_ARG_5
+# define SDVO_DTD_SDVO_FLAG_STALL                              (1 << 7)
+# define SDVO_DTD_SDVO_FLAG_CENTERED                           (0 << 6)
+# define SDVO_DTD_SDVO_FLAG_UPPER_LEFT                         (1 << 6)
+# define SDVO_DTD_SDVO_FLAG_SCALING_MASK                       (3 << 4)
+# define SDVO_DTD_SDVO_FLAG_SCALING_NONE                       (0 << 4)
+# define SDVO_DTD_SDVO_FLAG_SCALING_SHARP                      (1 << 4)
+# define SDVO_DTD_SDVO_FLAG_SCALING_SMOOTH                     (2 << 4)
+# define SDVO_DTD_VSYNC_OFF_HIGH                       SDVO_I2C_ARG_6
+
+/**
+ * Generates a DTD based on the given width, height, and flags.
+ *
+ * This will be supported by any device supporting scaling or interlaced
+ * modes.
+ */
+#define SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING         0x1a
+# define SDVO_PREFERRED_INPUT_TIMING_CLOCK_LOW         SDVO_I2C_ARG_0
+# define SDVO_PREFERRED_INPUT_TIMING_CLOCK_HIGH                SDVO_I2C_ARG_1
+# define SDVO_PREFERRED_INPUT_TIMING_WIDTH_LOW         SDVO_I2C_ARG_2
+# define SDVO_PREFERRED_INPUT_TIMING_WIDTH_HIGH                SDVO_I2C_ARG_3
+# define SDVO_PREFERRED_INPUT_TIMING_HEIGHT_LOW                SDVO_I2C_ARG_4
+# define SDVO_PREFERRED_INPUT_TIMING_HEIGHT_HIGH       SDVO_I2C_ARG_5
+# define SDVO_PREFERRED_INPUT_TIMING_FLAGS             SDVO_I2C_ARG_6
+# define SDVO_PREFERRED_INPUT_TIMING_FLAGS_INTERLACED          (1 << 0)
+# define SDVO_PREFERRED_INPUT_TIMING_FLAGS_SCALED              (1 << 1)
+
+#define SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1      0x1b
+#define SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2      0x1c
+
+/** Returns a struct intel_sdvo_pixel_clock_range */
+#define SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE           0x1d
+/** Returns a struct intel_sdvo_pixel_clock_range */
+#define SDVO_CMD_GET_OUTPUT_PIXEL_CLOCK_RANGE          0x1e
+
+/** Returns a byte bitfield containing SDVO_CLOCK_RATE_MULT_* flags */
+#define SDVO_CMD_GET_SUPPORTED_CLOCK_RATE_MULTS                0x1f
+
+/** Returns a byte containing a SDVO_CLOCK_RATE_MULT_* flag */
+#define SDVO_CMD_GET_CLOCK_RATE_MULT                   0x20
+/** Takes a byte containing a SDVO_CLOCK_RATE_MULT_* flag */
+#define SDVO_CMD_SET_CLOCK_RATE_MULT                   0x21
+# define SDVO_CLOCK_RATE_MULT_1X                               (1 << 0)
+# define SDVO_CLOCK_RATE_MULT_2X                               (1 << 1)
+# define SDVO_CLOCK_RATE_MULT_4X                               (1 << 3)
+
+#define SDVO_CMD_GET_SUPPORTED_TV_FORMATS              0x27
+
+#define SDVO_CMD_GET_TV_FORMAT                         0x28
+
+#define SDVO_CMD_SET_TV_FORMAT                         0x29
+
+#define SDVO_CMD_GET_SUPPORTED_POWER_STATES            0x2a
+#define SDVO_CMD_GET_ENCODER_POWER_STATE               0x2b
+#define SDVO_CMD_SET_ENCODER_POWER_STATE               0x2c
+# define SDVO_ENCODER_STATE_ON                                 (1 << 0)
+# define SDVO_ENCODER_STATE_STANDBY                            (1 << 1)
+# define SDVO_ENCODER_STATE_SUSPEND                            (1 << 2)
+# define SDVO_ENCODER_STATE_OFF                                        (1 << 3)
+
+#define SDVO_CMD_SET_TV_RESOLUTION_SUPPORT             0x93
+
+#define SDVO_CMD_SET_CONTROL_BUS_SWITCH                        0x7a
+# define SDVO_CONTROL_BUS_PROM                         0x0
+# define SDVO_CONTROL_BUS_DDC1                         0x1
+# define SDVO_CONTROL_BUS_DDC2                         0x2
+# define SDVO_CONTROL_BUS_DDC3                         0x3
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
new file mode 100644 (file)
index 0000000..fbb35dc
--- /dev/null
@@ -0,0 +1,1725 @@
+/*
+ * Copyright © 2006-2008 Intel Corporation
+ *   Jesse Barnes <jesse.barnes@intel.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Eric Anholt <eric@anholt.net>
+ *
+ */
+
+/** @file
+ * Integrated TV-out support for the 915GM and 945GM.
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "drm_crtc.h"
+#include "drm_edid.h"
+#include "intel_drv.h"
+#include "i915_drm.h"
+#include "i915_drv.h"
+
+enum tv_margin {
+       TV_MARGIN_LEFT, TV_MARGIN_TOP,
+       TV_MARGIN_RIGHT, TV_MARGIN_BOTTOM
+};
+
+/** Private structure for the integrated TV support */
+struct intel_tv_priv {
+       int type;
+       char *tv_format;
+       int margin[4];
+       u32 save_TV_H_CTL_1;
+       u32 save_TV_H_CTL_2;
+       u32 save_TV_H_CTL_3;
+       u32 save_TV_V_CTL_1;
+       u32 save_TV_V_CTL_2;
+       u32 save_TV_V_CTL_3;
+       u32 save_TV_V_CTL_4;
+       u32 save_TV_V_CTL_5;
+       u32 save_TV_V_CTL_6;
+       u32 save_TV_V_CTL_7;
+       u32 save_TV_SC_CTL_1, save_TV_SC_CTL_2, save_TV_SC_CTL_3;
+
+       u32 save_TV_CSC_Y;
+       u32 save_TV_CSC_Y2;
+       u32 save_TV_CSC_U;
+       u32 save_TV_CSC_U2;
+       u32 save_TV_CSC_V;
+       u32 save_TV_CSC_V2;
+       u32 save_TV_CLR_KNOBS;
+       u32 save_TV_CLR_LEVEL;
+       u32 save_TV_WIN_POS;
+       u32 save_TV_WIN_SIZE;
+       u32 save_TV_FILTER_CTL_1;
+       u32 save_TV_FILTER_CTL_2;
+       u32 save_TV_FILTER_CTL_3;
+
+       u32 save_TV_H_LUMA[60];
+       u32 save_TV_H_CHROMA[60];
+       u32 save_TV_V_LUMA[43];
+       u32 save_TV_V_CHROMA[43];
+
+       u32 save_TV_DAC;
+       u32 save_TV_CTL;
+};
+
+struct video_levels {
+       int blank, black, burst;
+};
+
+struct color_conversion {
+       u16 ry, gy, by, ay;
+       u16 ru, gu, bu, au;
+       u16 rv, gv, bv, av;
+};
+
+static const u32 filter_table[] = {
+       0xB1403000, 0x2E203500, 0x35002E20, 0x3000B140,
+       0x35A0B160, 0x2DC02E80, 0xB1403480, 0xB1603000,
+       0x2EA03640, 0x34002D80, 0x3000B120, 0x36E0B160,
+       0x2D202EF0, 0xB1203380, 0xB1603000, 0x2F303780,
+       0x33002CC0, 0x3000B100, 0x3820B160, 0x2C802F50,
+       0xB10032A0, 0xB1603000, 0x2F9038C0, 0x32202C20,
+       0x3000B0E0, 0x3980B160, 0x2BC02FC0, 0xB0E031C0,
+       0xB1603000, 0x2FF03A20, 0x31602B60, 0xB020B0C0,
+       0x3AE0B160, 0x2B001810, 0xB0C03120, 0xB140B020,
+       0x18283BA0, 0x30C02A80, 0xB020B0A0, 0x3C60B140,
+       0x2A201838, 0xB0A03080, 0xB120B020, 0x18383D20,
+       0x304029C0, 0xB040B080, 0x3DE0B100, 0x29601848,
+       0xB0803000, 0xB100B040, 0x18483EC0, 0xB0402900,
+       0xB040B060, 0x3F80B0C0, 0x28801858, 0xB060B080,
+       0xB0A0B060, 0x18602820, 0xB0A02820, 0x0000B060,
+       0xB1403000, 0x2E203500, 0x35002E20, 0x3000B140,
+       0x35A0B160, 0x2DC02E80, 0xB1403480, 0xB1603000,
+       0x2EA03640, 0x34002D80, 0x3000B120, 0x36E0B160,
+       0x2D202EF0, 0xB1203380, 0xB1603000, 0x2F303780,
+       0x33002CC0, 0x3000B100, 0x3820B160, 0x2C802F50,
+       0xB10032A0, 0xB1603000, 0x2F9038C0, 0x32202C20,
+       0x3000B0E0, 0x3980B160, 0x2BC02FC0, 0xB0E031C0,
+       0xB1603000, 0x2FF03A20, 0x31602B60, 0xB020B0C0,
+       0x3AE0B160, 0x2B001810, 0xB0C03120, 0xB140B020,
+       0x18283BA0, 0x30C02A80, 0xB020B0A0, 0x3C60B140,
+       0x2A201838, 0xB0A03080, 0xB120B020, 0x18383D20,
+       0x304029C0, 0xB040B080, 0x3DE0B100, 0x29601848,
+       0xB0803000, 0xB100B040, 0x18483EC0, 0xB0402900,
+       0xB040B060, 0x3F80B0C0, 0x28801858, 0xB060B080,
+       0xB0A0B060, 0x18602820, 0xB0A02820, 0x0000B060,
+       0x36403000, 0x2D002CC0, 0x30003640, 0x2D0036C0,
+       0x35C02CC0, 0x37403000, 0x2C802D40, 0x30003540,
+       0x2D8037C0, 0x34C02C40, 0x38403000, 0x2BC02E00,
+       0x30003440, 0x2E2038C0, 0x34002B80, 0x39803000,
+       0x2B402E40, 0x30003380, 0x2E603A00, 0x33402B00,
+       0x3A803040, 0x2A802EA0, 0x30403300, 0x2EC03B40,
+       0x32802A40, 0x3C003040, 0x2A002EC0, 0x30803240,
+       0x2EC03C80, 0x320029C0, 0x3D403080, 0x29402F00,
+       0x308031C0, 0x2F203DC0, 0x31802900, 0x3E8030C0,
+       0x28802F40, 0x30C03140, 0x2F203F40, 0x31402840,
+       0x28003100, 0x28002F00, 0x00003100, 0x36403000,
+       0x2D002CC0, 0x30003640, 0x2D0036C0,
+       0x35C02CC0, 0x37403000, 0x2C802D40, 0x30003540,
+       0x2D8037C0, 0x34C02C40, 0x38403000, 0x2BC02E00,
+       0x30003440, 0x2E2038C0, 0x34002B80, 0x39803000,
+       0x2B402E40, 0x30003380, 0x2E603A00, 0x33402B00,
+       0x3A803040, 0x2A802EA0, 0x30403300, 0x2EC03B40,
+       0x32802A40, 0x3C003040, 0x2A002EC0, 0x30803240,
+       0x2EC03C80, 0x320029C0, 0x3D403080, 0x29402F00,
+       0x308031C0, 0x2F203DC0, 0x31802900, 0x3E8030C0,
+       0x28802F40, 0x30C03140, 0x2F203F40, 0x31402840,
+       0x28003100, 0x28002F00, 0x00003100,
+};
+
+/*
+ * Color conversion values have 3 separate fixed point formats:
+ *
+ * 10 bit fields (ay, au)
+ *   1.9 fixed point (b.bbbbbbbbb)
+ * 11 bit fields (ry, by, ru, gu, gv)
+ *   exp.mantissa (ee.mmmmmmmmm)
+ *   ee = 00 = 10^-1 (0.mmmmmmmmm)
+ *   ee = 01 = 10^-2 (0.0mmmmmmmmm)
+ *   ee = 10 = 10^-3 (0.00mmmmmmmmm)
+ *   ee = 11 = 10^-4 (0.000mmmmmmmmm)
+ * 12 bit fields (gy, rv, bu)
+ *   exp.mantissa (eee.mmmmmmmmm)
+ *   eee = 000 = 10^-1 (0.mmmmmmmmm)
+ *   eee = 001 = 10^-2 (0.0mmmmmmmmm)
+ *   eee = 010 = 10^-3 (0.00mmmmmmmmm)
+ *   eee = 011 = 10^-4 (0.000mmmmmmmmm)
+ *   eee = 100 = reserved
+ *   eee = 101 = reserved
+ *   eee = 110 = reserved
+ *   eee = 111 = 10^0 (m.mmmmmmmm) (only usable for 1.0 representation)
+ *
+ * Saturation and contrast are 8 bits, with their own representation:
+ * 8 bit field (saturation, contrast)
+ *   exp.mantissa (ee.mmmmmm)
+ *   ee = 00 = 10^-1 (0.mmmmmm)
+ *   ee = 01 = 10^0 (m.mmmmm)
+ *   ee = 10 = 10^1 (mm.mmmm)
+ *   ee = 11 = 10^2 (mmm.mmm)
+ *
+ * Simple conversion function:
+ *
+ * static u32
+ * float_to_csc_11(float f)
+ * {
+ *     u32 exp;
+ *     u32 mant;
+ *     u32 ret;
+ *
+ *     if (f < 0)
+ *         f = -f;
+ *
+ *     if (f >= 1) {
+ *         exp = 0x7;
+ *        mant = 1 << 8;
+ *     } else {
+ *         for (exp = 0; exp < 3 && f < 0.5; exp++)
+ *            f *= 2.0;
+ *         mant = (f * (1 << 9) + 0.5);
+ *         if (mant >= (1 << 9))
+ *             mant = (1 << 9) - 1;
+ *     }
+ *     ret = (exp << 9) | mant;
+ *     return ret;
+ * }
+ */
+
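+/*
+ * Worked example of the 11-bit encoding, using values from the SDTV tables
+ * below: .ry = 0x0332 has ee = 0x332 >> 9 = 01 and mantissa = 0x332 & 0x1ff
+ * = 306, i.e. 0.0mmmmmmmmm = 306/1024 ~= 0.299; .gy = 0x012d is 301/512 ~=
+ * 0.587; .by = 0x07d3 has ee = 11 and mantissa 467, i.e. 467/4096 ~= 0.114,
+ * the usual R/G/B -> Y weights.
+ */
+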
+/*
+ * Behold, magic numbers!  If we plant them they might grow a big
+ * s-video cable to the sky... or something.
+ *
+ * Pre-converted to appropriate hex value.
+ */
+
+/*
+ * PAL & NTSC values for composite & s-video connections
+ */
+static const struct color_conversion ntsc_m_csc_composite = {
+       .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0104,
+       .ru = 0x0733, .gu = 0x052d, .bu = 0x05c7, .au = 0x0f00,
+       .rv = 0x0340, .gv = 0x030c, .bv = 0x06d0, .av = 0x0f00,
+};
+
+static const struct video_levels ntsc_m_levels_composite = {
+       .blank = 225, .black = 267, .burst = 113,
+};
+
+static const struct color_conversion ntsc_m_csc_svideo = {
+       .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0134,
+       .ru = 0x076a, .gu = 0x0564, .bu = 0x030d, .au = 0x0f00,
+       .rv = 0x037a, .gv = 0x033d, .bv = 0x06f6, .av = 0x0f00,
+};
+
+static const struct video_levels ntsc_m_levels_svideo = {
+       .blank = 266, .black = 316, .burst = 133,
+};
+
+static const struct color_conversion ntsc_j_csc_composite = {
+       .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0119,
+       .ru = 0x074c, .gu = 0x0546, .bu = 0x05ec, .au = 0x0f00,
+       .rv = 0x035a, .gv = 0x0322, .bv = 0x06e1, .av = 0x0f00,
+};
+
+static const struct video_levels ntsc_j_levels_composite = {
+       .blank = 225, .black = 225, .burst = 113,
+};
+
+static const struct color_conversion ntsc_j_csc_svideo = {
+       .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x014c,
+       .ru = 0x0788, .gu = 0x0581, .bu = 0x0322, .au = 0x0f00,
+       .rv = 0x0399, .gv = 0x0356, .bv = 0x070a, .av = 0x0f00,
+};
+
+static const struct video_levels ntsc_j_levels_svideo = {
+       .blank = 266, .black = 266, .burst = 133,
+};
+
+static const struct color_conversion pal_csc_composite = {
+       .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0113,
+       .ru = 0x0745, .gu = 0x053f, .bu = 0x05e1, .au = 0x0f00,
+       .rv = 0x0353, .gv = 0x031c, .bv = 0x06dc, .av = 0x0f00,
+};
+
+static const struct video_levels pal_levels_composite = {
+       .blank = 237, .black = 237, .burst = 118,
+};
+
+static const struct color_conversion pal_csc_svideo = {
+       .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0145,
+       .ru = 0x0780, .gu = 0x0579, .bu = 0x031c, .au = 0x0f00,
+       .rv = 0x0390, .gv = 0x034f, .bv = 0x0705, .av = 0x0f00,
+};
+
+static const struct video_levels pal_levels_svideo = {
+       .blank = 280, .black = 280, .burst = 139,
+};
+
+static const struct color_conversion pal_m_csc_composite = {
+       .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0104,
+       .ru = 0x0733, .gu = 0x052d, .bu = 0x05c7, .au = 0x0f00,
+       .rv = 0x0340, .gv = 0x030c, .bv = 0x06d0, .av = 0x0f00,
+};
+
+static const struct video_levels pal_m_levels_composite = {
+       .blank = 225, .black = 267, .burst = 113,
+};
+
+static const struct color_conversion pal_m_csc_svideo = {
+       .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0134,
+       .ru = 0x076a, .gu = 0x0564, .bu = 0x030d, .au = 0x0f00,
+       .rv = 0x037a, .gv = 0x033d, .bv = 0x06f6, .av = 0x0f00,
+};
+
+static const struct video_levels pal_m_levels_svideo = {
+       .blank = 266, .black = 316, .burst = 133,
+};
+
+static const struct color_conversion pal_n_csc_composite = {
+       .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0104,
+       .ru = 0x0733, .gu = 0x052d, .bu = 0x05c7, .au = 0x0f00,
+       .rv = 0x0340, .gv = 0x030c, .bv = 0x06d0, .av = 0x0f00,
+};
+
+static const struct video_levels pal_n_levels_composite = {
+       .blank = 225, .black = 267, .burst = 118,
+};
+
+static const struct color_conversion pal_n_csc_svideo = {
+       .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0134,
+       .ru = 0x076a, .gu = 0x0564, .bu = 0x030d, .au = 0x0f00,
+       .rv = 0x037a, .gv = 0x033d, .bv = 0x06f6, .av = 0x0f00,
+};
+
+static const struct video_levels pal_n_levels_svideo = {
+       .blank = 266, .black = 316, .burst = 139,
+};
+
+/*
+ * Component connections
+ */
+static const struct color_conversion sdtv_csc_yprpb = {
+       .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0146,
+       .ru = 0x0559, .gu = 0x0353, .bu = 0x0100, .au = 0x0f00,
+       .rv = 0x0100, .gv = 0x03ad, .bv = 0x074d, .av = 0x0f00,
+};
+
+static const struct color_conversion sdtv_csc_rgb = {
+       .ry = 0x0000, .gy = 0x0f00, .by = 0x0000, .ay = 0x0166,
+       .ru = 0x0000, .gu = 0x0000, .bu = 0x0f00, .au = 0x0166,
+       .rv = 0x0f00, .gv = 0x0000, .bv = 0x0000, .av = 0x0166,
+};
+
+static const struct color_conversion hdtv_csc_yprpb = {
+       .ry = 0x05b3, .gy = 0x016e, .by = 0x0728, .ay = 0x0146,
+       .ru = 0x07d5, .gu = 0x038b, .bu = 0x0100, .au = 0x0f00,
+       .rv = 0x0100, .gv = 0x03d1, .bv = 0x06bc, .av = 0x0f00,
+};
+
+static const struct color_conversion hdtv_csc_rgb = {
+       .ry = 0x0000, .gy = 0x0f00, .by = 0x0000, .ay = 0x0166,
+       .ru = 0x0000, .gu = 0x0000, .bu = 0x0f00, .au = 0x0166,
+       .rv = 0x0f00, .gv = 0x0000, .bv = 0x0000, .av = 0x0166,
+};
+
+static const struct video_levels component_levels = {
+       .blank = 279, .black = 279, .burst = 0,
+};
+
+
+struct tv_mode {
+       char *name;
+       int clock;
+       int refresh; /* in millihertz (for precision) */
+       u32 oversample;
+       int hsync_end, hblank_start, hblank_end, htotal;
+       bool progressive, trilevel_sync, component_only;
+       int vsync_start_f1, vsync_start_f2, vsync_len;
+       bool veq_ena;
+       int veq_start_f1, veq_start_f2, veq_len;
+       int vi_end_f1, vi_end_f2, nbr_end;
+       bool burst_ena;
+       int hburst_start, hburst_len;
+       int vburst_start_f1, vburst_end_f1;
+       int vburst_start_f2, vburst_end_f2;
+       int vburst_start_f3, vburst_end_f3;
+       int vburst_start_f4, vburst_end_f4;
+       /*
+        * subcarrier programming
+        */
+       int dda2_size, dda3_size, dda1_inc, dda2_inc, dda3_inc;
+       u32 sc_reset;
+       bool pal_burst;
+       /*
+        * blank/black levels
+        */
+       const struct video_levels *composite_levels, *svideo_levels;
+       const struct color_conversion *composite_color, *svideo_color;
+       const u32 *filter_table;
+       int max_srcw;
+};
+
+
+/*
+ * Sub carrier DDA
+ *
+ *  I think this works as follows:
+ *
+ *  subcarrier freq = pixel_clock * (dda1_inc + dda2_inc / dda2_size) / 4096
+ *
+ * Presumably, when dda3 is added in, it gets to adjust the dda2_inc value
+ *
+ * So,
+ *  dda1_ideal = subcarrier/pixel * 4096
+ *  dda1_inc = floor (dda1_ideal)
+ *  dda2 = dda1_ideal - dda1_inc
+ *
+ *  then pick a ratio for dda2 that gives the closest approximation. If
+ *  you can't get close enough, you can play with dda3 as well. This
+ *  seems likely to happen when dda2 is small, as the jumps would be larger.
+ *
+ * To invert this,
+ *
+ *  pixel_clock = subcarrier * 4096 / (dda1_inc + dda2_inc / dda2_size)
+ *
+ * The constants below were all computed using a 107.520MHz clock
+ */
+
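+/*
+ * Worked example for the NTSC-M entry below: 3.58MHz / 107.52MHz * 4096 =
+ * 136.381..., so dda1_inc = 136 and the remaining fraction (exactly 8/21)
+ * is expressed as dda2_inc / dda2_size = 7624 / 20013, with dda3 unused.
+ */
+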
+/**
+ * Register programming values for TV modes.
+ *
+ * These values already include the -1 adjustments that the hardware registers require.
+ */
+
+static const struct tv_mode tv_modes[] = {
+       {
+               .name           = "NTSC-M",
+               .clock          = 107520,
+               .refresh        = 29970,
+               .oversample     = TV_OVERSAMPLE_8X,
+               .component_only = 0,
+               /* 525 Lines, 60 Fields, 15.734KHz line, Sub-Carrier 3.580MHz */
+
+               .hsync_end      = 64,               .hblank_end         = 124,
+               .hblank_start   = 836,              .htotal             = 857,
+
+               .progressive    = false,            .trilevel_sync = false,
+
+               .vsync_start_f1 = 6,                .vsync_start_f2     = 7,
+               .vsync_len      = 6,
+
+               .veq_ena        = true,             .veq_start_f1       = 0,
+               .veq_start_f2   = 1,                .veq_len            = 18,
+
+               .vi_end_f1      = 20,               .vi_end_f2          = 21,
+               .nbr_end        = 240,
+
+               .burst_ena      = true,
+               .hburst_start   = 72,               .hburst_len         = 34,
+               .vburst_start_f1 = 9,               .vburst_end_f1      = 240,
+               .vburst_start_f2 = 10,              .vburst_end_f2      = 240,
+               .vburst_start_f3 = 9,               .vburst_end_f3      = 240,
+               .vburst_start_f4 = 10,              .vburst_end_f4      = 240,
+
+               /* desired 3.5800000 actual 3.5800000 clock 107.52 */
+               .dda1_inc       =    136,
+               .dda2_inc       =   7624,           .dda2_size          =  20013,
+               .dda3_inc       =      0,           .dda3_size          =      0,
+               .sc_reset       = TV_SC_RESET_EVERY_4,
+               .pal_burst      = false,
+
+               .composite_levels = &ntsc_m_levels_composite,
+               .composite_color = &ntsc_m_csc_composite,
+               .svideo_levels  = &ntsc_m_levels_svideo,
+               .svideo_color = &ntsc_m_csc_svideo,
+
+               .filter_table = filter_table,
+       },
+       {
+               .name           = "NTSC-443",
+               .clock          = 107520,
+               .refresh        = 29970,
+               .oversample     = TV_OVERSAMPLE_8X,
+               .component_only = 0,
+               /* 525 Lines, 60 Fields, 15.734KHz line, Sub-Carrier 4.43MHz */
+               .hsync_end      = 64,               .hblank_end         = 124,
+               .hblank_start   = 836,              .htotal             = 857,
+
+               .progressive    = false,            .trilevel_sync = false,
+
+               .vsync_start_f1 = 6,                .vsync_start_f2     = 7,
+               .vsync_len      = 6,
+
+               .veq_ena        = true,             .veq_start_f1       = 0,
+               .veq_start_f2   = 1,                .veq_len            = 18,
+
+               .vi_end_f1      = 20,               .vi_end_f2          = 21,
+               .nbr_end        = 240,
+
+               .burst_ena      = true,
+               .hburst_start   = 72,               .hburst_len         = 34,
+               .vburst_start_f1 = 9,               .vburst_end_f1      = 240,
+               .vburst_start_f2 = 10,              .vburst_end_f2      = 240,
+               .vburst_start_f3 = 9,               .vburst_end_f3      = 240,
+               .vburst_start_f4 = 10,              .vburst_end_f4      = 240,
+
+               /* desired 4.4336180 actual 4.4336180 clock 107.52 */
+               .dda1_inc       =    168,
+               .dda2_inc       =  18557,       .dda2_size      =  20625,
+               .dda3_inc       =      0,       .dda3_size      =      0,
+               .sc_reset   = TV_SC_RESET_EVERY_8,
+               .pal_burst  = true,
+
+               .composite_levels = &ntsc_m_levels_composite,
+               .composite_color = &ntsc_m_csc_composite,
+               .svideo_levels  = &ntsc_m_levels_svideo,
+               .svideo_color = &ntsc_m_csc_svideo,
+
+               .filter_table = filter_table,
+       },
+       {
+               .name           = "NTSC-J",
+               .clock          = 107520,
+               .refresh        = 29970,
+               .oversample     = TV_OVERSAMPLE_8X,
+               .component_only = 0,
+
+               /* 525 Lines, 60 Fields, 15.734KHz line, Sub-Carrier 3.580MHz */
+               .hsync_end      = 64,               .hblank_end         = 124,
+               .hblank_start = 836,        .htotal             = 857,
+
+               .progressive    = false,    .trilevel_sync = false,
+
+               .vsync_start_f1 = 6,        .vsync_start_f2     = 7,
+               .vsync_len      = 6,
+
+               .veq_ena        = true,             .veq_start_f1       = 0,
+               .veq_start_f2 = 1,          .veq_len            = 18,
+
+               .vi_end_f1      = 20,               .vi_end_f2          = 21,
+               .nbr_end        = 240,
+
+               .burst_ena      = true,
+               .hburst_start   = 72,               .hburst_len         = 34,
+               .vburst_start_f1 = 9,               .vburst_end_f1      = 240,
+               .vburst_start_f2 = 10,              .vburst_end_f2      = 240,
+               .vburst_start_f3 = 9,               .vburst_end_f3      = 240,
+               .vburst_start_f4 = 10,              .vburst_end_f4      = 240,
+
+               /* desired 3.5800000 actual 3.5800000 clock 107.52 */
+               .dda1_inc       =    136,
+               .dda2_inc       =   7624,           .dda2_size          =  20013,
+               .dda3_inc       =      0,           .dda3_size          =      0,
+               .sc_reset       = TV_SC_RESET_EVERY_4,
+               .pal_burst      = false,
+
+               .composite_levels = &ntsc_j_levels_composite,
+               .composite_color = &ntsc_j_csc_composite,
+               .svideo_levels  = &ntsc_j_levels_svideo,
+               .svideo_color = &ntsc_j_csc_svideo,
+
+               .filter_table = filter_table,
+       },
+       {
+               .name           = "PAL-M",
+               .clock          = 107520,
+               .refresh        = 29970,
+               .oversample     = TV_OVERSAMPLE_8X,
+               .component_only = 0,
+
+               /* 525 Lines, 60 Fields, 15.734KHz line, Sub-Carrier 3.580MHz */
+               .hsync_end      = 64,             .hblank_end           = 124,
+               .hblank_start = 836,      .htotal               = 857,
+
+               .progressive    = false,            .trilevel_sync = false,
+
+               .vsync_start_f1 = 6,                .vsync_start_f2     = 7,
+               .vsync_len      = 6,
+
+               .veq_ena        = true,             .veq_start_f1       = 0,
+               .veq_start_f2   = 1,                .veq_len            = 18,
+
+               .vi_end_f1      = 20,               .vi_end_f2          = 21,
+               .nbr_end        = 240,
+
+               .burst_ena      = true,
+               .hburst_start   = 72,               .hburst_len         = 34,
+               .vburst_start_f1 = 9,               .vburst_end_f1      = 240,
+               .vburst_start_f2 = 10,              .vburst_end_f2      = 240,
+               .vburst_start_f3 = 9,               .vburst_end_f3      = 240,
+               .vburst_start_f4 = 10,              .vburst_end_f4      = 240,
+
+               /* desired 3.5800000 actual 3.5800000 clock 107.52 */
+               .dda1_inc       =    136,
+               .dda2_inc       =    7624,          .dda2_size          =  20013,
+               .dda3_inc       =      0,           .dda3_size          =      0,
+               .sc_reset       = TV_SC_RESET_EVERY_4,
+               .pal_burst  = false,
+
+               .composite_levels = &pal_m_levels_composite,
+               .composite_color = &pal_m_csc_composite,
+               .svideo_levels  = &pal_m_levels_svideo,
+               .svideo_color = &pal_m_csc_svideo,
+
+               .filter_table = filter_table,
+       },
+       {
+               /* 625 Lines, 50 Fields, 15.625KHz line, Sub-Carrier 4.434MHz */
+               .name       = "PAL-N",
+               .clock          = 107520,
+               .refresh        = 25000,
+               .oversample     = TV_OVERSAMPLE_8X,
+               .component_only = 0,
+
+               .hsync_end      = 64,               .hblank_end         = 128,
+               .hblank_start = 844,        .htotal             = 863,
+
+               .progressive  = false,    .trilevel_sync = false,
+
+               .vsync_start_f1 = 6,       .vsync_start_f2      = 7,
+               .vsync_len      = 6,
+
+               .veq_ena        = true,             .veq_start_f1       = 0,
+               .veq_start_f2   = 1,                .veq_len            = 18,
+
+               .vi_end_f1      = 24,               .vi_end_f2          = 25,
+               .nbr_end        = 286,
+
+               .burst_ena      = true,
+               .hburst_start = 73,                 .hburst_len         = 34,
+               .vburst_start_f1 = 8,       .vburst_end_f1      = 285,
+               .vburst_start_f2 = 8,       .vburst_end_f2      = 286,
+               .vburst_start_f3 = 9,       .vburst_end_f3      = 286,
+               .vburst_start_f4 = 9,       .vburst_end_f4      = 285,
+
+               /* desired 4.4336180 actual 4.4336180 clock 107.52 */
+               .dda1_inc       =    168,
+               .dda2_inc       =  18557,       .dda2_size      =  20625,
+               .dda3_inc       =      0,       .dda3_size      =      0,
+               .sc_reset   = TV_SC_RESET_EVERY_8,
+               .pal_burst  = true,
+
+               .composite_levels = &pal_n_levels_composite,
+               .composite_color = &pal_n_csc_composite,
+               .svideo_levels  = &pal_n_levels_svideo,
+               .svideo_color = &pal_n_csc_svideo,
+
+               .filter_table = filter_table,
+       },
+       {
+               /* 625 Lines, 50 Fields, 15.625KHz line, Sub-Carrier 4.434MHz */
+               .name       = "PAL",
+               .clock          = 107520,
+               .refresh        = 25000,
+               .oversample     = TV_OVERSAMPLE_8X,
+               .component_only = 0,
+
+               .hsync_end      = 64,               .hblank_end         = 128,
+               .hblank_start   = 844,      .htotal             = 863,
+
+               .progressive    = false,    .trilevel_sync = false,
+
+               .vsync_start_f1 = 5,        .vsync_start_f2     = 6,
+               .vsync_len      = 5,
+
+               .veq_ena        = true,             .veq_start_f1       = 0,
+               .veq_start_f2   = 1,        .veq_len            = 15,
+
+               .vi_end_f1      = 24,               .vi_end_f2          = 25,
+               .nbr_end        = 286,
+
+               .burst_ena      = true,
+               .hburst_start   = 73,               .hburst_len         = 32,
+               .vburst_start_f1 = 8,               .vburst_end_f1      = 285,
+               .vburst_start_f2 = 8,               .vburst_end_f2      = 286,
+               .vburst_start_f3 = 9,               .vburst_end_f3      = 286,
+               .vburst_start_f4 = 9,               .vburst_end_f4      = 285,
+
+               /* desired 4.4336180 actual 4.4336180 clock 107.52 */
+               .dda1_inc       =    168,
+               .dda2_inc       =  18557,       .dda2_size      =  20625,
+               .dda3_inc       =      0,       .dda3_size      =      0,
+               .sc_reset   = TV_SC_RESET_EVERY_8,
+               .pal_burst  = true,
+
+               .composite_levels = &pal_levels_composite,
+               .composite_color = &pal_csc_composite,
+               .svideo_levels  = &pal_levels_svideo,
+               .svideo_color = &pal_csc_svideo,
+
+               .filter_table = filter_table,
+       },
+       {
+               .name       = "480p@59.94Hz",
+               .clock  = 107520,
+               .refresh        = 59940,
+               .oversample     = TV_OVERSAMPLE_4X,
+               .component_only = 1,
+
+               .hsync_end      = 64,               .hblank_end         = 122,
+               .hblank_start   = 842,              .htotal             = 857,
+
+               .progressive    = true,             .trilevel_sync = false,
+
+               .vsync_start_f1 = 12,               .vsync_start_f2     = 12,
+               .vsync_len      = 12,
+
+               .veq_ena        = false,
+
+               .vi_end_f1      = 44,               .vi_end_f2          = 44,
+               .nbr_end        = 496,
+
+               .burst_ena      = false,
+
+               .filter_table = filter_table,
+       },
+       {
+               .name       = "480p@60Hz",
+               .clock  = 107520,
+               .refresh        = 60000,
+               .oversample     = TV_OVERSAMPLE_4X,
+               .component_only = 1,
+
+               .hsync_end      = 64,               .hblank_end         = 122,
+               .hblank_start   = 842,              .htotal             = 856,
+
+               .progressive    = true,             .trilevel_sync = false,
+
+               .vsync_start_f1 = 12,               .vsync_start_f2     = 12,
+               .vsync_len      = 12,
+
+               .veq_ena        = false,
+
+               .vi_end_f1      = 44,               .vi_end_f2          = 44,
+               .nbr_end        = 496,
+
+               .burst_ena      = false,
+
+               .filter_table = filter_table,
+       },
+       {
+               .name       = "576p",
+               .clock  = 107520,
+               .refresh        = 50000,
+               .oversample     = TV_OVERSAMPLE_4X,
+               .component_only = 1,
+
+               .hsync_end      = 64,               .hblank_end         = 139,
+               .hblank_start   = 859,              .htotal             = 863,
+
+               .progressive    = true,         .trilevel_sync = false,
+
+               .vsync_start_f1 = 10,               .vsync_start_f2     = 10,
+               .vsync_len      = 10,
+
+               .veq_ena        = false,
+
+               .vi_end_f1      = 48,               .vi_end_f2          = 48,
+               .nbr_end        = 575,
+
+               .burst_ena      = false,
+
+               .filter_table = filter_table,
+       },
+       {
+               .name       = "720p@60Hz",
+               .clock          = 148800,
+               .refresh        = 60000,
+               .oversample     = TV_OVERSAMPLE_2X,
+               .component_only = 1,
+
+               .hsync_end      = 80,               .hblank_end         = 300,
+               .hblank_start   = 1580,             .htotal             = 1649,
+
+               .progressive    = true,             .trilevel_sync = true,
+
+               .vsync_start_f1 = 10,               .vsync_start_f2     = 10,
+               .vsync_len      = 10,
+
+               .veq_ena        = false,
+
+               .vi_end_f1      = 29,               .vi_end_f2          = 29,
+               .nbr_end        = 719,
+
+               .burst_ena      = false,
+
+               .filter_table = filter_table,
+       },
+       {
+               .name       = "720p@59.94Hz",
+               .clock          = 148800,
+               .refresh        = 59940,
+               .oversample     = TV_OVERSAMPLE_2X,
+               .component_only = 1,
+
+               .hsync_end      = 80,               .hblank_end         = 300,
+               .hblank_start   = 1580,             .htotal             = 1651,
+
+               .progressive    = true,             .trilevel_sync = true,
+
+               .vsync_start_f1 = 10,               .vsync_start_f2     = 10,
+               .vsync_len      = 10,
+
+               .veq_ena        = false,
+
+               .vi_end_f1      = 29,               .vi_end_f2          = 29,
+               .nbr_end        = 719,
+
+               .burst_ena      = false,
+
+               .filter_table = filter_table,
+       },
+       {
+               .name       = "720p@50Hz",
+               .clock          = 148800,
+               .refresh        = 50000,
+               .oversample     = TV_OVERSAMPLE_2X,
+               .component_only = 1,
+
+               .hsync_end      = 80,               .hblank_end         = 300,
+               .hblank_start   = 1580,             .htotal             = 1979,
+
+               .progressive    = true,                 .trilevel_sync = true,
+
+               .vsync_start_f1 = 10,               .vsync_start_f2     = 10,
+               .vsync_len      = 10,
+
+               .veq_ena        = false,
+
+               .vi_end_f1      = 29,               .vi_end_f2          = 29,
+               .nbr_end        = 719,
+
+               .burst_ena      = false,
+
+               .filter_table = filter_table,
+               .max_srcw = 800,
+       },
+       {
+               .name       = "1080i@50Hz",
+               .clock          = 148800,
+               .refresh        = 25000,
+               .oversample     = TV_OVERSAMPLE_2X,
+               .component_only = 1,
+
+               .hsync_end      = 88,               .hblank_end         = 235,
+               .hblank_start   = 2155,             .htotal             = 2639,
+
+               .progressive    = false,            .trilevel_sync = true,
+
+               .vsync_start_f1 = 4,              .vsync_start_f2     = 5,
+               .vsync_len      = 10,
+
+               .veq_ena        = true,             .veq_start_f1       = 4,
+               .veq_start_f2   = 4,        .veq_len            = 10,
+
+               .vi_end_f1      = 21,           .vi_end_f2          = 22,
+               .nbr_end        = 539,
+
+               .burst_ena      = false,
+
+               .filter_table = filter_table,
+       },
+       {
+               .name       = "1080i@60Hz",
+               .clock          = 148800,
+               .refresh        = 30000,
+               .oversample     = TV_OVERSAMPLE_2X,
+               .component_only = 1,
+
+               .hsync_end      = 88,               .hblank_end         = 235,
+               .hblank_start   = 2155,             .htotal             = 2199,
+
+               .progressive    = false,            .trilevel_sync = true,
+
+               .vsync_start_f1 = 4,               .vsync_start_f2     = 5,
+               .vsync_len      = 10,
+
+               .veq_ena        = true,             .veq_start_f1       = 4,
+               .veq_start_f2   = 4,                .veq_len            = 10,
+
+               .vi_end_f1      = 21,               .vi_end_f2          = 22,
+               .nbr_end        = 539,
+
+               .burst_ena      = false,
+
+               .filter_table = filter_table,
+       },
+       {
+               .name       = "1080i@59.94Hz",
+               .clock          = 148800,
+               .refresh        = 29970,
+               .oversample     = TV_OVERSAMPLE_2X,
+               .component_only = 1,
+
+               .hsync_end      = 88,               .hblank_end         = 235,
+               .hblank_start   = 2155,             .htotal             = 2200,
+
+               .progressive    = false,            .trilevel_sync = true,
+
+               .vsync_start_f1 = 4,            .vsync_start_f2    = 5,
+               .vsync_len      = 10,
+
+               .veq_ena        = true,             .veq_start_f1       = 4,
+               .veq_start_f2 = 4,                  .veq_len = 10,
+
+               .vi_end_f1      = 21,           .vi_end_f2              = 22,
+               .nbr_end        = 539,
+
+               .burst_ena      = false,
+
+               .filter_table = filter_table,
+       },
+};
+
+#define NUM_TV_MODES (sizeof(tv_modes) / sizeof(tv_modes[0]))
+
+static void
+intel_tv_dpms(struct drm_encoder *encoder, int mode)
+{
+       struct drm_device *dev = encoder->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+
+       switch (mode) {
+       case DRM_MODE_DPMS_ON:
+               I915_WRITE(TV_CTL, I915_READ(TV_CTL) | TV_ENC_ENABLE);
+               break;
+       case DRM_MODE_DPMS_STANDBY:
+       case DRM_MODE_DPMS_SUSPEND:
+       case DRM_MODE_DPMS_OFF:
+               I915_WRITE(TV_CTL, I915_READ(TV_CTL) & ~TV_ENC_ENABLE);
+               break;
+       }
+}
+
+static void
+intel_tv_save(struct drm_connector *connector)
+{
+       struct drm_device *dev = connector->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_output *intel_output = to_intel_output(connector);
+       struct intel_tv_priv *tv_priv = intel_output->dev_priv;
+       int i;
+
+       tv_priv->save_TV_H_CTL_1 = I915_READ(TV_H_CTL_1);
+       tv_priv->save_TV_H_CTL_2 = I915_READ(TV_H_CTL_2);
+       tv_priv->save_TV_H_CTL_3 = I915_READ(TV_H_CTL_3);
+       tv_priv->save_TV_V_CTL_1 = I915_READ(TV_V_CTL_1);
+       tv_priv->save_TV_V_CTL_2 = I915_READ(TV_V_CTL_2);
+       tv_priv->save_TV_V_CTL_3 = I915_READ(TV_V_CTL_3);
+       tv_priv->save_TV_V_CTL_4 = I915_READ(TV_V_CTL_4);
+       tv_priv->save_TV_V_CTL_5 = I915_READ(TV_V_CTL_5);
+       tv_priv->save_TV_V_CTL_6 = I915_READ(TV_V_CTL_6);
+       tv_priv->save_TV_V_CTL_7 = I915_READ(TV_V_CTL_7);
+       tv_priv->save_TV_SC_CTL_1 = I915_READ(TV_SC_CTL_1);
+       tv_priv->save_TV_SC_CTL_2 = I915_READ(TV_SC_CTL_2);
+       tv_priv->save_TV_SC_CTL_3 = I915_READ(TV_SC_CTL_3);
+
+       tv_priv->save_TV_CSC_Y = I915_READ(TV_CSC_Y);
+       tv_priv->save_TV_CSC_Y2 = I915_READ(TV_CSC_Y2);
+       tv_priv->save_TV_CSC_U = I915_READ(TV_CSC_U);
+       tv_priv->save_TV_CSC_U2 = I915_READ(TV_CSC_U2);
+       tv_priv->save_TV_CSC_V = I915_READ(TV_CSC_V);
+       tv_priv->save_TV_CSC_V2 = I915_READ(TV_CSC_V2);
+       tv_priv->save_TV_CLR_KNOBS = I915_READ(TV_CLR_KNOBS);
+       tv_priv->save_TV_CLR_LEVEL = I915_READ(TV_CLR_LEVEL);
+       tv_priv->save_TV_WIN_POS = I915_READ(TV_WIN_POS);
+       tv_priv->save_TV_WIN_SIZE = I915_READ(TV_WIN_SIZE);
+       tv_priv->save_TV_FILTER_CTL_1 = I915_READ(TV_FILTER_CTL_1);
+       tv_priv->save_TV_FILTER_CTL_2 = I915_READ(TV_FILTER_CTL_2);
+       tv_priv->save_TV_FILTER_CTL_3 = I915_READ(TV_FILTER_CTL_3);
+
+       for (i = 0; i < 60; i++)
+               tv_priv->save_TV_H_LUMA[i] = I915_READ(TV_H_LUMA_0 + (i << 2));
+       for (i = 0; i < 60; i++)
+               tv_priv->save_TV_H_CHROMA[i] = I915_READ(TV_H_CHROMA_0 + (i << 2));
+       for (i = 0; i < 43; i++)
+               tv_priv->save_TV_V_LUMA[i] = I915_READ(TV_V_LUMA_0 + (i << 2));
+       for (i = 0; i < 43; i++)
+               tv_priv->save_TV_V_CHROMA[i] = I915_READ(TV_V_CHROMA_0 + (i << 2));
+
+       tv_priv->save_TV_DAC = I915_READ(TV_DAC);
+       tv_priv->save_TV_CTL = I915_READ(TV_CTL);
+}
+
+static void
+intel_tv_restore(struct drm_connector *connector)
+{
+       struct drm_device *dev = connector->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_output *intel_output = to_intel_output(connector);
+       struct intel_tv_priv *tv_priv = intel_output->dev_priv;
+       struct drm_crtc *crtc = connector->encoder->crtc;
+       struct intel_crtc *intel_crtc;
+       int i;
+
+       /* FIXME: No CRTC? */
+       if (!crtc)
+               return;
+
+       intel_crtc = to_intel_crtc(crtc);
+       I915_WRITE(TV_H_CTL_1, tv_priv->save_TV_H_CTL_1);
+       I915_WRITE(TV_H_CTL_2, tv_priv->save_TV_H_CTL_2);
+       I915_WRITE(TV_H_CTL_3, tv_priv->save_TV_H_CTL_3);
+       I915_WRITE(TV_V_CTL_1, tv_priv->save_TV_V_CTL_1);
+       I915_WRITE(TV_V_CTL_2, tv_priv->save_TV_V_CTL_2);
+       I915_WRITE(TV_V_CTL_3, tv_priv->save_TV_V_CTL_3);
+       I915_WRITE(TV_V_CTL_4, tv_priv->save_TV_V_CTL_4);
+       I915_WRITE(TV_V_CTL_5, tv_priv->save_TV_V_CTL_5);
+       I915_WRITE(TV_V_CTL_6, tv_priv->save_TV_V_CTL_6);
+       I915_WRITE(TV_V_CTL_7, tv_priv->save_TV_V_CTL_7);
+       I915_WRITE(TV_SC_CTL_1, tv_priv->save_TV_SC_CTL_1);
+       I915_WRITE(TV_SC_CTL_2, tv_priv->save_TV_SC_CTL_2);
+       I915_WRITE(TV_SC_CTL_3, tv_priv->save_TV_SC_CTL_3);
+
+       I915_WRITE(TV_CSC_Y, tv_priv->save_TV_CSC_Y);
+       I915_WRITE(TV_CSC_Y2, tv_priv->save_TV_CSC_Y2);
+       I915_WRITE(TV_CSC_U, tv_priv->save_TV_CSC_U);
+       I915_WRITE(TV_CSC_U2, tv_priv->save_TV_CSC_U2);
+       I915_WRITE(TV_CSC_V, tv_priv->save_TV_CSC_V);
+       I915_WRITE(TV_CSC_V2, tv_priv->save_TV_CSC_V2);
+       I915_WRITE(TV_CLR_KNOBS, tv_priv->save_TV_CLR_KNOBS);
+       I915_WRITE(TV_CLR_LEVEL, tv_priv->save_TV_CLR_LEVEL);
+
+       {
+               int pipeconf_reg = (intel_crtc->pipe == 0) ?
+                       PIPEACONF : PIPEBCONF;
+               int dspcntr_reg = (intel_crtc->plane == 0) ?
+                       DSPACNTR : DSPBCNTR;
+               int pipeconf = I915_READ(pipeconf_reg);
+               int dspcntr = I915_READ(dspcntr_reg);
+               int dspbase_reg = (intel_crtc->plane == 0) ?
+                       DSPAADDR : DSPBADDR;
+               /* Pipe must be off here */
+               I915_WRITE(dspcntr_reg, dspcntr & ~DISPLAY_PLANE_ENABLE);
+               /* Flush the plane changes */
+               I915_WRITE(dspbase_reg, I915_READ(dspbase_reg));
+
+               if (!IS_I9XX(dev)) {
+                       /* Wait for vblank for the disable to take effect */
+                       intel_wait_for_vblank(dev);
+               }
+
+               I915_WRITE(pipeconf_reg, pipeconf & ~PIPEACONF_ENABLE);
+               /* Wait for vblank for the disable to take effect. */
+               intel_wait_for_vblank(dev);
+
+               /* Filter ctl must be set before TV_WIN_SIZE */
+               I915_WRITE(TV_FILTER_CTL_1, tv_priv->save_TV_FILTER_CTL_1);
+               I915_WRITE(TV_FILTER_CTL_2, tv_priv->save_TV_FILTER_CTL_2);
+               I915_WRITE(TV_FILTER_CTL_3, tv_priv->save_TV_FILTER_CTL_3);
+               I915_WRITE(TV_WIN_POS, tv_priv->save_TV_WIN_POS);
+               I915_WRITE(TV_WIN_SIZE, tv_priv->save_TV_WIN_SIZE);
+               I915_WRITE(pipeconf_reg, pipeconf);
+               I915_WRITE(dspcntr_reg, dspcntr);
+               /* Flush the plane changes */
+               I915_WRITE(dspbase_reg, I915_READ(dspbase_reg));
+       }
+
+       for (i = 0; i < 60; i++)
+               I915_WRITE(TV_H_LUMA_0 + (i << 2), tv_priv->save_TV_H_LUMA[i]);
+       for (i = 0; i < 60; i++)
+               I915_WRITE(TV_H_CHROMA_0 + (i << 2), tv_priv->save_TV_H_CHROMA[i]);
+       for (i = 0; i < 43; i++)
+               I915_WRITE(TV_V_LUMA_0 + (i << 2), tv_priv->save_TV_V_LUMA[i]);
+       for (i = 0; i < 43; i++)
+               I915_WRITE(TV_V_CHROMA_0 + (i << 2), tv_priv->save_TV_V_CHROMA[i]);
+
+       I915_WRITE(TV_DAC, tv_priv->save_TV_DAC);
+       I915_WRITE(TV_CTL, tv_priv->save_TV_CTL);
+}
+
+static const struct tv_mode *
+intel_tv_mode_lookup(char *tv_format)
+{
+       int i;
+
+       for (i = 0; i < NUM_TV_MODES; i++) {
+               const struct tv_mode *tv_mode = &tv_modes[i];
+
+               if (!strcmp(tv_format, tv_mode->name))
+                       return tv_mode;
+       }
+       return NULL;
+}
+
+static const struct tv_mode *
+intel_tv_mode_find(struct intel_output *intel_output)
+{
+       struct intel_tv_priv *tv_priv = intel_output->dev_priv;
+
+       return intel_tv_mode_lookup(tv_priv->tv_format);
+}
+
+static enum drm_mode_status
+intel_tv_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode)
+{
+       struct intel_output *intel_output = to_intel_output(connector);
+       const struct tv_mode *tv_mode = intel_tv_mode_find(intel_output);
+
+       /* Ensure TV refresh is close to desired refresh */
+       if (tv_mode && abs(tv_mode->refresh - drm_mode_vrefresh(mode) * 1000) < 1000)
+               return MODE_OK;
+       return MODE_CLOCK_RANGE;
+}
+
+static bool
+intel_tv_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
+                   struct drm_display_mode *adjusted_mode)
+{
+       struct drm_device *dev = encoder->dev;
+       struct drm_mode_config *drm_config = &dev->mode_config;
+       struct intel_output *intel_output = enc_to_intel_output(encoder);
+       const struct tv_mode *tv_mode = intel_tv_mode_find(intel_output);
+       struct drm_encoder *other_encoder;
+
+       if (!tv_mode)
+               return false;
+
+       /* FIXME: lock encoder list */
+       list_for_each_entry(other_encoder, &drm_config->encoder_list, head) {
+               if (other_encoder != encoder &&
+                   other_encoder->crtc == encoder->crtc)
+                       return false;
+       }
+
+       adjusted_mode->clock = tv_mode->clock;
+       return true;
+}
+
+static void
+intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
+                 struct drm_display_mode *adjusted_mode)
+{
+       struct drm_device *dev = encoder->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_crtc *crtc = encoder->crtc;
+       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+       struct intel_output *intel_output = enc_to_intel_output(encoder);
+       struct intel_tv_priv *tv_priv = intel_output->dev_priv;
+       const struct tv_mode *tv_mode = intel_tv_mode_find(intel_output);
+       u32 tv_ctl;
+       u32 hctl1, hctl2, hctl3;
+       u32 vctl1, vctl2, vctl3, vctl4, vctl5, vctl6, vctl7;
+       u32 scctl1, scctl2, scctl3;
+       int i, j;
+       const struct video_levels *video_levels;
+       const struct color_conversion *color_conversion;
+       bool burst_ena;
+
+       if (!tv_mode)
+               return; /* can't happen (mode_prepare prevents this) */
+
+       tv_ctl = 0;
+
+       switch (tv_priv->type) {
+       default:
+       case DRM_MODE_CONNECTOR_Unknown:
+       case DRM_MODE_CONNECTOR_Composite:
+               tv_ctl |= TV_ENC_OUTPUT_COMPOSITE;
+               video_levels = tv_mode->composite_levels;
+               color_conversion = tv_mode->composite_color;
+               burst_ena = tv_mode->burst_ena;
+               break;
+       case DRM_MODE_CONNECTOR_Component:
+               tv_ctl |= TV_ENC_OUTPUT_COMPONENT;
+               video_levels = &component_levels;
+               if (tv_mode->burst_ena)
+                       color_conversion = &sdtv_csc_yprpb;
+               else
+                       color_conversion = &hdtv_csc_yprpb;
+               burst_ena = false;
+               break;
+       case DRM_MODE_CONNECTOR_SVIDEO:
+               tv_ctl |= TV_ENC_OUTPUT_SVIDEO;
+               video_levels = tv_mode->svideo_levels;
+               color_conversion = tv_mode->svideo_color;
+               burst_ena = tv_mode->burst_ena;
+               break;
+       }
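+       /* Pack the horizontal (hctl*) and vertical (vctl*) timing fields for the TV encoder. */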
+       hctl1 = (tv_mode->hsync_end << TV_HSYNC_END_SHIFT) |
+               (tv_mode->htotal << TV_HTOTAL_SHIFT);
+
+       hctl2 = (tv_mode->hburst_start << 16) |
+               (tv_mode->hburst_len << TV_HBURST_LEN_SHIFT);
+
+       if (burst_ena)
+               hctl2 |= TV_BURST_ENA;
+
+       hctl3 = (tv_mode->hblank_start << TV_HBLANK_START_SHIFT) |
+               (tv_mode->hblank_end << TV_HBLANK_END_SHIFT);
+
+       vctl1 = (tv_mode->nbr_end << TV_NBR_END_SHIFT) |
+               (tv_mode->vi_end_f1 << TV_VI_END_F1_SHIFT) |
+               (tv_mode->vi_end_f2 << TV_VI_END_F2_SHIFT);
+
+       vctl2 = (tv_mode->vsync_len << TV_VSYNC_LEN_SHIFT) |
+               (tv_mode->vsync_start_f1 << TV_VSYNC_START_F1_SHIFT) |
+               (tv_mode->vsync_start_f2 << TV_VSYNC_START_F2_SHIFT);
+
+       vctl3 = (tv_mode->veq_len << TV_VEQ_LEN_SHIFT) |
+               (tv_mode->veq_start_f1 << TV_VEQ_START_F1_SHIFT) |
+               (tv_mode->veq_start_f2 << TV_VEQ_START_F2_SHIFT);
+
+       if (tv_mode->veq_ena)
+               vctl3 |= TV_EQUAL_ENA;
+
+       vctl4 = (tv_mode->vburst_start_f1 << TV_VBURST_START_F1_SHIFT) |
+               (tv_mode->vburst_end_f1 << TV_VBURST_END_F1_SHIFT);
+
+       vctl5 = (tv_mode->vburst_start_f2 << TV_VBURST_START_F2_SHIFT) |
+               (tv_mode->vburst_end_f2 << TV_VBURST_END_F2_SHIFT);
+
+       vctl6 = (tv_mode->vburst_start_f3 << TV_VBURST_START_F3_SHIFT) |
+               (tv_mode->vburst_end_f3 << TV_VBURST_END_F3_SHIFT);
+
+       vctl7 = (tv_mode->vburst_start_f4 << TV_VBURST_START_F4_SHIFT) |
+               (tv_mode->vburst_end_f4 << TV_VBURST_END_F4_SHIFT);
+
+       if (intel_crtc->pipe == 1)
+               tv_ctl |= TV_ENC_PIPEB_SELECT;
+       tv_ctl |= tv_mode->oversample;
+
+       if (tv_mode->progressive)
+               tv_ctl |= TV_PROGRESSIVE;
+       if (tv_mode->trilevel_sync)
+               tv_ctl |= TV_TRILEVEL_SYNC;
+       if (tv_mode->pal_burst)
+               tv_ctl |= TV_PAL_BURST;
+       scctl1 = 0;
+       /* dda1 implies valid video levels */
+       if (tv_mode->dda1_inc) {
+               scctl1 |= TV_SC_DDA1_EN;
+               scctl1 |= video_levels->burst << TV_BURST_LEVEL_SHIFT;
+       }
+
+       if (tv_mode->dda2_inc)
+               scctl1 |= TV_SC_DDA2_EN;
+
+       if (tv_mode->dda3_inc)
+               scctl1 |= TV_SC_DDA3_EN;
+
+       scctl1 |= tv_mode->sc_reset;
+       scctl1 |= tv_mode->dda1_inc << TV_SCDDA1_INC_SHIFT;
+
+       scctl2 = tv_mode->dda2_size << TV_SCDDA2_SIZE_SHIFT |
+               tv_mode->dda2_inc << TV_SCDDA2_INC_SHIFT;
+
+       scctl3 = tv_mode->dda3_size << TV_SCDDA3_SIZE_SHIFT |
+               tv_mode->dda3_inc << TV_SCDDA3_INC_SHIFT;
+
+       /* Enable two fixes for the chips that need them. */
+       if (dev->pci_device < 0x2772)
+               tv_ctl |= TV_ENC_C0_FIX | TV_ENC_SDP_FIX;
+
+       I915_WRITE(TV_H_CTL_1, hctl1);
+       I915_WRITE(TV_H_CTL_2, hctl2);
+       I915_WRITE(TV_H_CTL_3, hctl3);
+       I915_WRITE(TV_V_CTL_1, vctl1);
+       I915_WRITE(TV_V_CTL_2, vctl2);
+       I915_WRITE(TV_V_CTL_3, vctl3);
+       I915_WRITE(TV_V_CTL_4, vctl4);
+       I915_WRITE(TV_V_CTL_5, vctl5);
+       I915_WRITE(TV_V_CTL_6, vctl6);
+       I915_WRITE(TV_V_CTL_7, vctl7);
+       I915_WRITE(TV_SC_CTL_1, scctl1);
+       I915_WRITE(TV_SC_CTL_2, scctl2);
+       I915_WRITE(TV_SC_CTL_3, scctl3);
+
+       if (color_conversion) {
+               I915_WRITE(TV_CSC_Y, (color_conversion->ry << 16) |
+                          color_conversion->gy);
+               I915_WRITE(TV_CSC_Y2, (color_conversion->by << 16) |
+                          color_conversion->ay);
+               I915_WRITE(TV_CSC_U, (color_conversion->ru << 16) |
+                          color_conversion->gu);
+               I915_WRITE(TV_CSC_U2, (color_conversion->bu << 16) |
+                          color_conversion->au);
+               I915_WRITE(TV_CSC_V, (color_conversion->rv << 16) |
+                          color_conversion->gv);
+               I915_WRITE(TV_CSC_V2, (color_conversion->bv << 16) |
+                          color_conversion->av);
+       }
+
+       I915_WRITE(TV_CLR_KNOBS, 0x00606000);
+       if (video_levels)
+               I915_WRITE(TV_CLR_LEVEL,
+                          ((video_levels->black << TV_BLACK_LEVEL_SHIFT) |
+                           (video_levels->blank << TV_BLANK_LEVEL_SHIFT)));
+       {
+               int pipeconf_reg = (intel_crtc->pipe == 0) ?
+                       PIPEACONF : PIPEBCONF;
+               int dspcntr_reg = (intel_crtc->plane == 0) ?
+                       DSPACNTR : DSPBCNTR;
+               int pipeconf = I915_READ(pipeconf_reg);
+               int dspcntr = I915_READ(dspcntr_reg);
+               int dspbase_reg = (intel_crtc->plane == 0) ?
+                       DSPAADDR : DSPBADDR;
+               int xpos = 0x0, ypos = 0x0;
+               unsigned int xsize, ysize;
+               /* Pipe must be off here */
+               I915_WRITE(dspcntr_reg, dspcntr & ~DISPLAY_PLANE_ENABLE);
+               /* Flush the plane changes */
+               I915_WRITE(dspbase_reg, I915_READ(dspbase_reg));
+
+               /* Wait for vblank for the disable to take effect */
+               if (!IS_I9XX(dev))
+                       intel_wait_for_vblank(dev);
+
+               I915_WRITE(pipeconf_reg, pipeconf & ~PIPEACONF_ENABLE);
+               /* Wait for vblank for the disable to take effect. */
+               intel_wait_for_vblank(dev);
+
+               /* Filter ctl must be set before TV_WIN_SIZE */
+               I915_WRITE(TV_FILTER_CTL_1, TV_AUTO_SCALE);
+               xsize = tv_mode->hblank_start - tv_mode->hblank_end;
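+               /*
+                * Progressive modes drive nbr_end + 1 active lines; interlaced
+                * modes scan two fields, roughly doubling the visible height.
+                */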
+               if (tv_mode->progressive)
+                       ysize = tv_mode->nbr_end + 1;
+               else
+                       ysize = 2 * tv_mode->nbr_end + 1;
+
+               xpos += tv_priv->margin[TV_MARGIN_LEFT];
+               ypos += tv_priv->margin[TV_MARGIN_TOP];
+               xsize -= (tv_priv->margin[TV_MARGIN_LEFT] +
+                         tv_priv->margin[TV_MARGIN_RIGHT]);
+               ysize -= (tv_priv->margin[TV_MARGIN_TOP] +
+                         tv_priv->margin[TV_MARGIN_BOTTOM]);
+               I915_WRITE(TV_WIN_POS, (xpos << 16) | ypos);
+               I915_WRITE(TV_WIN_SIZE, (xsize << 16) | ysize);
+
+               I915_WRITE(pipeconf_reg, pipeconf);
+               I915_WRITE(dspcntr_reg, dspcntr);
+               /* Flush the plane changes */
+               I915_WRITE(dspbase_reg, I915_READ(dspbase_reg));
+       }
+
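+       /* Load the per-mode scaler filter table: 60 H-luma, 60 H-chroma, 43 V-luma and 43 V-chroma entries. */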
+       j = 0;
+       for (i = 0; i < 60; i++)
+               I915_WRITE(TV_H_LUMA_0 + (i << 2), tv_mode->filter_table[j++]);
+       for (i = 0; i < 60; i++)
+               I915_WRITE(TV_H_CHROMA_0 + (i << 2), tv_mode->filter_table[j++]);
+       for (i = 0; i < 43; i++)
+               I915_WRITE(TV_V_LUMA_0 + (i << 2), tv_mode->filter_table[j++]);
+       for (i = 0; i < 43; i++)
+               I915_WRITE(TV_V_CHROMA_0 + (i << 2), tv_mode->filter_table[j++]);
+       I915_WRITE(TV_DAC, 0);
+       I915_WRITE(TV_CTL, tv_ctl);
+}
+
+static const struct drm_display_mode reported_modes[] = {
+       {
+               .name = "NTSC 480i",
+               .clock = 107520,
+               .hdisplay = 1280,
+               .hsync_start = 1368,
+               .hsync_end = 1496,
+               .htotal = 1712,
+
+               .vdisplay = 1024,
+               .vsync_start = 1027,
+               .vsync_end = 1034,
+               .vtotal = 1104,
+               .type = DRM_MODE_TYPE_DRIVER,
+       },
+};
+
+/**
+ * Detect TV presence by applying a load to the DAC sense lines.
+ *
+ * Requires that the current pipe's DPLL is active.
+ *
+ * \return the detected DRM_MODE_CONNECTOR_* type, or -1 if no TV is connected.
+ */
+static int
+intel_tv_detect_type(struct drm_crtc *crtc, struct intel_output *intel_output)
+{
+       struct drm_encoder *encoder = &intel_output->enc;
+       struct drm_device *dev = encoder->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       unsigned long irqflags;
+       u32 tv_ctl, save_tv_ctl;
+       u32 tv_dac, save_tv_dac;
+       int type = DRM_MODE_CONNECTOR_Unknown;
+
+       tv_dac = I915_READ(TV_DAC);
+
+       /* Disable TV interrupts around load detect or we'll recurse */
+       spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
+       i915_disable_pipestat(dev_priv, 0, PIPE_HOTPLUG_INTERRUPT_ENABLE |
+                             PIPE_HOTPLUG_TV_INTERRUPT_ENABLE);
+       spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
+
+       /*
+        * Detect TV by polling the DAC sense lines.
+        */
+       if (intel_output->load_detect_temp) {
+               /* TV not currently running, prod it with destructive detect */
+               save_tv_dac = tv_dac;
+               tv_ctl = I915_READ(TV_CTL);
+               save_tv_ctl = tv_ctl;
+               tv_ctl &= ~TV_ENC_ENABLE;
+               tv_ctl &= ~TV_TEST_MODE_MASK;
+               tv_ctl |= TV_TEST_MODE_MONITOR_DETECT;
+               tv_dac &= ~TVDAC_SENSE_MASK;
+               tv_dac |= (TVDAC_STATE_CHG_EN |
+                          TVDAC_A_SENSE_CTL |
+                          TVDAC_B_SENSE_CTL |
+                          TVDAC_C_SENSE_CTL |
+                          DAC_CTL_OVERRIDE |
+                          DAC_A_0_7_V |
+                          DAC_B_0_7_V |
+                          DAC_C_0_7_V);
+               I915_WRITE(TV_CTL, tv_ctl);
+               I915_WRITE(TV_DAC, tv_dac);
+               intel_wait_for_vblank(dev);
+               tv_dac = I915_READ(TV_DAC);
+               I915_WRITE(TV_DAC, save_tv_dac);
+               I915_WRITE(TV_CTL, save_tv_ctl);
+       }
+       /*
+        *  A B C
+        *  0 1 1 Composite
+        *  1 0 X svideo
+        *  0 0 0 Component
+        */
+       if ((tv_dac & TVDAC_SENSE_MASK) == (TVDAC_B_SENSE | TVDAC_C_SENSE)) {
+               DRM_DEBUG("Detected Composite TV connection\n");
+               type = DRM_MODE_CONNECTOR_Composite;
+       } else if ((tv_dac & (TVDAC_A_SENSE|TVDAC_B_SENSE)) == TVDAC_A_SENSE) {
+               DRM_DEBUG("Detected S-Video TV connection\n");
+               type = DRM_MODE_CONNECTOR_SVIDEO;
+       } else if ((tv_dac & TVDAC_SENSE_MASK) == 0) {
+               DRM_DEBUG("Detected Component TV connection\n");
+               type = DRM_MODE_CONNECTOR_Component;
+       } else {
+               DRM_DEBUG("No TV connection detected\n");
+               type = -1;
+       }
+
+       /* Restore interrupt config */
+       spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
+       i915_enable_pipestat(dev_priv, 0, PIPE_HOTPLUG_INTERRUPT_ENABLE |
+                            PIPE_HOTPLUG_TV_INTERRUPT_ENABLE);
+       spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
+
+       return type;
+}
+
+/**
+ * Detect the TV connection.
+ *
+ * If no pipe is currently driving the encoder, temporarily borrow a
+ * load-detect pipe so the DAC sense lines can be probed.
+ */
+static enum drm_connector_status
+intel_tv_detect(struct drm_connector *connector)
+{
+       struct drm_crtc *crtc;
+       struct drm_display_mode mode;
+       struct intel_output *intel_output = to_intel_output(connector);
+       struct intel_tv_priv *tv_priv = intel_output->dev_priv;
+       struct drm_encoder *encoder = &intel_output->enc;
+       int dpms_mode;
+       int type = tv_priv->type;
+
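+       /* Use the canned reported mode to drive a load-detect pipe when the encoder is idle. */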
+       mode = reported_modes[0];
+       drm_mode_set_crtcinfo(&mode, CRTC_INTERLACE_HALVE_V);
+
+       if (encoder->crtc) {
+               type = intel_tv_detect_type(encoder->crtc, intel_output);
+       } else {
+               crtc = intel_get_load_detect_pipe(intel_output, &mode, &dpms_mode);
+               if (crtc) {
+                       type = intel_tv_detect_type(crtc, intel_output);
+                       intel_release_load_detect_pipe(intel_output, dpms_mode);
+               } else
+                       type = -1;
+       }
+
+       if (type < 0)
+               return connector_status_disconnected;
+
+       return connector_status_connected;
+}
+
+static struct input_res {
+       char *name;
+       int w, h;
+} input_res_table[] = {
+       {"640x480", 640, 480},
+       {"800x600", 800, 600},
+       {"1024x768", 1024, 768},
+       {"1280x1024", 1280, 1024},
+       {"848x480", 848, 480},
+       {"1280x720", 1280, 720},
+       {"1920x1080", 1920, 1080},
+};
+
+/**
+ * Report the modes supported on the TV connector.
+ *
+ * The TV itself cannot be probed for modes, so advertise a fixed table of
+ * input resolutions that the encoder can scale to the selected TV format.
+ */
+static int
+intel_tv_get_modes(struct drm_connector *connector)
+{
+       struct drm_display_mode *mode_ptr;
+       struct intel_output *intel_output = to_intel_output(connector);
+       const struct tv_mode *tv_mode = intel_tv_mode_find(intel_output);
+       int j;
+
+       for (j = 0; j < ARRAY_SIZE(input_res_table); j++) {
+               struct input_res *input = &input_res_table[j];
+               unsigned int hactive_s = input->w;
+               unsigned int vactive_s = input->h;
+
+               if (tv_mode->max_srcw && input->w > tv_mode->max_srcw)
+                       continue;
+
+               if (input->w > 1024 && (!tv_mode->progressive
+                                       && !tv_mode->component_only))
+                       continue;
+
+               mode_ptr = drm_calloc(1, sizeof(struct drm_display_mode),
+                                     DRM_MEM_DRIVER);
+               if (!mode_ptr)
+                       continue;
+               strncpy(mode_ptr->name, input->name, DRM_DISPLAY_MODE_LEN);
+
+               mode_ptr->hdisplay = hactive_s;
+               mode_ptr->hsync_start = hactive_s + 1;
+               mode_ptr->hsync_end = hactive_s + 64;
+               if (mode_ptr->hsync_end <= mode_ptr->hsync_start)
+                       mode_ptr->hsync_end = mode_ptr->hsync_start + 1;
+               mode_ptr->htotal = hactive_s + 96;
+
+               mode_ptr->vdisplay = vactive_s;
+               mode_ptr->vsync_start = vactive_s + 1;
+               mode_ptr->vsync_end = vactive_s + 32;
+               if (mode_ptr->vsync_end <= mode_ptr->vsync_start)
+                       mode_ptr->vsync_end = mode_ptr->vsync_start + 1;
+               mode_ptr->vtotal = vactive_s + 33;
+
+               mode_ptr->clock = (int) (tv_mode->refresh *
+                                        mode_ptr->vtotal *
+                                        mode_ptr->htotal / 1000) / 1000;
+
+               mode_ptr->type = DRM_MODE_TYPE_DRIVER;
+               drm_mode_probed_add(connector, mode_ptr);
+       }
+
+       return 0;
+}
+
+static void
+intel_tv_destroy(struct drm_connector *connector)
+{
+       struct intel_output *intel_output = to_intel_output(connector);
+
+       drm_sysfs_connector_remove(connector);
+       drm_connector_cleanup(connector);
+       drm_free(intel_output, sizeof(struct intel_output) + sizeof(struct intel_tv_priv),
+                DRM_MEM_DRIVER);
+}
+
+static int
+intel_tv_set_property(struct drm_connector *connector, struct drm_property *property,
+                     uint64_t val)
+{
+       struct drm_device *dev = connector->dev;
+       struct intel_output *intel_output = to_intel_output(connector);
+       struct intel_tv_priv *tv_priv = intel_output->dev_priv;
+       int ret = 0;
+
+       ret = drm_connector_property_set_value(connector, property, val);
+       if (ret < 0)
+               goto out;
+
+       if (property == dev->mode_config.tv_left_margin_property)
+               tv_priv->margin[TV_MARGIN_LEFT] = val;
+       else if (property == dev->mode_config.tv_right_margin_property)
+               tv_priv->margin[TV_MARGIN_RIGHT] = val;
+       else if (property == dev->mode_config.tv_top_margin_property)
+               tv_priv->margin[TV_MARGIN_TOP] = val;
+       else if (property == dev->mode_config.tv_bottom_margin_property)
+               tv_priv->margin[TV_MARGIN_BOTTOM] = val;
+       else if (property == dev->mode_config.tv_mode_property) {
+               if (val >= NUM_TV_MODES) {
+                       ret = -EINVAL;
+                       goto out;
+               }
+               tv_priv->tv_format = tv_modes[val].name;
+       } else {
+               ret = -EINVAL;
+               goto out;
+       }
+
+       intel_tv_mode_set(&intel_output->enc, NULL, NULL);
+out:
+       return ret;
+}
+
+static const struct drm_encoder_helper_funcs intel_tv_helper_funcs = {
+       .dpms = intel_tv_dpms,
+       .mode_fixup = intel_tv_mode_fixup,
+       .prepare = intel_encoder_prepare,
+       .mode_set = intel_tv_mode_set,
+       .commit = intel_encoder_commit,
+};
+
+static const struct drm_connector_funcs intel_tv_connector_funcs = {
+       .save = intel_tv_save,
+       .restore = intel_tv_restore,
+       .detect = intel_tv_detect,
+       .destroy = intel_tv_destroy,
+       .set_property = intel_tv_set_property,
+       .fill_modes = drm_helper_probe_single_connector_modes,
+};
+
+static const struct drm_connector_helper_funcs intel_tv_connector_helper_funcs = {
+       .mode_valid = intel_tv_mode_valid,
+       .get_modes = intel_tv_get_modes,
+       .best_encoder = intel_best_encoder,
+};
+
+static void intel_tv_enc_destroy(struct drm_encoder *encoder)
+{
+       drm_encoder_cleanup(encoder);
+}
+
+static const struct drm_encoder_funcs intel_tv_enc_funcs = {
+       .destroy = intel_tv_enc_destroy,
+};
+
+void
+intel_tv_init(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_connector *connector;
+       struct intel_output *intel_output;
+       struct intel_tv_priv *tv_priv;
+       u32 tv_dac_on, tv_dac_off, save_tv_dac;
+       char **tv_format_names;
+       int i, initial_mode = 0;
+
+       if ((I915_READ(TV_CTL) & TV_FUSE_STATE_MASK) == TV_FUSE_STATE_DISABLED)
+               return;
+
+       /* Even if we have an encoder we may not have a connector */
+       if (!dev_priv->int_tv_support)
+               return;
+
+       /*
+        * Sanity-check that the TV DAC exists by verifying that the
+        * state-change-enable bit in TV_DAC can be toggled.
+        */
+       save_tv_dac = I915_READ(TV_DAC);
+
+       I915_WRITE(TV_DAC, save_tv_dac | TVDAC_STATE_CHG_EN);
+       tv_dac_on = I915_READ(TV_DAC);
+
+       I915_WRITE(TV_DAC, save_tv_dac & ~TVDAC_STATE_CHG_EN);
+       tv_dac_off = I915_READ(TV_DAC);
+
+       I915_WRITE(TV_DAC, save_tv_dac);
+
+       /*
+        * If the register cannot latch the state-change-enable bit
+        * (it reads back neither set nor cleared as written), assume
+        * the TV DAC does not really exist.
+        */
+       if ((tv_dac_on & TVDAC_STATE_CHG_EN) == 0 ||
+           (tv_dac_off & TVDAC_STATE_CHG_EN) != 0)
+               return;
+
+       intel_output = drm_calloc(1, sizeof(struct intel_output) +
+                                 sizeof(struct intel_tv_priv), DRM_MEM_DRIVER);
+       if (!intel_output)
+               return;
+       connector = &intel_output->base;
+
+       drm_connector_init(dev, connector, &intel_tv_connector_funcs,
+                          DRM_MODE_CONNECTOR_SVIDEO);
+
+       drm_encoder_init(dev, &intel_output->enc, &intel_tv_enc_funcs,
+                        DRM_MODE_ENCODER_TVDAC);
+
+       drm_mode_connector_attach_encoder(&intel_output->base, &intel_output->enc);
+       tv_priv = (struct intel_tv_priv *)(intel_output + 1);
+       intel_output->type = INTEL_OUTPUT_TVOUT;
+       intel_output->enc.possible_crtcs = ((1 << 0) | (1 << 1));
+       intel_output->enc.possible_clones = (1 << INTEL_OUTPUT_TVOUT);
+       intel_output->dev_priv = tv_priv;
+       tv_priv->type = DRM_MODE_CONNECTOR_Unknown;
+
+       /* BIOS margin values */
+       tv_priv->margin[TV_MARGIN_LEFT] = 54;
+       tv_priv->margin[TV_MARGIN_TOP] = 36;
+       tv_priv->margin[TV_MARGIN_RIGHT] = 46;
+       tv_priv->margin[TV_MARGIN_BOTTOM] = 37;
+
+       tv_priv->tv_format = kstrdup(tv_modes[initial_mode].name, GFP_KERNEL);
+
+       drm_encoder_helper_add(&intel_output->enc, &intel_tv_helper_funcs);
+       drm_connector_helper_add(connector, &intel_tv_connector_helper_funcs);
+       connector->interlace_allowed = false;
+       connector->doublescan_allowed = false;
+
+       /* Create TV properties then attach current values */
+       tv_format_names = drm_alloc(sizeof(char *) * NUM_TV_MODES,
+                                   DRM_MEM_DRIVER);
+       if (!tv_format_names)
+               goto out;
+       for (i = 0; i < NUM_TV_MODES; i++)
+               tv_format_names[i] = tv_modes[i].name;
+       drm_mode_create_tv_properties(dev, NUM_TV_MODES, tv_format_names);
+
+       drm_connector_attach_property(connector, dev->mode_config.tv_mode_property,
+                                  initial_mode);
+       drm_connector_attach_property(connector,
+                                  dev->mode_config.tv_left_margin_property,
+                                  tv_priv->margin[TV_MARGIN_LEFT]);
+       drm_connector_attach_property(connector,
+                                  dev->mode_config.tv_top_margin_property,
+                                  tv_priv->margin[TV_MARGIN_TOP]);
+       drm_connector_attach_property(connector,
+                                  dev->mode_config.tv_right_margin_property,
+                                  tv_priv->margin[TV_MARGIN_RIGHT]);
+       drm_connector_attach_property(connector,
+                                  dev->mode_config.tv_bottom_margin_property,
+                                  tv_priv->margin[TV_MARGIN_BOTTOM]);
+out:
+       drm_sysfs_connector_add(connector);
+}
index 4b27d9abb7bcba5b0b77b8ab2b8f48e60859c2f2..cace3964feebb4288dae0a822bb03284043ffda7 100644 (file)
@@ -860,12 +860,12 @@ static __inline__ void r300_pacify(drm_radeon_private_t *dev_priv)
  * The actual age emit is done by r300_do_cp_cmdbuf, which is why you must
  * be careful about how this function is called.
  */
-static void r300_discard_buffer(struct drm_device * dev, struct drm_buf * buf)
+static void r300_discard_buffer(struct drm_device *dev, struct drm_master *master, struct drm_buf *buf)
 {
-       drm_radeon_private_t *dev_priv = dev->dev_private;
        drm_radeon_buf_priv_t *buf_priv = buf->dev_private;
+       struct drm_radeon_master_private *master_priv = master->driver_priv;
 
-       buf_priv->age = ++dev_priv->sarea_priv->last_dispatch;
+       buf_priv->age = ++master_priv->sarea_priv->last_dispatch;
        buf->pending = 1;
        buf->used = 0;
 }
@@ -1027,6 +1027,7 @@ int r300_do_cp_cmdbuf(struct drm_device *dev,
                      drm_radeon_kcmd_buffer_t *cmdbuf)
 {
        drm_radeon_private_t *dev_priv = dev->dev_private;
+       struct drm_radeon_master_private *master_priv = file_priv->master->driver_priv;
        struct drm_device_dma *dma = dev->dma;
        struct drm_buf *buf = NULL;
        int emit_dispatch_age = 0;
@@ -1134,7 +1135,7 @@ int r300_do_cp_cmdbuf(struct drm_device *dev,
                        }
 
                        emit_dispatch_age = 1;
-                       r300_discard_buffer(dev, buf);
+                       r300_discard_buffer(dev, file_priv->master, buf);
                        break;
 
                case R300_CMD_WAIT:
@@ -1189,7 +1190,7 @@ int r300_do_cp_cmdbuf(struct drm_device *dev,
 
                /* Emit the vertex buffer age */
                BEGIN_RING(2);
-               RADEON_DISPATCH_AGE(dev_priv->sarea_priv->last_dispatch);
+               RADEON_DISPATCH_AGE(master_priv->sarea_priv->last_dispatch);
                ADVANCE_RING();
        }
 
index dcebb4bee7aaf9e0e845db50296798f76686d1a6..63212d7bbc287a36ef9bcc3cba45b3ac6cd97deb 100644 (file)
@@ -31,6 +31,7 @@
 
 #include "drmP.h"
 #include "drm.h"
+#include "drm_sarea.h"
 #include "radeon_drm.h"
 #include "radeon_drv.h"
 #include "r300_reg.h"
@@ -667,15 +668,14 @@ static void radeon_cp_init_ring_buffer(struct drm_device * dev,
                RADEON_WRITE(RADEON_BUS_CNTL, tmp);
 	} /* PCIE cards appear to not need this */
 
-       dev_priv->sarea_priv->last_frame = dev_priv->scratch[0] = 0;
-       RADEON_WRITE(RADEON_LAST_FRAME_REG, dev_priv->sarea_priv->last_frame);
+       dev_priv->scratch[0] = 0;
+       RADEON_WRITE(RADEON_LAST_FRAME_REG, 0);
 
-       dev_priv->sarea_priv->last_dispatch = dev_priv->scratch[1] = 0;
-       RADEON_WRITE(RADEON_LAST_DISPATCH_REG,
-                    dev_priv->sarea_priv->last_dispatch);
+       dev_priv->scratch[1] = 0;
+       RADEON_WRITE(RADEON_LAST_DISPATCH_REG, 0);
 
-       dev_priv->sarea_priv->last_clear = dev_priv->scratch[2] = 0;
-       RADEON_WRITE(RADEON_LAST_CLEAR_REG, dev_priv->sarea_priv->last_clear);
+       dev_priv->scratch[2] = 0;
+       RADEON_WRITE(RADEON_LAST_CLEAR_REG, 0);
 
        radeon_do_wait_for_idle(dev_priv);
 
@@ -871,9 +871,11 @@ static void radeon_set_pcigart(drm_radeon_private_t * dev_priv, int on)
        }
 }
 
-static int radeon_do_init_cp(struct drm_device * dev, drm_radeon_init_t * init)
+static int radeon_do_init_cp(struct drm_device *dev, drm_radeon_init_t *init,
+                            struct drm_file *file_priv)
 {
        drm_radeon_private_t *dev_priv = dev->dev_private;
+       struct drm_radeon_master_private *master_priv = file_priv->master->driver_priv;
 
        DRM_DEBUG("\n");
 
@@ -998,8 +1000,8 @@ static int radeon_do_init_cp(struct drm_device * dev, drm_radeon_init_t * init)
        dev_priv->buffers_offset = init->buffers_offset;
        dev_priv->gart_textures_offset = init->gart_textures_offset;
 
-       dev_priv->sarea = drm_getsarea(dev);
-       if (!dev_priv->sarea) {
+       master_priv->sarea = drm_getsarea(dev);
+       if (!master_priv->sarea) {
                DRM_ERROR("could not find sarea!\n");
                radeon_do_cleanup_cp(dev);
                return -EINVAL;
@@ -1035,10 +1037,6 @@ static int radeon_do_init_cp(struct drm_device * dev, drm_radeon_init_t * init)
                }
        }
 
-       dev_priv->sarea_priv =
-           (drm_radeon_sarea_t *) ((u8 *) dev_priv->sarea->handle +
-                                   init->sarea_priv_offset);
-
 #if __OS_HAS_AGP
        if (dev_priv->flags & RADEON_IS_AGP) {
                drm_core_ioremap(dev_priv->cp_ring, dev);
@@ -1329,7 +1327,7 @@ int radeon_cp_init(struct drm_device *dev, void *data, struct drm_file *file_pri
        case RADEON_INIT_CP:
        case RADEON_INIT_R200_CP:
        case RADEON_INIT_R300_CP:
-               return radeon_do_init_cp(dev, init);
+               return radeon_do_init_cp(dev, init, file_priv);
        case RADEON_CLEANUP_CP:
                return radeon_do_cleanup_cp(dev);
        }
@@ -1768,6 +1766,51 @@ int radeon_driver_load(struct drm_device *dev, unsigned long flags)
        return ret;
 }
 
+int radeon_master_create(struct drm_device *dev, struct drm_master *master)
+{
+       struct drm_radeon_master_private *master_priv;
+       unsigned long sareapage;
+       int ret;
+
+       master_priv = drm_calloc(1, sizeof(*master_priv), DRM_MEM_DRIVER);
+       if (!master_priv)
+               return -ENOMEM;
+
+       /* prebuild the SAREA */
+       sareapage = max_t(unsigned long, SAREA_MAX, PAGE_SIZE);
+       ret = drm_addmap(dev, 0, sareapage, _DRM_SHM, _DRM_CONTAINS_LOCK|_DRM_DRIVER,
+                        &master_priv->sarea);
+       if (ret) {
+               DRM_ERROR("SAREA setup failed\n");
+               return ret;
+       }
+       master_priv->sarea_priv = master_priv->sarea->handle + sizeof(struct drm_sarea);
+       master_priv->sarea_priv->pfCurrentPage = 0;
+
+       master->driver_priv = master_priv;
+       return 0;
+}
+
+void radeon_master_destroy(struct drm_device *dev, struct drm_master *master)
+{
+       struct drm_radeon_master_private *master_priv = master->driver_priv;
+
+       if (!master_priv)
+               return;
+
+       if (master_priv->sarea_priv &&
+           master_priv->sarea_priv->pfCurrentPage != 0)
+               radeon_cp_dispatch_flip(dev, master);
+
+       master_priv->sarea_priv = NULL;
+       if (master_priv->sarea)
+               drm_rmmap_locked(dev, master_priv->sarea);
+
+       drm_free(master_priv, sizeof(*master_priv), DRM_MEM_DRIVER);
+
+       master->driver_priv = NULL;
+}
+
 /* Create mappings for registers and framebuffer so userland doesn't necessarily
  * have to find them.
  */
index 71af746a4e4760d24c7c36e1141882b43117137a..fef207881f45eeb34d5149c571b80fefbb2e47b2 100644 (file)
@@ -96,6 +96,8 @@ static struct drm_driver driver = {
        .enable_vblank = radeon_enable_vblank,
        .disable_vblank = radeon_disable_vblank,
        .dri_library_name = dri_library_name,
+       .master_create = radeon_master_create,
+       .master_destroy = radeon_master_destroy,
        .irq_preinstall = radeon_driver_irq_preinstall,
        .irq_postinstall = radeon_driver_irq_postinstall,
        .irq_uninstall = radeon_driver_irq_uninstall,
index 3bbb871b25d5102a2b6d832702cc55bc338a70ea..490bc7ceef60fe21dcaa8bb800fdc18fdbe1aea1 100644 (file)
@@ -226,9 +226,13 @@ struct radeon_virt_surface {
 #define RADEON_FLUSH_EMITED    (1 << 0)
 #define RADEON_PURGE_EMITED    (1 << 1)
 
+struct drm_radeon_master_private {
+       drm_local_map_t *sarea;
+       drm_radeon_sarea_t *sarea_priv;
+};
+
 typedef struct drm_radeon_private {
        drm_radeon_ring_buffer_t ring;
-       drm_radeon_sarea_t *sarea_priv;
 
        u32 fb_location;
        u32 fb_size;
@@ -409,6 +413,9 @@ extern int radeon_driver_open(struct drm_device *dev,
 extern long radeon_compat_ioctl(struct file *filp, unsigned int cmd,
                                unsigned long arg);
 
+extern int radeon_master_create(struct drm_device *dev, struct drm_master *master);
+extern void radeon_master_destroy(struct drm_device *dev, struct drm_master *master);
+extern void radeon_cp_dispatch_flip(struct drm_device *dev, struct drm_master *master);
 /* r300_cmdbuf.c */
 extern void r300_init_reg_flags(struct drm_device *dev);
 
@@ -1335,8 +1342,9 @@ do {                                                                      \
 } while (0)
 
 #define VB_AGE_TEST_WITH_RETURN( dev_priv )                            \
-do {                                                                   \
-       drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;          \
+do {                                                           \
+       struct drm_radeon_master_private *master_priv = file_priv->master->driver_priv; \
+       drm_radeon_sarea_t *sarea_priv = master_priv->sarea_priv;       \
        if ( sarea_priv->last_dispatch >= RADEON_MAX_VB_AGE ) {         \
                int __ret = radeon_do_cp_idle( dev_priv );              \
                if ( __ret ) return __ret;                              \
index 5d7153fcc7b0063625a8232e66b6f89b09a042c3..ef940a079dcbbb8488a1cc977f2c328e3e79d4a4 100644 (file)
@@ -742,13 +742,14 @@ static struct {
  */
 
 static void radeon_clear_box(drm_radeon_private_t * dev_priv,
+                            struct drm_radeon_master_private *master_priv,
                             int x, int y, int w, int h, int r, int g, int b)
 {
        u32 color;
        RING_LOCALS;
 
-       x += dev_priv->sarea_priv->boxes[0].x1;
-       y += dev_priv->sarea_priv->boxes[0].y1;
+       x += master_priv->sarea_priv->boxes[0].x1;
+       y += master_priv->sarea_priv->boxes[0].y1;
 
        switch (dev_priv->color_fmt) {
        case RADEON_COLOR_FORMAT_RGB565:
@@ -776,7 +777,7 @@ static void radeon_clear_box(drm_radeon_private_t * dev_priv,
                 RADEON_GMC_SRC_DATATYPE_COLOR |
                 RADEON_ROP3_P | RADEON_GMC_CLR_CMP_CNTL_DIS);
 
-       if (dev_priv->sarea_priv->pfCurrentPage == 1) {
+       if (master_priv->sarea_priv->pfCurrentPage == 1) {
                OUT_RING(dev_priv->front_pitch_offset);
        } else {
                OUT_RING(dev_priv->back_pitch_offset);
@@ -790,7 +791,7 @@ static void radeon_clear_box(drm_radeon_private_t * dev_priv,
        ADVANCE_RING();
 }
 
-static void radeon_cp_performance_boxes(drm_radeon_private_t * dev_priv)
+static void radeon_cp_performance_boxes(drm_radeon_private_t *dev_priv, struct drm_radeon_master_private *master_priv)
 {
        /* Collapse various things into a wait flag -- trying to
 	 * guess if userspace slept -- better just to have them tell us.
@@ -807,12 +808,12 @@ static void radeon_cp_performance_boxes(drm_radeon_private_t * dev_priv)
        /* Purple box for page flipping
         */
        if (dev_priv->stats.boxes & RADEON_BOX_FLIP)
-               radeon_clear_box(dev_priv, 4, 4, 8, 8, 255, 0, 255);
+               radeon_clear_box(dev_priv, master_priv, 4, 4, 8, 8, 255, 0, 255);
 
        /* Red box if we have to wait for idle at any point
         */
        if (dev_priv->stats.boxes & RADEON_BOX_WAIT_IDLE)
-               radeon_clear_box(dev_priv, 16, 4, 8, 8, 255, 0, 0);
+               radeon_clear_box(dev_priv, master_priv, 16, 4, 8, 8, 255, 0, 0);
 
        /* Blue box: lost context?
         */
@@ -820,12 +821,12 @@ static void radeon_cp_performance_boxes(drm_radeon_private_t * dev_priv)
        /* Yellow box for texture swaps
         */
        if (dev_priv->stats.boxes & RADEON_BOX_TEXTURE_LOAD)
-               radeon_clear_box(dev_priv, 40, 4, 8, 8, 255, 255, 0);
+               radeon_clear_box(dev_priv, master_priv, 40, 4, 8, 8, 255, 255, 0);
 
        /* Green box if hardware never idles (as far as we can tell)
         */
        if (!(dev_priv->stats.boxes & RADEON_BOX_DMA_IDLE))
-               radeon_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
+               radeon_clear_box(dev_priv, master_priv, 64, 4, 8, 8, 0, 255, 0);
 
        /* Draw bars indicating number of buffers allocated
         * (not a great measure, easily confused)
@@ -834,7 +835,7 @@ static void radeon_cp_performance_boxes(drm_radeon_private_t * dev_priv)
                if (dev_priv->stats.requested_bufs > 100)
                        dev_priv->stats.requested_bufs = 100;
 
-               radeon_clear_box(dev_priv, 4, 16,
+               radeon_clear_box(dev_priv, master_priv, 4, 16,
                                 dev_priv->stats.requested_bufs, 4,
                                 196, 128, 128);
        }
@@ -848,11 +849,13 @@ static void radeon_cp_performance_boxes(drm_radeon_private_t * dev_priv)
  */
 
 static void radeon_cp_dispatch_clear(struct drm_device * dev,
+                                    struct drm_master *master,
                                     drm_radeon_clear_t * clear,
                                     drm_radeon_clear_rect_t * depth_boxes)
 {
        drm_radeon_private_t *dev_priv = dev->dev_private;
-       drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
+       struct drm_radeon_master_private *master_priv = master->driver_priv;
+       drm_radeon_sarea_t *sarea_priv = master_priv->sarea_priv;
        drm_radeon_depth_clear_t *depth_clear = &dev_priv->depth_clear;
        int nbox = sarea_priv->nbox;
        struct drm_clip_rect *pbox = sarea_priv->boxes;
@@ -864,7 +867,7 @@ static void radeon_cp_dispatch_clear(struct drm_device * dev,
 
        dev_priv->stats.clears++;
 
-       if (dev_priv->sarea_priv->pfCurrentPage == 1) {
+       if (sarea_priv->pfCurrentPage == 1) {
                unsigned int tmp = flags;
 
                flags &= ~(RADEON_FRONT | RADEON_BACK);
@@ -890,7 +893,7 @@ static void radeon_cp_dispatch_clear(struct drm_device * dev,
 
                /* Make sure we restore the 3D state next time.
                 */
-               dev_priv->sarea_priv->ctx_owner = 0;
+               sarea_priv->ctx_owner = 0;
 
                for (i = 0; i < nbox; i++) {
                        int x = pbox[i].x1;
@@ -967,7 +970,7 @@ static void radeon_cp_dispatch_clear(struct drm_device * dev,
                /* Make sure we restore the 3D state next time.
                 * we haven't touched any "normal" state - still need this?
                 */
-               dev_priv->sarea_priv->ctx_owner = 0;
+               sarea_priv->ctx_owner = 0;
 
                if ((dev_priv->flags & RADEON_HAS_HIERZ)
                    && (flags & RADEON_USE_HIERZ)) {
@@ -1214,7 +1217,7 @@ static void radeon_cp_dispatch_clear(struct drm_device * dev,
 
                /* Make sure we restore the 3D state next time.
                 */
-               dev_priv->sarea_priv->ctx_owner = 0;
+               sarea_priv->ctx_owner = 0;
 
                for (i = 0; i < nbox; i++) {
 
@@ -1285,7 +1288,7 @@ static void radeon_cp_dispatch_clear(struct drm_device * dev,
 
                /* Make sure we restore the 3D state next time.
                 */
-               dev_priv->sarea_priv->ctx_owner = 0;
+               sarea_priv->ctx_owner = 0;
 
                for (i = 0; i < nbox; i++) {
 
@@ -1328,20 +1331,21 @@ static void radeon_cp_dispatch_clear(struct drm_device * dev,
         * wait on this value before performing the clear ioctl.  We
         * need this because the card's so damned fast...
         */
-       dev_priv->sarea_priv->last_clear++;
+       sarea_priv->last_clear++;
 
        BEGIN_RING(4);
 
-       RADEON_CLEAR_AGE(dev_priv->sarea_priv->last_clear);
+       RADEON_CLEAR_AGE(sarea_priv->last_clear);
        RADEON_WAIT_UNTIL_IDLE();
 
        ADVANCE_RING();
 }
 
-static void radeon_cp_dispatch_swap(struct drm_device * dev)
+static void radeon_cp_dispatch_swap(struct drm_device *dev, struct drm_master *master)
 {
        drm_radeon_private_t *dev_priv = dev->dev_private;
-       drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
+       struct drm_radeon_master_private *master_priv = master->driver_priv;
+       drm_radeon_sarea_t *sarea_priv = master_priv->sarea_priv;
        int nbox = sarea_priv->nbox;
        struct drm_clip_rect *pbox = sarea_priv->boxes;
        int i;
@@ -1351,7 +1355,7 @@ static void radeon_cp_dispatch_swap(struct drm_device * dev)
        /* Do some trivial performance monitoring...
         */
        if (dev_priv->do_boxes)
-               radeon_cp_performance_boxes(dev_priv);
+               radeon_cp_performance_boxes(dev_priv, master_priv);
 
        /* Wait for the 3D stream to idle before dispatching the bitblt.
         * This will prevent data corruption between the two streams.
@@ -1385,7 +1389,7 @@ static void radeon_cp_dispatch_swap(struct drm_device * dev)
                /* Make this work even if front & back are flipped:
                 */
                OUT_RING(CP_PACKET0(RADEON_SRC_PITCH_OFFSET, 1));
-               if (dev_priv->sarea_priv->pfCurrentPage == 0) {
+               if (sarea_priv->pfCurrentPage == 0) {
                        OUT_RING(dev_priv->back_pitch_offset);
                        OUT_RING(dev_priv->front_pitch_offset);
                } else {
@@ -1405,31 +1409,32 @@ static void radeon_cp_dispatch_swap(struct drm_device * dev)
         * throttle the framerate by waiting for this value before
         * performing the swapbuffer ioctl.
         */
-       dev_priv->sarea_priv->last_frame++;
+       sarea_priv->last_frame++;
 
        BEGIN_RING(4);
 
-       RADEON_FRAME_AGE(dev_priv->sarea_priv->last_frame);
+       RADEON_FRAME_AGE(sarea_priv->last_frame);
        RADEON_WAIT_UNTIL_2D_IDLE();
 
        ADVANCE_RING();
 }
 
-static void radeon_cp_dispatch_flip(struct drm_device * dev)
+void radeon_cp_dispatch_flip(struct drm_device *dev, struct drm_master *master)
 {
        drm_radeon_private_t *dev_priv = dev->dev_private;
-       struct drm_sarea *sarea = (struct drm_sarea *) dev_priv->sarea->handle;
-       int offset = (dev_priv->sarea_priv->pfCurrentPage == 1)
+       struct drm_radeon_master_private *master_priv = master->driver_priv;
+       struct drm_sarea *sarea = (struct drm_sarea *)master_priv->sarea->handle;
+       int offset = (master_priv->sarea_priv->pfCurrentPage == 1)
            ? dev_priv->front_offset : dev_priv->back_offset;
        RING_LOCALS;
        DRM_DEBUG("pfCurrentPage=%d\n",
-                 dev_priv->sarea_priv->pfCurrentPage);
+                 master_priv->sarea_priv->pfCurrentPage);
 
        /* Do some trivial performance monitoring...
         */
        if (dev_priv->do_boxes) {
                dev_priv->stats.boxes |= RADEON_BOX_FLIP;
-               radeon_cp_performance_boxes(dev_priv);
+               radeon_cp_performance_boxes(dev_priv, master_priv);
        }
 
        /* Update the frame offsets for both CRTCs
@@ -1441,7 +1446,7 @@ static void radeon_cp_dispatch_flip(struct drm_device * dev)
                     ((sarea->frame.y * dev_priv->front_pitch +
                       sarea->frame.x * (dev_priv->color_fmt - 2)) & ~7)
                     + offset);
-       OUT_RING_REG(RADEON_CRTC2_OFFSET, dev_priv->sarea_priv->crtc2_base
+       OUT_RING_REG(RADEON_CRTC2_OFFSET, master_priv->sarea_priv->crtc2_base
                     + offset);
 
        ADVANCE_RING();
@@ -1450,13 +1455,13 @@ static void radeon_cp_dispatch_flip(struct drm_device * dev)
         * throttle the framerate by waiting for this value before
         * performing the swapbuffer ioctl.
         */
-       dev_priv->sarea_priv->last_frame++;
-       dev_priv->sarea_priv->pfCurrentPage =
-               1 - dev_priv->sarea_priv->pfCurrentPage;
+       master_priv->sarea_priv->last_frame++;
+       master_priv->sarea_priv->pfCurrentPage =
+               1 - master_priv->sarea_priv->pfCurrentPage;
 
        BEGIN_RING(2);
 
-       RADEON_FRAME_AGE(dev_priv->sarea_priv->last_frame);
+       RADEON_FRAME_AGE(master_priv->sarea_priv->last_frame);
 
        ADVANCE_RING();
 }
@@ -1494,11 +1499,13 @@ typedef struct {
 } drm_radeon_tcl_prim_t;
 
 static void radeon_cp_dispatch_vertex(struct drm_device * dev,
+                                     struct drm_file *file_priv,
                                      struct drm_buf * buf,
                                      drm_radeon_tcl_prim_t * prim)
 {
        drm_radeon_private_t *dev_priv = dev->dev_private;
-       drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
+       struct drm_radeon_master_private *master_priv = file_priv->master->driver_priv;
+       drm_radeon_sarea_t *sarea_priv = master_priv->sarea_priv;
        int offset = dev_priv->gart_buffers_offset + buf->offset + prim->start;
        int numverts = (int)prim->numverts;
        int nbox = sarea_priv->nbox;
@@ -1539,13 +1546,14 @@ static void radeon_cp_dispatch_vertex(struct drm_device * dev,
        } while (i < nbox);
 }
 
-static void radeon_cp_discard_buffer(struct drm_device * dev, struct drm_buf * buf)
+static void radeon_cp_discard_buffer(struct drm_device *dev, struct drm_master *master, struct drm_buf *buf)
 {
        drm_radeon_private_t *dev_priv = dev->dev_private;
+       struct drm_radeon_master_private *master_priv = master->driver_priv;
        drm_radeon_buf_priv_t *buf_priv = buf->dev_private;
        RING_LOCALS;
 
-       buf_priv->age = ++dev_priv->sarea_priv->last_dispatch;
+       buf_priv->age = ++master_priv->sarea_priv->last_dispatch;
 
        /* Emit the vertex buffer age */
        BEGIN_RING(2);
@@ -1590,12 +1598,14 @@ static void radeon_cp_dispatch_indirect(struct drm_device * dev,
        }
 }
 
-static void radeon_cp_dispatch_indices(struct drm_device * dev,
+static void radeon_cp_dispatch_indices(struct drm_device *dev,
+                                      struct drm_master *master,
                                       struct drm_buf * elt_buf,
                                       drm_radeon_tcl_prim_t * prim)
 {
        drm_radeon_private_t *dev_priv = dev->dev_private;
-       drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
+       struct drm_radeon_master_private *master_priv = master->driver_priv;
+       drm_radeon_sarea_t *sarea_priv = master_priv->sarea_priv;
        int offset = dev_priv->gart_buffers_offset + prim->offset;
        u32 *data;
        int dwords;
@@ -1870,7 +1880,7 @@ static int radeon_cp_dispatch_texture(struct drm_device * dev,
                ADVANCE_RING();
                COMMIT_RING();
 
-               radeon_cp_discard_buffer(dev, buf);
+               radeon_cp_discard_buffer(dev, file_priv->master, buf);
 
                /* Update the input parameters for next time */
                image->y += height;
@@ -2110,7 +2120,8 @@ static int radeon_surface_free(struct drm_device *dev, void *data, struct drm_fi
 static int radeon_cp_clear(struct drm_device *dev, void *data, struct drm_file *file_priv)
 {
        drm_radeon_private_t *dev_priv = dev->dev_private;
-       drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
+       struct drm_radeon_master_private *master_priv = file_priv->master->driver_priv;
+       drm_radeon_sarea_t *sarea_priv = master_priv->sarea_priv;
        drm_radeon_clear_t *clear = data;
        drm_radeon_clear_rect_t depth_boxes[RADEON_NR_SAREA_CLIPRECTS];
        DRM_DEBUG("\n");
@@ -2126,7 +2137,7 @@ static int radeon_cp_clear(struct drm_device *dev, void *data, struct drm_file *
                               sarea_priv->nbox * sizeof(depth_boxes[0])))
                return -EFAULT;
 
-       radeon_cp_dispatch_clear(dev, clear, depth_boxes);
+       radeon_cp_dispatch_clear(dev, file_priv->master, clear, depth_boxes);
 
        COMMIT_RING();
        return 0;
@@ -2134,9 +2145,10 @@ static int radeon_cp_clear(struct drm_device *dev, void *data, struct drm_file *
 
 /* Not sure why this isn't set all the time:
  */
-static int radeon_do_init_pageflip(struct drm_device * dev)
+static int radeon_do_init_pageflip(struct drm_device *dev, struct drm_master *master)
 {
        drm_radeon_private_t *dev_priv = dev->dev_private;
+       struct drm_radeon_master_private *master_priv = master->driver_priv;
        RING_LOCALS;
 
        DRM_DEBUG("\n");
@@ -2153,8 +2165,8 @@ static int radeon_do_init_pageflip(struct drm_device * dev)
 
        dev_priv->page_flipping = 1;
 
-       if (dev_priv->sarea_priv->pfCurrentPage != 1)
-               dev_priv->sarea_priv->pfCurrentPage = 0;
+       if (master_priv->sarea_priv->pfCurrentPage != 1)
+               master_priv->sarea_priv->pfCurrentPage = 0;
 
        return 0;
 }
@@ -2172,9 +2184,9 @@ static int radeon_cp_flip(struct drm_device *dev, void *data, struct drm_file *f
        RING_SPACE_TEST_WITH_RETURN(dev_priv);
 
        if (!dev_priv->page_flipping)
-               radeon_do_init_pageflip(dev);
+               radeon_do_init_pageflip(dev, file_priv->master);
 
-       radeon_cp_dispatch_flip(dev);
+       radeon_cp_dispatch_flip(dev, file_priv->master);
 
        COMMIT_RING();
        return 0;
@@ -2183,7 +2195,9 @@ static int radeon_cp_flip(struct drm_device *dev, void *data, struct drm_file *f
 static int radeon_cp_swap(struct drm_device *dev, void *data, struct drm_file *file_priv)
 {
        drm_radeon_private_t *dev_priv = dev->dev_private;
-       drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
+       struct drm_radeon_master_private *master_priv = file_priv->master->driver_priv;
+       drm_radeon_sarea_t *sarea_priv = master_priv->sarea_priv;
+
        DRM_DEBUG("\n");
 
        LOCK_TEST_WITH_RETURN(dev, file_priv);
@@ -2193,8 +2207,8 @@ static int radeon_cp_swap(struct drm_device *dev, void *data, struct drm_file *f
        if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS)
                sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS;
 
-       radeon_cp_dispatch_swap(dev);
-       dev_priv->sarea_priv->ctx_owner = 0;
+       radeon_cp_dispatch_swap(dev, file_priv->master);
+       sarea_priv->ctx_owner = 0;
 
        COMMIT_RING();
        return 0;
@@ -2203,7 +2217,8 @@ static int radeon_cp_swap(struct drm_device *dev, void *data, struct drm_file *f
 static int radeon_cp_vertex(struct drm_device *dev, void *data, struct drm_file *file_priv)
 {
        drm_radeon_private_t *dev_priv = dev->dev_private;
-       drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
+       struct drm_radeon_master_private *master_priv = file_priv->master->driver_priv;
+       drm_radeon_sarea_t *sarea_priv;
        struct drm_device_dma *dma = dev->dma;
        struct drm_buf *buf;
        drm_radeon_vertex_t *vertex = data;
@@ -2211,6 +2226,8 @@ static int radeon_cp_vertex(struct drm_device *dev, void *data, struct drm_file
 
        LOCK_TEST_WITH_RETURN(dev, file_priv);
 
+       sarea_priv = master_priv->sarea_priv;
+
        DRM_DEBUG("pid=%d index=%d count=%d discard=%d\n",
                  DRM_CURRENTPID, vertex->idx, vertex->count, vertex->discard);
 
@@ -2263,13 +2280,13 @@ static int radeon_cp_vertex(struct drm_device *dev, void *data, struct drm_file
                prim.finish = vertex->count;    /* unused */
                prim.prim = vertex->prim;
                prim.numverts = vertex->count;
-               prim.vc_format = dev_priv->sarea_priv->vc_format;
+               prim.vc_format = sarea_priv->vc_format;
 
-               radeon_cp_dispatch_vertex(dev, buf, &prim);
+               radeon_cp_dispatch_vertex(dev, file_priv, buf, &prim);
        }
 
        if (vertex->discard) {
-               radeon_cp_discard_buffer(dev, buf);
+               radeon_cp_discard_buffer(dev, file_priv->master, buf);
        }
 
        COMMIT_RING();
@@ -2279,7 +2296,8 @@ static int radeon_cp_vertex(struct drm_device *dev, void *data, struct drm_file
 static int radeon_cp_indices(struct drm_device *dev, void *data, struct drm_file *file_priv)
 {
        drm_radeon_private_t *dev_priv = dev->dev_private;
-       drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
+       struct drm_radeon_master_private *master_priv = file_priv->master->driver_priv;
+       drm_radeon_sarea_t *sarea_priv;
        struct drm_device_dma *dma = dev->dma;
        struct drm_buf *buf;
        drm_radeon_indices_t *elts = data;
@@ -2288,6 +2306,8 @@ static int radeon_cp_indices(struct drm_device *dev, void *data, struct drm_file
 
        LOCK_TEST_WITH_RETURN(dev, file_priv);
 
+       sarea_priv = master_priv->sarea_priv;
+
        DRM_DEBUG("pid=%d index=%d start=%d end=%d discard=%d\n",
                  DRM_CURRENTPID, elts->idx, elts->start, elts->end,
                  elts->discard);
@@ -2353,11 +2373,11 @@ static int radeon_cp_indices(struct drm_device *dev, void *data, struct drm_file
        prim.prim = elts->prim;
        prim.offset = 0;        /* offset from start of dma buffers */
        prim.numverts = RADEON_MAX_VB_VERTS;    /* duh */
-       prim.vc_format = dev_priv->sarea_priv->vc_format;
+       prim.vc_format = sarea_priv->vc_format;
 
-       radeon_cp_dispatch_indices(dev, buf, &prim);
+       radeon_cp_dispatch_indices(dev, file_priv->master, buf, &prim);
        if (elts->discard) {
-               radeon_cp_discard_buffer(dev, buf);
+               radeon_cp_discard_buffer(dev, file_priv->master, buf);
        }
 
        COMMIT_RING();
@@ -2468,7 +2488,7 @@ static int radeon_cp_indirect(struct drm_device *dev, void *data, struct drm_fil
         */
        radeon_cp_dispatch_indirect(dev, buf, indirect->start, indirect->end);
        if (indirect->discard) {
-               radeon_cp_discard_buffer(dev, buf);
+               radeon_cp_discard_buffer(dev, file_priv->master, buf);
        }
 
        COMMIT_RING();
@@ -2478,7 +2498,8 @@ static int radeon_cp_indirect(struct drm_device *dev, void *data, struct drm_fil
 static int radeon_cp_vertex2(struct drm_device *dev, void *data, struct drm_file *file_priv)
 {
        drm_radeon_private_t *dev_priv = dev->dev_private;
-       drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
+       struct drm_radeon_master_private *master_priv = file_priv->master->driver_priv;
+       drm_radeon_sarea_t *sarea_priv;
        struct drm_device_dma *dma = dev->dma;
        struct drm_buf *buf;
        drm_radeon_vertex2_t *vertex = data;
@@ -2487,6 +2508,8 @@ static int radeon_cp_vertex2(struct drm_device *dev, void *data, struct drm_file
 
        LOCK_TEST_WITH_RETURN(dev, file_priv);
 
+       sarea_priv = master_priv->sarea_priv;
+
        DRM_DEBUG("pid=%d index=%d discard=%d\n",
                  DRM_CURRENTPID, vertex->idx, vertex->discard);
 
@@ -2547,12 +2570,12 @@ static int radeon_cp_vertex2(struct drm_device *dev, void *data, struct drm_file
                        tclprim.offset = prim.numverts * 64;
                        tclprim.numverts = RADEON_MAX_VB_VERTS; /* duh */
 
-                       radeon_cp_dispatch_indices(dev, buf, &tclprim);
+                       radeon_cp_dispatch_indices(dev, file_priv->master, buf, &tclprim);
                } else {
                        tclprim.numverts = prim.numverts;
                        tclprim.offset = 0;     /* not used */
 
-                       radeon_cp_dispatch_vertex(dev, buf, &tclprim);
+                       radeon_cp_dispatch_vertex(dev, file_priv, buf, &tclprim);
                }
 
                if (sarea_priv->nbox == 1)
@@ -2560,7 +2583,7 @@ static int radeon_cp_vertex2(struct drm_device *dev, void *data, struct drm_file
        }
 
        if (vertex->discard) {
-               radeon_cp_discard_buffer(dev, buf);
+               radeon_cp_discard_buffer(dev, file_priv->master, buf);
        }
 
        COMMIT_RING();
@@ -2909,7 +2932,7 @@ static int radeon_cp_cmdbuf(struct drm_device *dev, void *data, struct drm_file
                                goto err;
                        }
 
-                       radeon_cp_discard_buffer(dev, buf);
+                       radeon_cp_discard_buffer(dev, file_priv->master, buf);
                        break;
 
                case RADEON_CMD_PACKET3:
@@ -3020,7 +3043,7 @@ static int radeon_cp_getparam(struct drm_device *dev, void *data, struct drm_fil
                 */
        case RADEON_PARAM_SAREA_HANDLE:
                /* The lock is the first dword in the sarea. */
-               value = (long)dev->lock.hw_lock;
+               /* no users of this parameter */
                break;
 #endif
        case RADEON_PARAM_GART_TEX_HANDLE:
@@ -3064,6 +3087,7 @@ static int radeon_cp_getparam(struct drm_device *dev, void *data, struct drm_fil
 static int radeon_cp_setparam(struct drm_device *dev, void *data, struct drm_file *file_priv)
 {
        drm_radeon_private_t *dev_priv = dev->dev_private;
+       struct drm_radeon_master_private *master_priv = file_priv->master->driver_priv;
        drm_radeon_setparam_t *sp = data;
        struct drm_radeon_driver_file_fields *radeon_priv;
 
@@ -3078,12 +3102,14 @@ static int radeon_cp_setparam(struct drm_device *dev, void *data, struct drm_fil
                        DRM_DEBUG("color tiling disabled\n");
                        dev_priv->front_pitch_offset &= ~RADEON_DST_TILE_MACRO;
                        dev_priv->back_pitch_offset &= ~RADEON_DST_TILE_MACRO;
-                       dev_priv->sarea_priv->tiling_enabled = 0;
+                       if (master_priv->sarea_priv)
+                               master_priv->sarea_priv->tiling_enabled = 0;
                } else if (sp->value == 1) {
                        DRM_DEBUG("color tiling enabled\n");
                        dev_priv->front_pitch_offset |= RADEON_DST_TILE_MACRO;
                        dev_priv->back_pitch_offset |= RADEON_DST_TILE_MACRO;
-                       dev_priv->sarea_priv->tiling_enabled = 1;
+                       if (master_priv->sarea_priv)
+                               master_priv->sarea_priv->tiling_enabled = 1;
                }
                break;
        case RADEON_SETPARAM_PCIGART_LOCATION:
@@ -3129,14 +3155,6 @@ void radeon_driver_preclose(struct drm_device *dev, struct drm_file *file_priv)
 
 void radeon_driver_lastclose(struct drm_device *dev)
 {
-       if (dev->dev_private) {
-               drm_radeon_private_t *dev_priv = dev->dev_private;
-
-               if (dev_priv->sarea_priv &&
-                   dev_priv->sarea_priv->pfCurrentPage != 0)
-                       radeon_cp_dispatch_flip(dev);
-       }
-
        radeon_do_release(dev);
 }
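
The radeon hunks above all make the same substitution: the SAREA and the hardware lock are now per-master state reached through file_priv->master instead of fields of the device-wide private. A minimal sketch of that lookup, using the structure names from the diff (the helper itself is illustrative, not part of the patch):

    /* Illustrative only: resolve the SAREA belonging to the master that
     * this file handle is currently associated with. May be NULL before
     * the master has set up its SAREA. */
    static drm_radeon_sarea_t *radeon_sarea_of(struct drm_file *file_priv)
    {
            struct drm_radeon_master_private *master_priv =
                    file_priv->master->driver_priv;

            return master_priv ? master_priv->sarea_priv : NULL;
    }
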
 
index b9b7fc6ff1ebcc657370701e9dcc53d3ce20bfdb..e1ece89fe9222b5ef810516ca0ea7384fd98eb2e 100644 (file)
@@ -697,7 +697,7 @@ static enum hrtimer_restart ads7846_timer(struct hrtimer *handle)
        struct ads7846  *ts = container_of(handle, struct ads7846, timer);
        int             status = 0;
 
-       spin_lock_irq(&ts->lock);
+       spin_lock(&ts->lock);
 
        if (unlikely(!get_pendown_state(ts) ||
                     device_suspended(&ts->spi->dev))) {
@@ -728,7 +728,7 @@ static enum hrtimer_restart ads7846_timer(struct hrtimer *handle)
                        dev_err(&ts->spi->dev, "spi_async --> %d\n", status);
        }
 
-       spin_unlock_irq(&ts->lock);
+       spin_unlock(&ts->lock);
        return HRTIMER_NORESTART;
 }
 
index ce26c84af064339f196a0a2eb62b0c55866408b2..3326750ec02c9d056329cb075842954012bc25cb 100644 (file)
@@ -1060,7 +1060,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
                goto bad_page_pool;
        }
 
-       cc->bs = bioset_create(MIN_IOS, MIN_IOS);
+       cc->bs = bioset_create(MIN_IOS, 0);
        if (!cc->bs) {
                ti->error = "Cannot allocate crypt bioset";
                goto bad_bs;
index 2fd6d4450637943963d88c948080cf4f0ed73a92..a34338567a2a601c3cdbda72441d54299301f89a 100644 (file)
@@ -56,7 +56,7 @@ struct dm_io_client *dm_io_client_create(unsigned num_pages)
        if (!client->pool)
                goto bad;
 
-       client->bios = bioset_create(16, 16);
+       client->bios = bioset_create(16, 0);
        if (!client->bios)
                goto bad;
 
index 343094c3feeb834c55f8aa1ecd3baa57890eca1d..421c9f02d8ca7329c41168e7beb5f1c2b3a6c6d1 100644 (file)
@@ -1093,7 +1093,7 @@ static struct mapped_device *alloc_dev(int minor)
        if (!md->tio_pool)
                goto bad_tio_pool;
 
-       md->bs = bioset_create(16, 16);
+       md->bs = bioset_create(16, 0);
        if (!md->bs)
                goto bad_no_bioset;
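
These device-mapper callers previously passed a bio_vec pool size as the second argument; with the fs/bio.c rework later in this diff that argument is reinterpreted as front padding in bytes, so plain biosets now pass 0. The change in signature, copied from the fs/bio.c hunks below:

    /* Old prototype: two pool sizes.
     *   struct bio_set *bioset_create(int bio_pool_size, int bvec_pool_size);
     * New prototype: one pool size plus front padding in bytes (0 = none). */
    struct bio_set *bioset_create(unsigned int pool_size,
                                  unsigned int front_pad);
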
 
index 2de5a3238c947be89213119d9cf468d2fa59e11d..f78371b2252976efb8ab86c7eaf87a7240949ea5 100644 (file)
@@ -5,6 +5,7 @@
 #include <linux/pci.h>
 #include <linux/irq.h>
 #include <asm/io_apic.h>
+#include <asm/smp.h>
 #include <linux/intel-iommu.h>
 #include "intr_remapping.h"
 
@@ -19,17 +20,75 @@ struct irq_2_iommu {
        u8  irte_mask;
 };
 
-static struct irq_2_iommu irq_2_iommuX[NR_IRQS];
+#ifdef CONFIG_SPARSE_IRQ
+static struct irq_2_iommu *get_one_free_irq_2_iommu(int cpu)
+{
+       struct irq_2_iommu *iommu;
+       int node;
+
+       node = cpu_to_node(cpu);
+
+       iommu = kzalloc_node(sizeof(*iommu), GFP_ATOMIC, node);
+       printk(KERN_DEBUG "alloc irq_2_iommu on cpu %d node %d\n", cpu, node);
+
+       return iommu;
+}
 
 static struct irq_2_iommu *irq_2_iommu(unsigned int irq)
 {
-       return (irq < nr_irqs) ? irq_2_iommuX + irq : NULL;
+       struct irq_desc *desc;
+
+       desc = irq_to_desc(irq);
+
+       if (WARN_ON_ONCE(!desc))
+               return NULL;
+
+       return desc->irq_2_iommu;
+}
+
+static struct irq_2_iommu *irq_2_iommu_alloc_cpu(unsigned int irq, int cpu)
+{
+       struct irq_desc *desc;
+       struct irq_2_iommu *irq_iommu;
+
+       /*
+        * alloc irq desc if not allocated already.
+        */
+       desc = irq_to_desc_alloc_cpu(irq, cpu);
+       if (!desc) {
+               printk(KERN_INFO "can not get irq_desc for %d\n", irq);
+               return NULL;
+       }
+
+       irq_iommu = desc->irq_2_iommu;
+
+       if (!irq_iommu)
+               desc->irq_2_iommu = get_one_free_irq_2_iommu(cpu);
+
+       return desc->irq_2_iommu;
 }
 
+static struct irq_2_iommu *irq_2_iommu_alloc(unsigned int irq)
+{
+       return irq_2_iommu_alloc_cpu(irq, boot_cpu_id);
+}
+
+#else /* !CONFIG_SPARSE_IRQ */
+
+static struct irq_2_iommu irq_2_iommuX[NR_IRQS];
+
+static struct irq_2_iommu *irq_2_iommu(unsigned int irq)
+{
+       if (irq < nr_irqs)
+               return &irq_2_iommuX[irq];
+
+       return NULL;
+}
 static struct irq_2_iommu *irq_2_iommu_alloc(unsigned int irq)
 {
        return irq_2_iommu(irq);
 }
+#endif
 
 static DEFINE_SPINLOCK(irq_2_ir_lock);
 
@@ -86,9 +145,11 @@ int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
        if (!count)
                return -1;
 
+#ifndef CONFIG_SPARSE_IRQ
        /* protect irq_2_iommu_alloc later */
        if (irq >= nr_irqs)
                return -1;
+#endif
 
        /*
         * start the IRTE search from index 0.
@@ -130,6 +191,12 @@ int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
                table->base[i].present = 1;
 
        irq_iommu = irq_2_iommu_alloc(irq);
+       if (!irq_iommu) {
+               spin_unlock(&irq_2_ir_lock);
+               printk(KERN_ERR "can't allocate irq_2_iommu\n");
+               return -1;
+       }
+
        irq_iommu->iommu = iommu;
        irq_iommu->irte_index =  index;
        irq_iommu->sub_handle = 0;
@@ -177,6 +244,12 @@ int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, u16 subhandle)
 
        irq_iommu = irq_2_iommu_alloc(irq);
 
+       if (!irq_iommu) {
+               spin_unlock(&irq_2_ir_lock);
+               printk(KERN_ERR "can't allocate irq_2_iommu\n");
+               return -1;
+       }
+
        irq_iommu->iommu = iommu;
        irq_iommu->irte_index = index;
        irq_iommu->sub_handle = subhandle;
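
With CONFIG_SPARSE_IRQ the irq_2_iommu state is no longer a static NR_IRQS array; it hangs off each irq_desc and is allocated on demand, so allocation can now fail and every caller has to check for NULL, as the two hunks above do. A condensed sketch of the caller-side pattern, under the same spinlock as in the diff (the wrapper function name is hypothetical):

    /* Hypothetical caller: the mapping may now fail to allocate. */
    static int my_map_irte(struct intel_iommu *iommu, int irq, u16 index)
    {
            struct irq_2_iommu *irq_iommu;

            spin_lock(&irq_2_ir_lock);
            irq_iommu = irq_2_iommu_alloc(irq);
            if (!irq_iommu) {
                    spin_unlock(&irq_2_ir_lock);
                    printk(KERN_ERR "can't allocate irq_2_iommu\n");
                    return -1;
            }
            irq_iommu->iommu = iommu;
            irq_iommu->irte_index = index;
            spin_unlock(&irq_2_ir_lock);
            return 0;
    }
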
index 74801f7df9c901ad21017ce2a831e0cd61c8af1c..11a51f8ed3b3b96ffa72820f195c6a7afa4bfa40 100644 (file)
@@ -103,11 +103,11 @@ static void msix_set_enable(struct pci_dev *dev, int enable)
        }
 }
 
-static void msix_flush_writes(unsigned int irq)
+static void msix_flush_writes(struct irq_desc *desc)
 {
        struct msi_desc *entry;
 
-       entry = get_irq_msi(irq);
+       entry = get_irq_desc_msi(desc);
        BUG_ON(!entry || !entry->dev);
        switch (entry->msi_attrib.type) {
        case PCI_CAP_ID_MSI:
@@ -135,11 +135,11 @@ static void msix_flush_writes(unsigned int irq)
  * Returns 1 if it succeeded in masking the interrupt and 0 if the device
  * doesn't support MSI masking.
  */
-static int msi_set_mask_bits(unsigned int irq, u32 mask, u32 flag)
+static int msi_set_mask_bits(struct irq_desc *desc, u32 mask, u32 flag)
 {
        struct msi_desc *entry;
 
-       entry = get_irq_msi(irq);
+       entry = get_irq_desc_msi(desc);
        BUG_ON(!entry || !entry->dev);
        switch (entry->msi_attrib.type) {
        case PCI_CAP_ID_MSI:
@@ -172,9 +172,9 @@ static int msi_set_mask_bits(unsigned int irq, u32 mask, u32 flag)
        return 1;
 }
 
-void read_msi_msg(unsigned int irq, struct msi_msg *msg)
+void read_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg)
 {
-       struct msi_desc *entry = get_irq_msi(irq);
+       struct msi_desc *entry = get_irq_desc_msi(desc);
        switch(entry->msi_attrib.type) {
        case PCI_CAP_ID_MSI:
        {
@@ -211,9 +211,16 @@ void read_msi_msg(unsigned int irq, struct msi_msg *msg)
        }
 }
 
-void write_msi_msg(unsigned int irq, struct msi_msg *msg)
+void read_msi_msg(unsigned int irq, struct msi_msg *msg)
 {
-       struct msi_desc *entry = get_irq_msi(irq);
+       struct irq_desc *desc = irq_to_desc(irq);
+
+       read_msi_msg_desc(desc, msg);
+}
+
+void write_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg)
+{
+       struct msi_desc *entry = get_irq_desc_msi(desc);
        switch (entry->msi_attrib.type) {
        case PCI_CAP_ID_MSI:
        {
@@ -252,21 +259,31 @@ void write_msi_msg(unsigned int irq, struct msi_msg *msg)
        entry->msg = *msg;
 }
 
+void write_msi_msg(unsigned int irq, struct msi_msg *msg)
+{
+       struct irq_desc *desc = irq_to_desc(irq);
+
+       write_msi_msg_desc(desc, msg);
+}
+
 void mask_msi_irq(unsigned int irq)
 {
-       msi_set_mask_bits(irq, 1, 1);
-       msix_flush_writes(irq);
+       struct irq_desc *desc = irq_to_desc(irq);
+
+       msi_set_mask_bits(desc, 1, 1);
+       msix_flush_writes(desc);
 }
 
 void unmask_msi_irq(unsigned int irq)
 {
-       msi_set_mask_bits(irq, 1, 0);
-       msix_flush_writes(irq);
+       struct irq_desc *desc = irq_to_desc(irq);
+
+       msi_set_mask_bits(desc, 1, 0);
+       msix_flush_writes(desc);
 }
 
 static int msi_free_irqs(struct pci_dev* dev);
 
-
 static struct msi_desc* alloc_msi_entry(void)
 {
        struct msi_desc *entry;
@@ -303,9 +320,11 @@ static void __pci_restore_msi_state(struct pci_dev *dev)
        pci_intx_for_msi(dev, 0);
        msi_set_enable(dev, 0);
        write_msi_msg(dev->irq, &entry->msg);
-       if (entry->msi_attrib.maskbit)
-               msi_set_mask_bits(dev->irq, entry->msi_attrib.maskbits_mask,
+       if (entry->msi_attrib.maskbit) {
+               struct irq_desc *desc = irq_to_desc(dev->irq);
+               msi_set_mask_bits(desc, entry->msi_attrib.maskbits_mask,
                                  entry->msi_attrib.masked);
+       }
 
        pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control);
        control &= ~PCI_MSI_FLAGS_QSIZE;
@@ -327,8 +346,9 @@ static void __pci_restore_msix_state(struct pci_dev *dev)
        msix_set_enable(dev, 0);
 
        list_for_each_entry(entry, &dev->msi_list, list) {
+               struct irq_desc *desc = irq_to_desc(entry->irq);
                write_msi_msg(entry->irq, &entry->msg);
-               msi_set_mask_bits(entry->irq, 1, entry->msi_attrib.masked);
+               msi_set_mask_bits(desc, 1, entry->msi_attrib.masked);
        }
 
        BUG_ON(list_empty(&dev->msi_list));
@@ -596,7 +616,8 @@ void pci_msi_shutdown(struct pci_dev* dev)
        /* Return the the pci reset with msi irqs unmasked */
        if (entry->msi_attrib.maskbit) {
                u32 mask = entry->msi_attrib.maskbits_mask;
-               msi_set_mask_bits(dev->irq, mask, ~mask);
+               struct irq_desc *desc = irq_to_desc(dev->irq);
+               msi_set_mask_bits(desc, mask, ~mask);
        }
        if (!entry->dev || entry->msi_attrib.type != PCI_CAP_ID_MSI)
                return;
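
The MSI helpers are reworked so the core routines take a struct irq_desc * directly, with the old IRQ-number entry points kept as thin wrappers around irq_to_desc(). Code that already holds a descriptor can call the *_desc variants and skip the lookup; a hedged sketch of such a caller (the function itself is hypothetical, only the two *_desc helpers come from the diff):

    /* Hypothetical caller that already holds the descriptor, e.g. from an
     * irq_chip callback, and so avoids a redundant irq_to_desc() lookup. */
    static void my_msi_resync(struct irq_desc *desc)
    {
            struct msi_msg msg;

            read_msi_msg_desc(desc, &msg);
            write_msi_msg_desc(desc, &msg);
    }
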
index 448d209a0bf2109fa14af7d3c6a3b00e08c7df1b..e6210725b9ab59615958db9796b8b301297ce1bd 100644 (file)
@@ -112,6 +112,23 @@ static int                 vga_video_font_height;
 static int             vga_scan_lines          __read_mostly;
 static unsigned int    vga_rolled_over;
 
+int vgacon_text_mode_force = 0;
+
+bool vgacon_text_force(void)
+{
+       return vgacon_text_mode_force ? true : false;
+}
+EXPORT_SYMBOL(vgacon_text_force);
+
+static int __init text_mode(char *str)
+{
+       vgacon_text_mode_force = 1;
+       return 1;
+}
+
+/* force text mode - used by kernel modesetting */
+__setup("nomodeset", text_mode);
+
 static int __init no_scroll(char *str)
 {
        /*
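
The new nomodeset parameter only sets a flag; it is up to kernel-modesetting drivers to honour it by checking the exported vgacon_text_force() before taking over the display. A hedged sketch of how a KMS driver's init path might use it (the driver init and registration functions are hypothetical):

    static int __init my_kms_driver_init(void)
    {
            /* "nomodeset" on the kernel command line forces text mode:
             * bail out before touching the display hardware. */
            if (vgacon_text_force()) {
                    printk(KERN_INFO "my_kms: modesetting disabled by nomodeset\n");
                    return -ENODEV;
            }

            return my_kms_register();       /* hypothetical registration call */
    }
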
index 41d62632dcdb06bfc4b62f3266bb0ad6e8bda7ca..39d5d643a50b9397d67a26f37d3f0572c5561b4c 100644 (file)
@@ -1513,7 +1513,7 @@ static int cyberpro_pci_enable_mmio(struct cfb_info *cfb)
 
        iop = ioremap(0x3000000, 0x5000);
        if (iop == NULL) {
-               prom_printf("iga5000: cannot map I/O\n");
+               printk(KERN_ERR "iga5000: cannot map I/O\n");
                return -ENOMEM;
        }
 
index 1e3b934a4cf707cbe06084f21efb39874d4802f4..46625cd38743c2506e1fc024c8e1c4d434a4fa26 100644 (file)
@@ -141,8 +141,12 @@ static void init_evtchn_cpu_bindings(void)
        int i;
 
        /* By default all event channels notify CPU#0. */
-       for_each_irq_desc(i, desc)
+       for_each_irq_desc(i, desc) {
+               if (!desc)
+                       continue;
+
                desc->affinity = cpumask_of_cpu(0);
+       }
 #endif
 
        memset(cpu_evtchn, 0, sizeof(cpu_evtchn));
@@ -229,15 +233,20 @@ static void unmask_evtchn(int port)
 static int find_unbound_irq(void)
 {
        int irq;
+       struct irq_desc *desc;
 
        /* Only allocate from dynirq range */
-       for_each_irq_nr(irq)
+       for (irq = 0; irq < nr_irqs; irq++)
                if (irq_bindcount[irq] == 0)
                        break;
 
        if (irq == nr_irqs)
                panic("No available IRQ to bind to: increase nr_irqs!\n");
 
+       desc = irq_to_desc_alloc_cpu(irq, 0);
+       if (WARN_ON(desc == NULL))
+               return -1;
+
        return irq;
 }
 
@@ -792,7 +801,7 @@ void xen_irq_resume(void)
                mask_evtchn(evtchn);
 
        /* No IRQ <-> event-channel mappings. */
-       for_each_irq_nr(irq)
+       for (irq = 0; irq < nr_irqs; irq++)
                irq_info[irq].evtchn = 0; /* zap event-channel binding */
 
        for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
@@ -824,7 +833,7 @@ void __init xen_init_IRQ(void)
                mask_evtchn(i);
 
        /* Dynamic IRQ space is currently unbound. Zero the refcnts. */
-       for_each_irq_nr(i)
+       for (i = 0; i < nr_irqs; i++)
                irq_bindcount[i] = 0;
 
        irq_ctx_init(smp_processor_id());
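
The Xen event-channel code (and the /proc/stat hunks later in this diff) switch from for_each_irq_nr() to explicit loops over nr_irqs because, under CONFIG_SPARSE_IRQ, an IRQ number may have no descriptor allocated yet. A small sketch of the guard idiom these hunks adopt (the function name is illustrative):

    /* Sketch: walking IRQ numbers when descriptors may be missing. */
    static void my_scan_irqs(void)
    {
            unsigned int irq;

            for (irq = 0; irq < nr_irqs; irq++) {
                    struct irq_desc *desc = irq_to_desc(irq);

                    if (!desc)
                            continue;   /* sparse: not allocated yet; use
                                         * irq_to_desc_alloc_cpu() to create it */
                    /* ... inspect or update desc ... */
            }
    }
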
index f658441d5666c9d108f8ec337b2e0909686723bb..d6f89d3c15e80f0ebd15699e62959712fad4b7bc 100644 (file)
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -191,6 +191,20 @@ static int aio_setup_ring(struct kioctx *ctx)
        kunmap_atomic((void *)((unsigned long)__event & PAGE_MASK), km); \
 } while(0)
 
+static void ctx_rcu_free(struct rcu_head *head)
+{
+       struct kioctx *ctx = container_of(head, struct kioctx, rcu_head);
+       unsigned nr_events = ctx->max_reqs;
+
+       kmem_cache_free(kioctx_cachep, ctx);
+
+       if (nr_events) {
+               spin_lock(&aio_nr_lock);
+               BUG_ON(aio_nr - nr_events > aio_nr);
+               aio_nr -= nr_events;
+               spin_unlock(&aio_nr_lock);
+       }
+}
 
 /* __put_ioctx
  *     Called when the last user of an aio context has gone away,
@@ -198,8 +212,6 @@ static int aio_setup_ring(struct kioctx *ctx)
  */
 static void __put_ioctx(struct kioctx *ctx)
 {
-       unsigned nr_events = ctx->max_reqs;
-
        BUG_ON(ctx->reqs_active);
 
        cancel_delayed_work(&ctx->wq);
@@ -208,14 +220,7 @@ static void __put_ioctx(struct kioctx *ctx)
        mmdrop(ctx->mm);
        ctx->mm = NULL;
        pr_debug("__put_ioctx: freeing %p\n", ctx);
-       kmem_cache_free(kioctx_cachep, ctx);
-
-       if (nr_events) {
-               spin_lock(&aio_nr_lock);
-               BUG_ON(aio_nr - nr_events > aio_nr);
-               aio_nr -= nr_events;
-               spin_unlock(&aio_nr_lock);
-       }
+       call_rcu(&ctx->rcu_head, ctx_rcu_free);
 }
 
 #define get_ioctx(kioctx) do {                                         \
@@ -235,6 +240,7 @@ static struct kioctx *ioctx_alloc(unsigned nr_events)
 {
        struct mm_struct *mm;
        struct kioctx *ctx;
+       int did_sync = 0;
 
        /* Prevent overflows */
        if ((nr_events > (0x10000000U / sizeof(struct io_event))) ||
@@ -267,21 +273,30 @@ static struct kioctx *ioctx_alloc(unsigned nr_events)
                goto out_freectx;
 
        /* limit the number of system wide aios */
-       spin_lock(&aio_nr_lock);
-       if (aio_nr + ctx->max_reqs > aio_max_nr ||
-           aio_nr + ctx->max_reqs < aio_nr)
-               ctx->max_reqs = 0;
-       else
-               aio_nr += ctx->max_reqs;
-       spin_unlock(&aio_nr_lock);
+       do {
+               spin_lock_bh(&aio_nr_lock);
+               if (aio_nr + nr_events > aio_max_nr ||
+                   aio_nr + nr_events < aio_nr)
+                       ctx->max_reqs = 0;
+               else
+                       aio_nr += ctx->max_reqs;
+               spin_unlock_bh(&aio_nr_lock);
+               if (ctx->max_reqs || did_sync)
+                       break;
+
+               /* wait for rcu callbacks to have completed before giving up */
+               synchronize_rcu();
+               did_sync = 1;
+               ctx->max_reqs = nr_events;
+       } while (1);
+
        if (ctx->max_reqs == 0)
                goto out_cleanup;
 
        /* now link into global list. */
-       write_lock(&mm->ioctx_list_lock);
-       ctx->next = mm->ioctx_list;
-       mm->ioctx_list = ctx;
-       write_unlock(&mm->ioctx_list_lock);
+       spin_lock(&mm->ioctx_lock);
+       hlist_add_head_rcu(&ctx->list, &mm->ioctx_list);
+       spin_unlock(&mm->ioctx_lock);
 
        dprintk("aio: allocated ioctx %p[%ld]: mm=%p mask=0x%x\n",
                ctx, ctx->user_id, current->mm, ctx->ring_info.nr);
@@ -375,11 +390,12 @@ ssize_t wait_on_sync_kiocb(struct kiocb *iocb)
  */
 void exit_aio(struct mm_struct *mm)
 {
-       struct kioctx *ctx = mm->ioctx_list;
-       mm->ioctx_list = NULL;
-       while (ctx) {
-               struct kioctx *next = ctx->next;
-               ctx->next = NULL;
+       struct kioctx *ctx;
+
+       while (!hlist_empty(&mm->ioctx_list)) {
+               ctx = hlist_entry(mm->ioctx_list.first, struct kioctx, list);
+               hlist_del_rcu(&ctx->list);
+
                aio_cancel_all(ctx);
 
                wait_for_all_aios(ctx);
@@ -394,7 +410,6 @@ void exit_aio(struct mm_struct *mm)
                                atomic_read(&ctx->users), ctx->dead,
                                ctx->reqs_active);
                put_ioctx(ctx);
-               ctx = next;
        }
 }
 
@@ -555,19 +570,21 @@ int aio_put_req(struct kiocb *req)
 
 static struct kioctx *lookup_ioctx(unsigned long ctx_id)
 {
-       struct kioctx *ioctx;
-       struct mm_struct *mm;
+       struct mm_struct *mm = current->mm;
+       struct kioctx *ctx = NULL;
+       struct hlist_node *n;
 
-       mm = current->mm;
-       read_lock(&mm->ioctx_list_lock);
-       for (ioctx = mm->ioctx_list; ioctx; ioctx = ioctx->next)
-               if (likely(ioctx->user_id == ctx_id && !ioctx->dead)) {
-                       get_ioctx(ioctx);
+       rcu_read_lock();
+
+       hlist_for_each_entry_rcu(ctx, n, &mm->ioctx_list, list) {
+               if (ctx->user_id == ctx_id && !ctx->dead) {
+                       get_ioctx(ctx);
                        break;
                }
-       read_unlock(&mm->ioctx_list_lock);
+       }
 
-       return ioctx;
+       rcu_read_unlock();
+       return ctx;
 }
 
 /*
@@ -1215,19 +1232,14 @@ out:
 static void io_destroy(struct kioctx *ioctx)
 {
        struct mm_struct *mm = current->mm;
-       struct kioctx **tmp;
        int was_dead;
 
        /* delete the entry from the list is someone else hasn't already */
-       write_lock(&mm->ioctx_list_lock);
+       spin_lock(&mm->ioctx_lock);
        was_dead = ioctx->dead;
        ioctx->dead = 1;
-       for (tmp = &mm->ioctx_list; *tmp && *tmp != ioctx;
-            tmp = &(*tmp)->next)
-               ;
-       if (*tmp)
-               *tmp = ioctx->next;
-       write_unlock(&mm->ioctx_list_lock);
+       hlist_del_rcu(&ioctx->list);
+       spin_unlock(&mm->ioctx_lock);
 
        dprintk("aio_release(%p)\n", ioctx);
        if (likely(!was_dead))
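
The per-mm ioctx list moves from a rwlock-protected singly linked list to an RCU-protected hlist: lookup_ioctx() walks it under rcu_read_lock() and takes a reference while the entry is still visible, and the final kmem_cache_free() is deferred through call_rcu() so a concurrent lookup can never touch freed memory. The write-side lifetime rule, condensed from the io_destroy() and __put_ioctx() hunks above (a fragment, not a literal function from the patch):

    /* Condensed from the hunks above: unlink under the spinlock and mark
     * the context dead ... */
    spin_lock(&mm->ioctx_lock);
    ioctx->dead = 1;
    hlist_del_rcu(&ioctx->list);
    spin_unlock(&mm->ioctx_lock);

    /* ... and only when the last reference is dropped does __put_ioctx()
     * hand the actual free to RCU, after a grace period: */
    call_rcu(&ctx->rcu_head, ctx_rcu_free);
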
index 19caf7c962ace6c58868ed4fd2196212faf755ef..77ebc3c263d6549b5934017b3adfacd7752dc802 100644 (file)
@@ -111,7 +111,7 @@ void bio_integrity_free(struct bio *bio, struct bio_set *bs)
            && bip->bip_buf != NULL)
                kfree(bip->bip_buf);
 
-       mempool_free(bip->bip_vec, bs->bvec_pools[bip->bip_pool]);
+       bvec_free_bs(bs, bip->bip_vec, bip->bip_pool);
        mempool_free(bip, bs->bio_integrity_pool);
 
        bio->bi_integrity = NULL;
index df99c882b807549f25c30b13416a3ae8ca43d7e0..711cee10360273cd76c535ed346060a7a32f7c29 100644 (file)
--- a/fs/bio.c
+++ b/fs/bio.c
 
 DEFINE_TRACE(block_split);
 
-static struct kmem_cache *bio_slab __read_mostly;
+/*
+ * Test patch to inline a certain number of bi_io_vec's inside the bio
+ * itself, to shrink a bio data allocation from two mempool calls to one
+ */
+#define BIO_INLINE_VECS                4
 
 static mempool_t *bio_split_pool __read_mostly;
 
@@ -40,9 +44,8 @@ static mempool_t *bio_split_pool __read_mostly;
  * break badly! cannot be bigger than what you can fit into an
  * unsigned short
  */
-
 #define BV(x) { .nr_vecs = x, .name = "biovec-"__stringify(x) }
-static struct biovec_slab bvec_slabs[BIOVEC_NR_POOLS] __read_mostly = {
+struct biovec_slab bvec_slabs[BIOVEC_NR_POOLS] __read_mostly = {
        BV(1), BV(4), BV(16), BV(64), BV(128), BV(BIO_MAX_PAGES),
 };
 #undef BV
@@ -53,12 +56,121 @@ static struct biovec_slab bvec_slabs[BIOVEC_NR_POOLS] __read_mostly = {
  */
 struct bio_set *fs_bio_set;
 
+/*
+ * Our slab pool management
+ */
+struct bio_slab {
+       struct kmem_cache *slab;
+       unsigned int slab_ref;
+       unsigned int slab_size;
+       char name[8];
+};
+static DEFINE_MUTEX(bio_slab_lock);
+static struct bio_slab *bio_slabs;
+static unsigned int bio_slab_nr, bio_slab_max;
+
+static struct kmem_cache *bio_find_or_create_slab(unsigned int extra_size)
+{
+       unsigned int sz = sizeof(struct bio) + extra_size;
+       struct kmem_cache *slab = NULL;
+       struct bio_slab *bslab;
+       unsigned int i, entry = -1;
+
+       mutex_lock(&bio_slab_lock);
+
+       i = 0;
+       while (i < bio_slab_nr) {
+               struct bio_slab *bslab = &bio_slabs[i];
+
+               if (!bslab->slab && entry == -1)
+                       entry = i;
+               else if (bslab->slab_size == sz) {
+                       slab = bslab->slab;
+                       bslab->slab_ref++;
+                       break;
+               }
+               i++;
+       }
+
+       if (slab)
+               goto out_unlock;
+
+       if (bio_slab_nr == bio_slab_max && entry == -1) {
+               bio_slab_max <<= 1;
+               bio_slabs = krealloc(bio_slabs,
+                                    bio_slab_max * sizeof(struct bio_slab),
+                                    GFP_KERNEL);
+               if (!bio_slabs)
+                       goto out_unlock;
+       }
+       if (entry == -1)
+               entry = bio_slab_nr++;
+
+       bslab = &bio_slabs[entry];
+
+       snprintf(bslab->name, sizeof(bslab->name), "bio-%d", entry);
+       slab = kmem_cache_create(bslab->name, sz, 0, SLAB_HWCACHE_ALIGN, NULL);
+       if (!slab)
+               goto out_unlock;
+
+       printk("bio: create slab <%s> at %d\n", bslab->name, entry);
+       bslab->slab = slab;
+       bslab->slab_ref = 1;
+       bslab->slab_size = sz;
+out_unlock:
+       mutex_unlock(&bio_slab_lock);
+       return slab;
+}
+
+static void bio_put_slab(struct bio_set *bs)
+{
+       struct bio_slab *bslab = NULL;
+       unsigned int i;
+
+       mutex_lock(&bio_slab_lock);
+
+       for (i = 0; i < bio_slab_nr; i++) {
+               if (bs->bio_slab == bio_slabs[i].slab) {
+                       bslab = &bio_slabs[i];
+                       break;
+               }
+       }
+
+       if (WARN(!bslab, KERN_ERR "bio: unable to find slab!\n"))
+               goto out;
+
+       WARN_ON(!bslab->slab_ref);
+
+       if (--bslab->slab_ref)
+               goto out;
+
+       kmem_cache_destroy(bslab->slab);
+       bslab->slab = NULL;
+
+out:
+       mutex_unlock(&bio_slab_lock);
+}
+
 unsigned int bvec_nr_vecs(unsigned short idx)
 {
        return bvec_slabs[idx].nr_vecs;
 }
 
-struct bio_vec *bvec_alloc_bs(gfp_t gfp_mask, int nr, unsigned long *idx, struct bio_set *bs)
+void bvec_free_bs(struct bio_set *bs, struct bio_vec *bv, unsigned int idx)
+{
+       BIO_BUG_ON(idx >= BIOVEC_NR_POOLS);
+
+       if (idx == BIOVEC_MAX_IDX)
+               mempool_free(bv, bs->bvec_pool);
+       else {
+               struct biovec_slab *bvs = bvec_slabs + idx;
+
+               kmem_cache_free(bvs->slab, bv);
+       }
+}
+
+struct bio_vec *bvec_alloc_bs(gfp_t gfp_mask, int nr, unsigned long *idx,
+                             struct bio_set *bs)
 {
        struct bio_vec *bvl;
 
@@ -67,60 +179,85 @@ struct bio_vec *bvec_alloc_bs(gfp_t gfp_mask, int nr, unsigned long *idx, struct
         * If not, this is a bio_kmalloc() allocation and just do a
         * kzalloc() for the exact number of vecs right away.
         */
-       if (bs) {
+       if (!bs)
+               bvl = kmalloc(nr * sizeof(struct bio_vec), gfp_mask);
+
+       /*
+        * see comment near bvec_array define!
+        */
+       switch (nr) {
+       case 1:
+               *idx = 0;
+               break;
+       case 2 ... 4:
+               *idx = 1;
+               break;
+       case 5 ... 16:
+               *idx = 2;
+               break;
+       case 17 ... 64:
+               *idx = 3;
+               break;
+       case 65 ... 128:
+               *idx = 4;
+               break;
+       case 129 ... BIO_MAX_PAGES:
+               *idx = 5;
+               break;
+       default:
+               return NULL;
+       }
+
+       /*
+        * idx now points to the pool we want to allocate from. only the
+        * 1-vec entry pool is mempool backed.
+        */
+       if (*idx == BIOVEC_MAX_IDX) {
+fallback:
+               bvl = mempool_alloc(bs->bvec_pool, gfp_mask);
+       } else {
+               struct biovec_slab *bvs = bvec_slabs + *idx;
+               gfp_t __gfp_mask = gfp_mask & ~(__GFP_WAIT | __GFP_IO);
+
                /*
-                * see comment near bvec_array define!
+                * Make this allocation restricted and don't dump info on
+                * allocation failures, since we'll fallback to the mempool
+                * in case of failure.
                 */
-               switch (nr) {
-               case 1:
-                       *idx = 0;
-                       break;
-               case 2 ... 4:
-                       *idx = 1;
-                       break;
-               case 5 ... 16:
-                       *idx = 2;
-                       break;
-               case 17 ... 64:
-                       *idx = 3;
-                       break;
-               case 65 ... 128:
-                       *idx = 4;
-                       break;
-               case 129 ... BIO_MAX_PAGES:
-                       *idx = 5;
-                       break;
-               default:
-                       return NULL;
-               }
+               __gfp_mask |= __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN;
 
                /*
-                * idx now points to the pool we want to allocate from
+                * Try a slab allocation. If this fails and __GFP_WAIT
+                * is set, retry with the 1-entry mempool
                 */
-               bvl = mempool_alloc(bs->bvec_pools[*idx], gfp_mask);
-               if (bvl)
-                       memset(bvl, 0,
-                               bvec_nr_vecs(*idx) * sizeof(struct bio_vec));
-       } else
-               bvl = kzalloc(nr * sizeof(struct bio_vec), gfp_mask);
+               bvl = kmem_cache_alloc(bvs->slab, __gfp_mask);
+               if (unlikely(!bvl && (gfp_mask & __GFP_WAIT))) {
+                       *idx = BIOVEC_MAX_IDX;
+                       goto fallback;
+               }
+       }
 
        return bvl;
 }
 
-void bio_free(struct bio *bio, struct bio_set *bio_set)
+void bio_free(struct bio *bio, struct bio_set *bs)
 {
-       if (bio->bi_io_vec) {
-               const int pool_idx = BIO_POOL_IDX(bio);
+       void *p;
 
-               BIO_BUG_ON(pool_idx >= BIOVEC_NR_POOLS);
-
-               mempool_free(bio->bi_io_vec, bio_set->bvec_pools[pool_idx]);
-       }
+       if (bio_has_allocated_vec(bio))
+               bvec_free_bs(bs, bio->bi_io_vec, BIO_POOL_IDX(bio));
 
        if (bio_integrity(bio))
-               bio_integrity_free(bio, bio_set);
+               bio_integrity_free(bio, bs);
+
+       /*
+        * If we have front padding, adjust the bio pointer before freeing
+        */
+       p = bio;
+       if (bs->front_pad)
+               p -= bs->front_pad;
 
-       mempool_free(bio, bio_set->bio_pool);
+       mempool_free(p, bs->bio_pool);
 }
 
 /*
@@ -133,7 +270,8 @@ static void bio_fs_destructor(struct bio *bio)
 
 static void bio_kmalloc_destructor(struct bio *bio)
 {
-       kfree(bio->bi_io_vec);
+       if (bio_has_allocated_vec(bio))
+               kfree(bio->bi_io_vec);
        kfree(bio);
 }
 
@@ -157,16 +295,20 @@ void bio_init(struct bio *bio)
  *   for a &struct bio to become free. If a %NULL @bs is passed in, we will
  *   fall back to just using @kmalloc to allocate the required memory.
  *
- *   allocate bio and iovecs from the memory pools specified by the
- *   bio_set structure, or @kmalloc if none given.
+ *   Note that the caller must set ->bi_destructor on successful return

+ *   of a bio, to do the appropriate freeing of the bio once the reference
+ *   count drops to zero.
  **/
 struct bio *bio_alloc_bioset(gfp_t gfp_mask, int nr_iovecs, struct bio_set *bs)
 {
-       struct bio *bio;
+       struct bio *bio = NULL;
+
+       if (bs) {
+               void *p = mempool_alloc(bs->bio_pool, gfp_mask);
 
-       if (bs)
-               bio = mempool_alloc(bs->bio_pool, gfp_mask);
-       else
+               if (p)
+                       bio = p + bs->front_pad;
+       } else
                bio = kmalloc(sizeof(*bio), gfp_mask);
 
        if (likely(bio)) {
@@ -176,7 +318,15 @@ struct bio *bio_alloc_bioset(gfp_t gfp_mask, int nr_iovecs, struct bio_set *bs)
                if (likely(nr_iovecs)) {
                        unsigned long uninitialized_var(idx);
 
-                       bvl = bvec_alloc_bs(gfp_mask, nr_iovecs, &idx, bs);
+                       if (nr_iovecs <= BIO_INLINE_VECS) {
+                               idx = 0;
+                               bvl = bio->bi_inline_vecs;
+                               nr_iovecs = BIO_INLINE_VECS;
+                       } else {
+                               bvl = bvec_alloc_bs(gfp_mask, nr_iovecs, &idx,
+                                                       bs);
+                               nr_iovecs = bvec_nr_vecs(idx);
+                       }
                        if (unlikely(!bvl)) {
                                if (bs)
                                        mempool_free(bio, bs->bio_pool);
@@ -186,7 +336,7 @@ struct bio *bio_alloc_bioset(gfp_t gfp_mask, int nr_iovecs, struct bio_set *bs)
                                goto out;
                        }
                        bio->bi_flags |= idx << BIO_POOL_OFFSET;
-                       bio->bi_max_vecs = bvec_nr_vecs(idx);
+                       bio->bi_max_vecs = nr_iovecs;
                }
                bio->bi_io_vec = bvl;
        }
@@ -1346,30 +1496,18 @@ EXPORT_SYMBOL(bio_sector_offset);
  */
 static int biovec_create_pools(struct bio_set *bs, int pool_entries)
 {
-       int i;
+       struct biovec_slab *bp = bvec_slabs + BIOVEC_MAX_IDX;
 
-       for (i = 0; i < BIOVEC_NR_POOLS; i++) {
-               struct biovec_slab *bp = bvec_slabs + i;
-               mempool_t **bvp = bs->bvec_pools + i;
+       bs->bvec_pool = mempool_create_slab_pool(pool_entries, bp->slab);
+       if (!bs->bvec_pool)
+               return -ENOMEM;
 
-               *bvp = mempool_create_slab_pool(pool_entries, bp->slab);
-               if (!*bvp)
-                       return -ENOMEM;
-       }
        return 0;
 }
 
 static void biovec_free_pools(struct bio_set *bs)
 {
-       int i;
-
-       for (i = 0; i < BIOVEC_NR_POOLS; i++) {
-               mempool_t *bvp = bs->bvec_pools[i];
-
-               if (bvp)
-                       mempool_destroy(bvp);
-       }
-
+       mempool_destroy(bs->bvec_pool);
 }
 
 void bioset_free(struct bio_set *bs)
@@ -1379,25 +1517,49 @@ void bioset_free(struct bio_set *bs)
 
        bioset_integrity_free(bs);
        biovec_free_pools(bs);
+       bio_put_slab(bs);
 
        kfree(bs);
 }
 
-struct bio_set *bioset_create(int bio_pool_size, int bvec_pool_size)
+/**
+ * bioset_create  - Create a bio_set
+ * @pool_size: Number of bio and bio_vecs to cache in the mempool
+ * @front_pad: Number of bytes to allocate in front of the returned bio
+ *
+ * Description:
+ *    Set up a bio_set to be used with @bio_alloc_bioset. Allows the caller
+ *    to ask for a number of bytes to be allocated in front of the bio.
+ *    Front pad allocation is useful for embedding the bio inside
+ *    another structure, to avoid allocating extra data to go with the bio.
+ *    Note that the bio must be embedded at the END of that structure always,
+ *    or things will break badly.
+ */
+struct bio_set *bioset_create(unsigned int pool_size, unsigned int front_pad)
 {
-       struct bio_set *bs = kzalloc(sizeof(*bs), GFP_KERNEL);
+       unsigned int back_pad = BIO_INLINE_VECS * sizeof(struct bio_vec);
+       struct bio_set *bs;
 
+       bs = kzalloc(sizeof(*bs), GFP_KERNEL);
        if (!bs)
                return NULL;
 
-       bs->bio_pool = mempool_create_slab_pool(bio_pool_size, bio_slab);
+       bs->front_pad = front_pad;
+
+       bs->bio_slab = bio_find_or_create_slab(front_pad + back_pad);
+       if (!bs->bio_slab) {
+               kfree(bs);
+               return NULL;
+       }
+
+       bs->bio_pool = mempool_create_slab_pool(pool_size, bs->bio_slab);
        if (!bs->bio_pool)
                goto bad;
 
-       if (bioset_integrity_create(bs, bio_pool_size))
+       if (bioset_integrity_create(bs, pool_size))
                goto bad;
 
-       if (!biovec_create_pools(bs, bvec_pool_size))
+       if (!biovec_create_pools(bs, pool_size))
                return bs;
 
 bad:
@@ -1421,12 +1583,16 @@ static void __init biovec_init_slabs(void)
 
 static int __init init_bio(void)
 {
-       bio_slab = KMEM_CACHE(bio, SLAB_HWCACHE_ALIGN|SLAB_PANIC);
+       bio_slab_max = 2;
+       bio_slab_nr = 0;
+       bio_slabs = kzalloc(bio_slab_max * sizeof(struct bio_slab), GFP_KERNEL);
+       if (!bio_slabs)
+               panic("bio: can't allocate bios\n");
 
        bio_integrity_init_slab();
        biovec_init_slabs();
 
-       fs_bio_set = bioset_create(BIO_POOL_SIZE, 2);
+       fs_bio_set = bioset_create(BIO_POOL_SIZE, 0);
        if (!fs_bio_set)
                panic("bio: can't allocate bios\n");
 
index 10179cfa11528683495886657bd643ebde590ce8..776ae091d3b0d58052aacb899e0954c3b191ae10 100644 (file)
@@ -99,10 +99,18 @@ __clear_page_buffers(struct page *page)
        page_cache_release(page);
 }
 
+
+static int quiet_error(struct buffer_head *bh)
+{
+       if (!test_bit(BH_Quiet, &bh->b_state) && printk_ratelimit())
+               return 0;
+       return 1;
+}
+
+
 static void buffer_io_error(struct buffer_head *bh)
 {
        char b[BDEVNAME_SIZE];
-
        printk(KERN_ERR "Buffer I/O error on device %s, logical block %Lu\n",
                        bdevname(bh->b_bdev, b),
                        (unsigned long long)bh->b_blocknr);
@@ -144,7 +152,7 @@ void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
        if (uptodate) {
                set_buffer_uptodate(bh);
        } else {
-               if (!buffer_eopnotsupp(bh) && printk_ratelimit()) {
+               if (!buffer_eopnotsupp(bh) && !quiet_error(bh)) {
                        buffer_io_error(bh);
                        printk(KERN_WARNING "lost page write due to "
                                        "I/O error on %s\n",
@@ -394,7 +402,7 @@ static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
                set_buffer_uptodate(bh);
        } else {
                clear_buffer_uptodate(bh);
-               if (printk_ratelimit())
+               if (!quiet_error(bh))
                        buffer_io_error(bh);
                SetPageError(page);
        }
@@ -455,7 +463,7 @@ static void end_buffer_async_write(struct buffer_head *bh, int uptodate)
        if (uptodate) {
                set_buffer_uptodate(bh);
        } else {
-               if (printk_ratelimit()) {
+               if (!quiet_error(bh)) {
                        buffer_io_error(bh);
                        printk(KERN_WARNING "lost page write due to "
                                        "I/O error on %s\n",
@@ -2913,6 +2921,9 @@ static void end_bio_bh_io_sync(struct bio *bio, int err)
                set_bit(BH_Eopnotsupp, &bh->b_state);
        }
 
+       if (unlikely (test_bit(BIO_QUIET,&bio->bi_flags)))
+               set_bit(BH_Quiet, &bh->b_state);
+
        bh->b_end_io(bh, test_bit(BIO_UPTODATE, &bio->bi_flags));
        bio_put(bio);
 }
index 1f59ea079cbb80f1910c98a828f0cb32f1d6292c..02d2e120542d76ca26ee233e638749e53bc227f0 100644 (file)
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -773,7 +773,6 @@ static int de_thread(struct task_struct *tsk)
        struct signal_struct *sig = tsk->signal;
        struct sighand_struct *oldsighand = tsk->sighand;
        spinlock_t *lock = &oldsighand->siglock;
-       struct task_struct *leader = NULL;
        int count;
 
        if (thread_group_empty(tsk))
@@ -811,7 +810,7 @@ static int de_thread(struct task_struct *tsk)
         * and to assume its PID:
         */
        if (!thread_group_leader(tsk)) {
-               leader = tsk->group_leader;
+               struct task_struct *leader = tsk->group_leader;
 
                sig->notify_count = -1; /* for exit_notify() */
                for (;;) {
@@ -863,8 +862,9 @@ static int de_thread(struct task_struct *tsk)
 
                BUG_ON(leader->exit_state != EXIT_ZOMBIE);
                leader->exit_state = EXIT_DEAD;
-
                write_unlock_irq(&tasklist_lock);
+
+               release_task(leader);
        }
 
        sig->group_exit_task = NULL;
@@ -873,8 +873,6 @@ static int de_thread(struct task_struct *tsk)
 no_thread_group:
        exit_itimers(sig);
        flush_itimer_signals();
-       if (leader)
-               release_task(leader);
 
        if (atomic_read(&oldsighand->count) != 1) {
                struct sighand_struct *newsighand;
index e4a241c65dbeb79d189d84b08010430274c6755f..04158ad74dbbc7192587f8d140d4cb8305bd8e46 100644 (file)
@@ -1721,7 +1721,7 @@ static loff_t ext4_max_size(int blkbits, int has_huge_files)
        /* small i_blocks in vfs inode? */
        if (!has_huge_files || sizeof(blkcnt_t) < sizeof(u64)) {
                /*
-                * CONFIG_LSF is not enabled implies the inode
+                * CONFIG_LBD is not enabled implies the inode
                 * i_block represent total blocks in 512 bytes
                 * 32 == size of vfs inode i_blocks * 8
                 */
@@ -1764,7 +1764,7 @@ static loff_t ext4_max_bitmap_size(int bits, int has_huge_files)
 
        if (!has_huge_files || sizeof(blkcnt_t) < sizeof(u64)) {
                /*
-                * !has_huge_files or CONFIG_LSF is not enabled
+                * !has_huge_files or CONFIG_LBD is not enabled
                 * implies the inode i_block represent total blocks in
                 * 512 bytes 32 == size of vfs inode i_blocks * 8
                 */
@@ -2021,13 +2021,13 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
        if (has_huge_files) {
                /*
                 * Large file size enabled file system can only be
-                * mount if kernel is build with CONFIG_LSF
+                * mount if kernel is build with CONFIG_LBD
                 */
                if (sizeof(root->i_blocks) < sizeof(u64) &&
                                !(sb->s_flags & MS_RDONLY)) {
                        printk(KERN_ERR "EXT4-fs: %s: Filesystem with huge "
                                        "files cannot be mounted read-write "
-                                       "without CONFIG_LSF.\n", sb->s_id);
+                                       "without CONFIG_LBD.\n", sb->s_id);
                        goto failed_mount;
                }
        }
index 81904f07679d43635cbc6241162d9b45d9a9db62..3bb1cf1e742552ad2f97b6b6206474937ffb0f07 100644 (file)
@@ -44,10 +44,13 @@ static int show_stat(struct seq_file *p, void *v)
                softirq = cputime64_add(softirq, kstat_cpu(i).cpustat.softirq);
                steal = cputime64_add(steal, kstat_cpu(i).cpustat.steal);
                guest = cputime64_add(guest, kstat_cpu(i).cpustat.guest);
-
-               for_each_irq_nr(j)
+               for_each_irq_nr(j) {
+#ifdef CONFIG_SPARSE_IRQ
+                       if (!irq_to_desc(j))
+                               continue;
+#endif
                        sum += kstat_irqs_cpu(j, i);
-
+               }
                sum += arch_irq_stat_cpu(i);
        }
        sum += arch_irq_stat();
@@ -92,7 +95,12 @@ static int show_stat(struct seq_file *p, void *v)
        /* sum again ? it could be updated? */
        for_each_irq_nr(j) {
                per_irq_sum = 0;
-
+#ifdef CONFIG_SPARSE_IRQ
+               if (!irq_to_desc(j)) {
+                       seq_printf(p, " %u", per_irq_sum);
+                       continue;
+               }
+#endif
                for_each_possible_cpu(i)
                        per_irq_sum += kstat_irqs_cpu(j, i);
 
index 4c794d73fb8484e47fb0708beb7a470724707046..8af276361bf26c662bd268fcec2c1431254ce03d 100644 (file)
@@ -41,15 +41,14 @@ struct bug_entry {
 
 #ifndef __WARN
 #ifndef __ASSEMBLY__
-extern void warn_on_slowpath(const char *file, const int line);
 extern void warn_slowpath(const char *file, const int line,
                const char *fmt, ...) __attribute__((format(printf, 3, 4)));
 #define WANT_WARN_ON_SLOWPATH
 #endif
-#define __WARN() warn_on_slowpath(__FILE__, __LINE__)
-#define __WARN_printf(arg...) warn_slowpath(__FILE__, __LINE__, arg)
+#define __WARN()               warn_slowpath(__FILE__, __LINE__, NULL)
+#define __WARN_printf(arg...)  warn_slowpath(__FILE__, __LINE__, arg)
 #else
-#define __WARN_printf(arg...) do { printk(arg); __WARN(); } while (0)
+#define __WARN_printf(arg...)  do { printk(arg); __WARN(); } while (0)
 #endif
 
 #ifndef WARN_ON
index 82b6983b7fbbe8e3bf32092e7a7f899ba85b72f8..b940fdfa3b25284fb0a244b18781709cda90789b 100644 (file)
@@ -1,4 +1,4 @@
-unifdef-y += drm.h drm_sarea.h
+unifdef-y += drm.h drm_sarea.h drm_mode.h
 unifdef-y += i810_drm.h
 unifdef-y += i830_drm.h
 unifdef-y += i915_drm.h
index f46ba4b57da4aedda3e3c4ac170056474bf8c102..32e5096554e9004f78c0d173f45d7036f419cab3 100644 (file)
@@ -173,6 +173,7 @@ enum drm_map_type {
        _DRM_AGP = 3,             /**< AGP/GART */
        _DRM_SCATTER_GATHER = 4,  /**< Scatter/gather memory for PCI DMA */
        _DRM_CONSISTENT = 5,      /**< Consistent memory for PCI DMA */
+       _DRM_GEM = 6,             /**< GEM object */
 };
 
 /**
@@ -598,6 +599,8 @@ struct drm_gem_open {
        uint64_t size;
 };
 
+#include "drm_mode.h"
+
 #define DRM_IOCTL_BASE                 'd'
 #define DRM_IO(nr)                     _IO(DRM_IOCTL_BASE,nr)
 #define DRM_IOR(nr,type)               _IOR(DRM_IOCTL_BASE,nr,type)
@@ -634,6 +637,9 @@ struct drm_gem_open {
 #define DRM_IOCTL_SET_SAREA_CTX                DRM_IOW( 0x1c, struct drm_ctx_priv_map)
 #define DRM_IOCTL_GET_SAREA_CTX        DRM_IOWR(0x1d, struct drm_ctx_priv_map)
 
+#define DRM_IOCTL_SET_MASTER            DRM_IO(0x1e)
+#define DRM_IOCTL_DROP_MASTER           DRM_IO(0x1f)
+
 #define DRM_IOCTL_ADD_CTX              DRM_IOWR(0x20, struct drm_ctx)
 #define DRM_IOCTL_RM_CTX               DRM_IOWR(0x21, struct drm_ctx)
 #define DRM_IOCTL_MOD_CTX              DRM_IOW( 0x22, struct drm_ctx)
@@ -664,6 +670,24 @@ struct drm_gem_open {
 
 #define DRM_IOCTL_UPDATE_DRAW          DRM_IOW(0x3f, struct drm_update_draw)
 
+#define DRM_IOCTL_MODE_GETRESOURCES    DRM_IOWR(0xA0, struct drm_mode_card_res)
+#define DRM_IOCTL_MODE_GETCRTC         DRM_IOWR(0xA1, struct drm_mode_crtc)
+#define DRM_IOCTL_MODE_SETCRTC         DRM_IOWR(0xA2, struct drm_mode_crtc)
+#define DRM_IOCTL_MODE_CURSOR          DRM_IOWR(0xA3, struct drm_mode_cursor)
+#define DRM_IOCTL_MODE_GETGAMMA                DRM_IOWR(0xA4, struct drm_mode_crtc_lut)
+#define DRM_IOCTL_MODE_SETGAMMA                DRM_IOWR(0xA5, struct drm_mode_crtc_lut)
+#define DRM_IOCTL_MODE_GETENCODER      DRM_IOWR(0xA6, struct drm_mode_get_encoder)
+#define DRM_IOCTL_MODE_GETCONNECTOR    DRM_IOWR(0xA7, struct drm_mode_get_connector)
+#define DRM_IOCTL_MODE_ATTACHMODE      DRM_IOWR(0xA8, struct drm_mode_mode_cmd)
+#define DRM_IOCTL_MODE_DETACHMODE      DRM_IOWR(0xA9, struct drm_mode_mode_cmd)
+
+#define DRM_IOCTL_MODE_GETPROPERTY     DRM_IOWR(0xAA, struct drm_mode_get_property)
+#define DRM_IOCTL_MODE_SETPROPERTY     DRM_IOWR(0xAB, struct drm_mode_connector_set_property)
+#define DRM_IOCTL_MODE_GETPROPBLOB     DRM_IOWR(0xAC, struct drm_mode_get_blob)
+#define DRM_IOCTL_MODE_GETFB           DRM_IOWR(0xAD, struct drm_mode_fb_cmd)
+#define DRM_IOCTL_MODE_ADDFB           DRM_IOWR(0xAE, struct drm_mode_fb_cmd)
+#define DRM_IOCTL_MODE_RMFB            DRM_IOWR(0xAF, unsigned int)
+
 /**
  * Device specific ioctls should only be in their respective headers
  * The device specific ioctl range is from 0x40 to 0x99.
index d5e8e5c8954825baaf69564e07716dd041ed0593..afb7858c068d1f31ef1a77bbe6abc3ea7c00bfb9 100644 (file)
@@ -105,6 +105,7 @@ struct drm_device;
 #define DRIVER_FB_DMA      0x400
 #define DRIVER_IRQ_VBL2    0x800
 #define DRIVER_GEM         0x1000
+#define DRIVER_MODESET     0x2000
 
 /***********************************************************************/
 /** \name Begin the DRM... */
@@ -238,11 +239,11 @@ struct drm_device;
  */
 #define LOCK_TEST_WITH_RETURN( dev, file_priv )                                \
 do {                                                                   \
-       if ( !_DRM_LOCK_IS_HELD( dev->lock.hw_lock->lock ) ||           \
-            dev->lock.file_priv != file_priv ) {                       \
+       if (!_DRM_LOCK_IS_HELD(file_priv->master->lock.hw_lock->lock) ||                \
+           file_priv->master->lock.file_priv != file_priv)     {                       \
                DRM_ERROR( "%s called without lock held, held  %d owner %p %p\n",\
-                          __func__, _DRM_LOCK_IS_HELD( dev->lock.hw_lock->lock ),\
-                          dev->lock.file_priv, file_priv );            \
+                          __func__, _DRM_LOCK_IS_HELD(file_priv->master->lock.hw_lock->lock),\
+                          file_priv->master->lock.file_priv, file_priv);               \
                return -EINVAL;                                         \
        }                                                               \
 } while (0)
@@ -276,6 +277,7 @@ typedef int drm_ioctl_compat_t(struct file *filp, unsigned int cmd,
 #define DRM_AUTH       0x1
 #define        DRM_MASTER      0x2
 #define DRM_ROOT_ONLY  0x4
+#define DRM_CONTROL_ALLOW 0x8
 
 struct drm_ioctl_desc {
        unsigned int cmd;
@@ -379,21 +381,26 @@ struct drm_buf_entry {
 /** File private data */
 struct drm_file {
        int authenticated;
-       int master;
        pid_t pid;
        uid_t uid;
        drm_magic_t magic;
        unsigned long ioctl_count;
        struct list_head lhead;
        struct drm_minor *minor;
-       int remove_auth_on_close;
        unsigned long lock_count;
+
        /** Mapping of mm object handles to object pointers. */
        struct idr object_idr;
        /** Lock for synchronization of access to object_idr. */
        spinlock_t table_lock;
+
        struct file *filp;
        void *driver_priv;
+
+       int is_master; /* this file private is a master for a minor */
+       struct drm_master *master; /* master this node is currently associated with
+                                     N.B. not always minor->master */
+       struct list_head fbs;
 };
 
 /** Wait queue */
@@ -523,6 +530,8 @@ struct drm_map_list {
        struct drm_hash_item hash;
        struct drm_map *map;                    /**< mapping */
        uint64_t user_token;
+       struct drm_master *master;
+       struct drm_mm_node *file_offset_node;   /**< fake offset */
 };
 
 typedef struct drm_map drm_local_map_t;
@@ -562,6 +571,14 @@ struct drm_ati_pcigart_info {
        int table_size;
 };
 
+/**
+ * GEM specific mm private for tracking GEM objects
+ */
+struct drm_gem_mm {
+       struct drm_mm offset_manager;   /**< Offset mgmt for buffer objects */
+       struct drm_open_hash offset_hash; /**< User token hash table for maps */
+};
+
 /**
  * This structure defines the drm_mm memory object, which will be used by the
  * DRM for its buffer objects.
@@ -579,6 +596,9 @@ struct drm_gem_object {
        /** File representing the shmem storage */
        struct file *filp;
 
+       /* Mapping info for this object */
+       struct drm_map_list map_list;
+
        /**
         * Size of the object, in bytes.  Immutable over the object's
         * lifetime.
@@ -612,6 +632,33 @@ struct drm_gem_object {
        void *driver_private;
 };
 
+#include "drm_crtc.h"
+
+/* per-master structure */
+struct drm_master {
+
+       struct kref refcount; /* refcount for this master */
+
+       struct list_head head; /**< each minor contains a list of masters */
+       struct drm_minor *minor; /**< link back to minor we are a master for */
+
+       char *unique;                   /**< Unique identifier: e.g., busid */
+       int unique_len;                 /**< Length of unique field */
+       int unique_size;                /**< amount allocated */
+
+       int blocked;                    /**< Blocked due to VC switch? */
+
+       /** \name Authentication */
+       /*@{ */
+       struct drm_open_hash magiclist;
+       struct list_head magicfree;
+       /*@} */
+
+       struct drm_lock_data lock;      /**< Information on hardware lock */
+
+       void *driver_priv; /**< Private structure for driver to use */
+};
+
 /**
  * DRM driver structure. This structure represent the common code for
  * a family of cards. There will one drm_device for each card present
@@ -712,6 +759,10 @@ struct drm_driver {
        void (*set_version) (struct drm_device *dev,
                             struct drm_set_version *sv);
 
+       /* Master routines */
+       int (*master_create)(struct drm_device *dev, struct drm_master *master);
+       void (*master_destroy)(struct drm_device *dev, struct drm_master *master);
+
        int (*proc_init)(struct drm_minor *minor);
        void (*proc_cleanup)(struct drm_minor *minor);
 
@@ -724,6 +775,9 @@ struct drm_driver {
        int (*gem_init_object) (struct drm_gem_object *obj);
        void (*gem_free_object) (struct drm_gem_object *obj);
 
+       /* Driver private ops for this object */
+       struct vm_operations_struct *gem_vm_ops;
+
        int major;
        int minor;
        int patchlevel;
@@ -737,10 +791,14 @@ struct drm_driver {
        int num_ioctls;
        struct file_operations fops;
        struct pci_driver pci_driver;
+       /* List of devices hanging off this driver */
+       struct list_head device_list;
 };
 
 #define DRM_MINOR_UNASSIGNED 0
 #define DRM_MINOR_LEGACY 1
+#define DRM_MINOR_CONTROL 2
+#define DRM_MINOR_RENDER 3
 
 /**
  * DRM minor structure. This structure represents a drm minor number.
@@ -752,6 +810,9 @@ struct drm_minor {
        struct device kdev;             /**< Linux device */
        struct drm_device *dev;
        struct proc_dir_entry *dev_root;  /**< proc directory entry */
+       struct drm_master *master; /* currently active master for this node */
+       struct list_head master_list;
+       struct drm_mode_group mode_group;
 };
 
 /**
@@ -759,13 +820,10 @@ struct drm_minor {
  * may contain multiple heads.
  */
 struct drm_device {
-       char *unique;                   /**< Unique identifier: e.g., busid */
-       int unique_len;                 /**< Length of unique field */
+       struct list_head driver_item;   /**< list of devices per driver */
        char *devname;                  /**< For /proc/interrupts */
        int if_version;                 /**< Highest interface version set */
 
-       int blocked;                    /**< Blocked due to VC switch? */
-
        /** \name Locks */
        /*@{ */
        spinlock_t count_lock;          /**< For inuse, drm_device::open_count, drm_device::buf_use */
@@ -788,12 +846,7 @@ struct drm_device {
        atomic_t counts[15];
        /*@} */
 
-       /** \name Authentication */
-       /*@{ */
        struct list_head filelist;
-       struct drm_open_hash magiclist; /**< magic hash table */
-       struct list_head magicfree;
-       /*@} */
 
        /** \name Memory management */
        /*@{ */
@@ -810,7 +863,7 @@ struct drm_device {
        struct idr ctx_idr;
 
        struct list_head vmalist;       /**< List of vmas (for debugging) */
-       struct drm_lock_data lock;      /**< Information on hardware lock */
+
        /*@} */
 
        /** \name DMA queues (contexts) */
@@ -858,6 +911,7 @@ struct drm_device {
        int *vblank_enabled;            /* so we don't call enable more than
                                           once per disable */
        int *vblank_inmodeset;          /* Display driver is setting mode */
+       u32 *last_vblank_wait;          /* Last vblank seqno waited per CRTC */
        struct timer_list vblank_disable_timer;
 
        u32 max_vblank_count;           /**< size of vblank counter register */
@@ -881,12 +935,15 @@ struct drm_device {
        struct drm_sg_mem *sg;  /**< Scatter gather memory */
        int num_crtcs;                  /**< Number of CRTCs on this device */
        void *dev_private;              /**< device private data */
+       void *mm_private;
+       struct address_space *dev_mapping;
        struct drm_sigdata sigdata;        /**< For block_all_signals */
        sigset_t sigmask;
 
        struct drm_driver *driver;
        drm_local_map_t *agp_buffer_map;
        unsigned int agp_buffer_token;
+       struct drm_minor *control;              /**< Control node for card */
        struct drm_minor *primary;              /**< render type primary screen head */
 
        /** \name Drawable information */
@@ -895,6 +952,8 @@ struct drm_device {
        struct idr drw_idr;
        /*@} */
 
+        struct drm_mode_config mode_config;    /**< Current mode config */
+
        /** \name GEM information */
        /*@{ */
        spinlock_t object_name_lock;
@@ -997,6 +1056,8 @@ extern int drm_release(struct inode *inode, struct file *filp);
 
                                /* Mapping support (drm_vm.h) */
 extern int drm_mmap(struct file *filp, struct vm_area_struct *vma);
+extern int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma);
+extern void drm_vm_open_locked(struct vm_area_struct *vma);
 extern unsigned long drm_core_get_map_ofs(struct drm_map * map);
 extern unsigned long drm_core_get_reg_ofs(struct drm_device *dev);
 extern unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait);
@@ -1153,6 +1214,8 @@ extern int drm_vblank_get(struct drm_device *dev, int crtc);
 extern void drm_vblank_put(struct drm_device *dev, int crtc);
 extern void drm_vblank_cleanup(struct drm_device *dev);
 /* Modesetting support */
+extern void drm_vblank_pre_modeset(struct drm_device *dev, int crtc);
+extern void drm_vblank_post_modeset(struct drm_device *dev, int crtc);
 extern int drm_modeset_ctl(struct drm_device *dev, void *data,
                           struct drm_file *file_priv);
 
@@ -1189,6 +1252,13 @@ extern int drm_agp_unbind_memory(DRM_AGP_MEM * handle);
 extern void drm_agp_chipset_flush(struct drm_device *dev);
 
                                /* Stub support (drm_stub.h) */
+extern int drm_setmaster_ioctl(struct drm_device *dev, void *data,
+                              struct drm_file *file_priv);
+extern int drm_dropmaster_ioctl(struct drm_device *dev, void *data,
+                               struct drm_file *file_priv);
+struct drm_master *drm_master_create(struct drm_minor *minor);
+extern struct drm_master *drm_master_get(struct drm_master *master);
+extern void drm_master_put(struct drm_master **master);
 extern int drm_get_dev(struct pci_dev *pdev, const struct pci_device_id *ent,
                       struct drm_driver *driver);
 extern int drm_put_dev(struct drm_device *dev);
@@ -1231,7 +1301,11 @@ struct drm_sysfs_class;
 extern struct class *drm_sysfs_create(struct module *owner, char *name);
 extern void drm_sysfs_destroy(void);
 extern int drm_sysfs_device_add(struct drm_minor *minor);
+extern void drm_sysfs_hotplug_event(struct drm_device *dev);
 extern void drm_sysfs_device_remove(struct drm_minor *minor);
+extern char *drm_get_connector_status_name(enum drm_connector_status status);
+extern int drm_sysfs_connector_add(struct drm_connector *connector);
+extern void drm_sysfs_connector_remove(struct drm_connector *connector);
 
 /*
  * Basic memory manager support (drm_mm.c)
@@ -1251,10 +1325,12 @@ extern int drm_mm_add_space_to_tail(struct drm_mm *mm, unsigned long size);
 
 /* Graphics Execution Manager library functions (drm_gem.c) */
 int drm_gem_init(struct drm_device *dev);
+void drm_gem_destroy(struct drm_device *dev);
 void drm_gem_object_free(struct kref *kref);
 struct drm_gem_object *drm_gem_object_alloc(struct drm_device *dev,
                                            size_t size);
 void drm_gem_object_handle_free(struct kref *kref);
+int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
 
 static inline void
 drm_gem_object_reference(struct drm_gem_object *obj)
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
new file mode 100644 (file)
index 0000000..0acb07f
--- /dev/null
@@ -0,0 +1,733 @@
+/*
+ * Copyright © 2006 Keith Packard
+ * Copyright © 2007-2008 Dave Airlie
+ * Copyright © 2007-2008 Intel Corporation
+ *   Jesse Barnes <jesse.barnes@intel.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef __DRM_CRTC_H__
+#define __DRM_CRTC_H__
+
+#include <linux/i2c.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include <linux/idr.h>
+
+#include <linux/fb.h>
+
+struct drm_device;
+struct drm_mode_set;
+struct drm_framebuffer;
+
+
+#define DRM_MODE_OBJECT_CRTC 0xcccccccc
+#define DRM_MODE_OBJECT_CONNECTOR 0xc0c0c0c0
+#define DRM_MODE_OBJECT_ENCODER 0xe0e0e0e0
+#define DRM_MODE_OBJECT_MODE 0xdededede
+#define DRM_MODE_OBJECT_PROPERTY 0xb0b0b0b0
+#define DRM_MODE_OBJECT_FB 0xfbfbfbfb
+#define DRM_MODE_OBJECT_BLOB 0xbbbbbbbb
+
+struct drm_mode_object {
+       uint32_t id;
+       uint32_t type;
+};
+
+/*
+ * Note on terminology:  here, for brevity and convenience, we refer to connector
+ * control chips as 'CRTCs'.  They can control any type of connector, VGA, LVDS,
+ * DVI, etc.  And 'screen' refers to the whole of the visible display, which
+ * may span multiple monitors (and therefore multiple CRTC and connector
+ * structures).
+ */
+
+enum drm_mode_status {
+    MODE_OK    = 0,    /* Mode OK */
+    MODE_HSYNC,                /* hsync out of range */
+    MODE_VSYNC,                /* vsync out of range */
+    MODE_H_ILLEGAL,    /* mode has illegal horizontal timings */
+    MODE_V_ILLEGAL,    /* mode has illegal vertical timings */
+    MODE_BAD_WIDTH,    /* requires an unsupported linepitch */
+    MODE_NOMODE,       /* no mode with a matching name */
+    MODE_NO_INTERLACE, /* interlaced mode not supported */
+    MODE_NO_DBLESCAN,  /* doublescan mode not supported */
+    MODE_NO_VSCAN,     /* multiscan mode not supported */
+    MODE_MEM,          /* insufficient video memory */
+    MODE_VIRTUAL_X,    /* mode width too large for specified virtual size */
+    MODE_VIRTUAL_Y,    /* mode height too large for specified virtual size */
+    MODE_MEM_VIRT,     /* insufficient video memory given virtual size */
+    MODE_NOCLOCK,      /* no fixed clock available */
+    MODE_CLOCK_HIGH,   /* clock required is too high */
+    MODE_CLOCK_LOW,    /* clock required is too low */
+    MODE_CLOCK_RANGE,  /* clock/mode isn't in a ClockRange */
+    MODE_BAD_HVALUE,   /* horizontal timing was out of range */
+    MODE_BAD_VVALUE,   /* vertical timing was out of range */
+    MODE_BAD_VSCAN,    /* VScan value out of range */
+    MODE_HSYNC_NARROW, /* horizontal sync too narrow */
+    MODE_HSYNC_WIDE,   /* horizontal sync too wide */
+    MODE_HBLANK_NARROW,        /* horizontal blanking too narrow */
+    MODE_HBLANK_WIDE,  /* horizontal blanking too wide */
+    MODE_VSYNC_NARROW, /* vertical sync too narrow */
+    MODE_VSYNC_WIDE,   /* vertical sync too wide */
+    MODE_VBLANK_NARROW,        /* vertical blanking too narrow */
+    MODE_VBLANK_WIDE,  /* vertical blanking too wide */
+    MODE_PANEL,         /* exceeds panel dimensions */
+    MODE_INTERLACE_WIDTH, /* width too large for interlaced mode */
+    MODE_ONE_WIDTH,     /* only one width is supported */
+    MODE_ONE_HEIGHT,    /* only one height is supported */
+    MODE_ONE_SIZE,      /* only one resolution is supported */
+    MODE_NO_REDUCED,    /* monitor doesn't accept reduced blanking */
+    MODE_UNVERIFIED = -3, /* mode needs to be re-verified */
+    MODE_BAD = -2,     /* unspecified reason */
+    MODE_ERROR = -1    /* error condition */
+};
+
+#define DRM_MODE_TYPE_CLOCK_CRTC_C (DRM_MODE_TYPE_CLOCK_C | \
+                                   DRM_MODE_TYPE_CRTC_C)
+
+#define DRM_MODE(nm, t, c, hd, hss, hse, ht, hsk, vd, vss, vse, vt, vs, f) \
+       .name = nm, .status = 0, .type = (t), .clock = (c), \
+       .hdisplay = (hd), .hsync_start = (hss), .hsync_end = (hse), \
+       .htotal = (ht), .hskew = (hsk), .vdisplay = (vd), \
+       .vsync_start = (vss), .vsync_end = (vse), .vtotal = (vt), \
+       .vscan = (vs), .flags = (f), .vrefresh = 0
+
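As a rough illustration of the DRM_MODE() initializer above (the timing numbers are a generic 1024x768@60 example, not values taken from this patch), a driver-side mode table entry for the drm_display_mode structure defined just below might look like:

static const struct drm_display_mode example_mode = {
        DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000,
                 1024, 1048, 1184, 1344, 0,
                 768, 771, 777, 806, 0,
                 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC)
};

DRM_MODE_TYPE_DRIVER and the flag bits come from drm_mode.h, added later in this patch.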
+#define CRTC_INTERLACE_HALVE_V 0x1 /* halve V values for interlacing */
+
+struct drm_display_mode {
+       /* Header */
+       struct list_head head;
+       struct drm_mode_object base;
+
+       char name[DRM_DISPLAY_MODE_LEN];
+
+       int connector_count;
+       enum drm_mode_status status;
+       int type;
+
+       /* Proposed mode values */
+       int clock;
+       int hdisplay;
+       int hsync_start;
+       int hsync_end;
+       int htotal;
+       int hskew;
+       int vdisplay;
+       int vsync_start;
+       int vsync_end;
+       int vtotal;
+       int vscan;
+       unsigned int flags;
+
+       /* Addressable image size (may be 0 for projectors, etc.) */
+       int width_mm;
+       int height_mm;
+
+       /* Actual mode we give to hw */
+       int clock_index;
+       int synth_clock;
+       int crtc_hdisplay;
+       int crtc_hblank_start;
+       int crtc_hblank_end;
+       int crtc_hsync_start;
+       int crtc_hsync_end;
+       int crtc_htotal;
+       int crtc_hskew;
+       int crtc_vdisplay;
+       int crtc_vblank_start;
+       int crtc_vblank_end;
+       int crtc_vsync_start;
+       int crtc_vsync_end;
+       int crtc_vtotal;
+       int crtc_hadjusted;
+       int crtc_vadjusted;
+
+       /* Driver private mode info */
+       int private_size;
+       int *private;
+       int private_flags;
+
+       int vrefresh;
+       float hsync;
+};
+
+enum drm_connector_status {
+       connector_status_connected = 1,
+       connector_status_disconnected = 2,
+       connector_status_unknown = 3,
+};
+
+enum subpixel_order {
+       SubPixelUnknown = 0,
+       SubPixelHorizontalRGB,
+       SubPixelHorizontalBGR,
+       SubPixelVerticalRGB,
+       SubPixelVerticalBGR,
+       SubPixelNone,
+};
+
+
+/*
+ * Describes a given display (e.g. CRT or flat panel) and its limitations.
+ */
+struct drm_display_info {
+       char name[DRM_DISPLAY_INFO_LEN];
+       /* Input info */
+       bool serration_vsync;
+       bool sync_on_green;
+       bool composite_sync;
+       bool separate_syncs;
+       bool blank_to_black;
+       unsigned char video_level;
+       bool digital;
+       /* Physical size */
+        unsigned int width_mm;
+       unsigned int height_mm;
+
+       /* Display parameters */
+       unsigned char gamma; /* FIXME: storage format */
+       bool gtf_supported;
+       bool standard_color;
+       enum {
+               monochrome = 0,
+               rgb,
+               other,
+               unknown,
+       } display_type;
+       bool active_off_supported;
+       bool suspend_supported;
+       bool standby_supported;
+
+       /* Color info FIXME: storage format */
+       unsigned short redx, redy;
+       unsigned short greenx, greeny;
+       unsigned short bluex, bluey;
+       unsigned short whitex, whitey;
+
+       /* Clock limits FIXME: storage format */
+       unsigned int min_vfreq, max_vfreq;
+       unsigned int min_hfreq, max_hfreq;
+       unsigned int pixel_clock;
+
+       /* White point indices FIXME: storage format */
+       unsigned int wpx1, wpy1;
+       unsigned int wpgamma1;
+       unsigned int wpx2, wpy2;
+       unsigned int wpgamma2;
+
+       enum subpixel_order subpixel_order;
+
+       char *raw_edid; /* if any */
+};
+
+struct drm_framebuffer_funcs {
+       void (*destroy)(struct drm_framebuffer *framebuffer);
+       int (*create_handle)(struct drm_framebuffer *fb,
+                            struct drm_file *file_priv,
+                            unsigned int *handle);
+};
+
+struct drm_framebuffer {
+       struct drm_device *dev;
+       struct list_head head;
+       struct drm_mode_object base;
+       const struct drm_framebuffer_funcs *funcs;
+       unsigned int pitch;
+       unsigned int width;
+       unsigned int height;
+       /* depth can be 15 or 16 */
+       unsigned int depth;
+       int bits_per_pixel;
+       int flags;
+       void *fbdev;
+       u32 pseudo_palette[17];
+       struct list_head filp_head;
+};
+
+struct drm_property_blob {
+       struct drm_mode_object base;
+       struct list_head head;
+       unsigned int length;
+       void *data;
+};
+
+struct drm_property_enum {
+       uint64_t value;
+       struct list_head head;
+       char name[DRM_PROP_NAME_LEN];
+};
+
+struct drm_property {
+       struct list_head head;
+       struct drm_mode_object base;
+       uint32_t flags;
+       char name[DRM_PROP_NAME_LEN];
+       uint32_t num_values;
+       uint64_t *values;
+
+       struct list_head enum_blob_list;
+};
+
+struct drm_crtc;
+struct drm_connector;
+struct drm_encoder;
+
+/**
+ * drm_crtc_funcs - control CRTCs for a given device
+ * @save: save CRTC state
+ * @restore: restore CRTC state
+ * @cursor_set: set the cursor image for this CRTC
+ * @cursor_move: move the cursor on this CRTC
+ * @gamma_set: specify color ramp for CRTC
+ * @destroy: deinit and free object
+ * @set_config: apply a new mode, framebuffer and connector configuration
+ *
+ * The drm_crtc_funcs structure is the central CRTC management structure
+ * in the DRM.  Each CRTC controls one or more connectors (note that the name
+ * CRTC is simply historical, a CRTC may control LVDS, VGA, DVI, TV out, etc.
+ * connectors, not just CRTs).
+ *
+ * Each driver is responsible for filling out this structure at startup time,
+ * in addition to providing other modesetting features, like i2c and DDC
+ * bus accessors.
+ */
+struct drm_crtc_funcs {
+       /* Save CRTC state */
+       void (*save)(struct drm_crtc *crtc); /* suspend? */
+       /* Restore CRTC state */
+       void (*restore)(struct drm_crtc *crtc); /* resume? */
+
+       /* cursor controls */
+       int (*cursor_set)(struct drm_crtc *crtc, struct drm_file *file_priv,
+                         uint32_t handle, uint32_t width, uint32_t height);
+       int (*cursor_move)(struct drm_crtc *crtc, int x, int y);
+
+       /* Set gamma on the CRTC */
+       void (*gamma_set)(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b,
+                         uint32_t size);
+       /* Object destroy routine */
+       void (*destroy)(struct drm_crtc *crtc);
+
+       int (*set_config)(struct drm_mode_set *set);
+};
+
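A minimal sketch of how a driver might fill this structure out (the example_* hooks are hypothetical; set_config is commonly pointed at drm_crtc_helper_set_config from drm_crtc_helper.h, added later in this patch):

static void example_crtc_destroy(struct drm_crtc *crtc)
{
        drm_crtc_cleanup(crtc);
}

static const struct drm_crtc_funcs example_crtc_funcs = {
        .cursor_set  = example_cursor_set,
        .cursor_move = example_cursor_move,
        .gamma_set   = example_gamma_set,
        .set_config  = drm_crtc_helper_set_config,
        .destroy     = example_crtc_destroy,
};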
+/**
+ * drm_crtc - central CRTC control structure
+ * @enabled: is this CRTC enabled?
+ * @x: x position on screen
+ * @y: y position on screen
+ * @desired_mode: new desired mode
+ * @desired_x: desired x for desired_mode
+ * @desired_y: desired y for desired_mode
+ * @funcs: CRTC control functions
+ *
+ * Each CRTC may have one or more connectors associated with it.  This structure
+ * allows the CRTC to be controlled.
+ */
+struct drm_crtc {
+       struct drm_device *dev;
+       struct list_head head;
+
+       struct drm_mode_object base;
+
+       /* framebuffer the connector is currently bound to */
+       struct drm_framebuffer *fb;
+
+       bool enabled;
+
+       struct drm_display_mode mode;
+
+       int x, y;
+       struct drm_display_mode *desired_mode;
+       int desired_x, desired_y;
+       const struct drm_crtc_funcs *funcs;
+
+       /* CRTC gamma size for reporting to userspace */
+       uint32_t gamma_size;
+       uint16_t *gamma_store;
+
+       /* if you are using the helper */
+       void *helper_private;
+};
+
+
+/**
+ * drm_connector_funcs - control connectors on a given device
+ * @dpms: set power state (DRM_MODE_DPMS_* levels)
+ * @save: save connector state
+ * @restore: restore connector state
+ * @detect: is this connector active?
+ * @fill_modes: fill out the mode list for this connector
+ * @set_property: property for this connector may need an update
+ * @destroy: make object go away
+ *
+ * Each CRTC may have one or more connectors attached to it.  The functions
+ * below allow the core DRM code to control connectors, enumerate available modes,
+ * etc.
+ */
+struct drm_connector_funcs {
+       void (*dpms)(struct drm_connector *connector, int mode);
+       void (*save)(struct drm_connector *connector);
+       void (*restore)(struct drm_connector *connector);
+       enum drm_connector_status (*detect)(struct drm_connector *connector);
+       void (*fill_modes)(struct drm_connector *connector, uint32_t max_width, uint32_t max_height);
+       int (*set_property)(struct drm_connector *connector, struct drm_property *property,
+                            uint64_t val);
+       void (*destroy)(struct drm_connector *connector);
+};
+
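A hypothetical initializer for these hooks (the example_connector_* callbacks are placeholders; fill_modes is commonly pointed at drm_helper_probe_single_connector_modes from drm_crtc_helper.h, added later in this patch):

static const struct drm_connector_funcs example_connector_funcs = {
        .dpms       = example_connector_dpms,
        .save       = example_connector_save,
        .restore    = example_connector_restore,
        .detect     = example_connector_detect,
        .fill_modes = drm_helper_probe_single_connector_modes,
        .destroy    = example_connector_destroy,
};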
+struct drm_encoder_funcs {
+       void (*destroy)(struct drm_encoder *encoder);
+};
+
+#define DRM_CONNECTOR_MAX_UMODES 16
+#define DRM_CONNECTOR_MAX_PROPERTY 16
+#define DRM_CONNECTOR_LEN 32
+#define DRM_CONNECTOR_MAX_ENCODER 2
+
+/**
+ * drm_encoder - central DRM encoder structure
+ */
+struct drm_encoder {
+       struct drm_device *dev;
+       struct list_head head;
+
+       struct drm_mode_object base;
+       int encoder_type;
+       uint32_t possible_crtcs;
+       uint32_t possible_clones;
+
+       struct drm_crtc *crtc;
+       const struct drm_encoder_funcs *funcs;
+       void *helper_private;
+};
+
+/**
+ * drm_connector - central DRM connector control structure
+ * @encoder: encoder currently driving this connector, NULL if none
+ * @interlace_allowed: can this connector handle interlaced modes?
+ * @doublescan_allowed: can this connector handle doublescan?
+ * @modes: modes available on this connector (from fill_modes() + user)
+ * @initial_x: initial x position for this connector
+ * @initial_y: initial y position for this connector
+ * @status: connector connected?
+ * @funcs: connector control functions
+ *
+ * Each connector may be connected to one or more CRTCs, or may be clonable by
+ * another connector if they can share a CRTC.  Each connector also has a specific
+ * position in the broader display (referred to as a 'screen' though it could
+ * span multiple monitors).
+ */
+struct drm_connector {
+       struct drm_device *dev;
+       struct device kdev;
+       struct device_attribute *attr;
+       struct list_head head;
+
+       struct drm_mode_object base;
+
+       int connector_type;
+       int connector_type_id;
+       bool interlace_allowed;
+       bool doublescan_allowed;
+       struct list_head modes; /* list of modes on this connector */
+
+       int initial_x, initial_y;
+       enum drm_connector_status status;
+
+       /* these are modes added by probing with DDC or the BIOS */
+       struct list_head probed_modes;
+
+       struct drm_display_info display_info;
+       const struct drm_connector_funcs *funcs;
+
+       struct list_head user_modes;
+       struct drm_property_blob *edid_blob_ptr;
+       u32 property_ids[DRM_CONNECTOR_MAX_PROPERTY];
+       uint64_t property_values[DRM_CONNECTOR_MAX_PROPERTY];
+
+       void *helper_private;
+
+       uint32_t encoder_ids[DRM_CONNECTOR_MAX_ENCODER];
+       uint32_t force_encoder_id;
+       struct drm_encoder *encoder; /* currently active encoder */
+};
+
+/**
+ * struct drm_mode_set
+ *
+ * Represents a single CRTC: the connectors that it drives, the mode to use,
+ * and the framebuffer it scans out from.
+ *
+ * This is used to set modes.
+ */
+struct drm_mode_set {
+       struct list_head head;
+
+       struct drm_framebuffer *fb;
+       struct drm_crtc *crtc;
+       struct drm_display_mode *mode;
+
+       uint32_t x;
+       uint32_t y;
+
+       struct drm_connector **connectors;
+       size_t num_connectors;
+};
+
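For illustration, a modeset caller would typically build a set on the stack and hand it to the CRTC's set_config hook; the example_* names below are placeholders, not symbols from this patch:

struct drm_connector *connectors[] = { example_connector };
struct drm_mode_set set = {
        .crtc           = example_crtc,
        .fb             = example_fb,
        .mode           = example_mode,
        .x              = 0,
        .y              = 0,
        .connectors     = connectors,
        .num_connectors = 1,
};
int ret = example_crtc->funcs->set_config(&set);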
+/**
+ * struct drm_mode_config_funcs - configure CRTCs for a given screen layout
+ * @fb_create: create a new framebuffer object from a drm_mode_fb_cmd
+ * @fb_changed: notify the driver that the framebuffer configuration changed
+ *
+ * The driver fills out these hooks at init time; the core DRM code calls
+ * back into them when userspace adds framebuffers or when the framebuffer
+ * setup changes (e.g. on hotplug).
+ */
+struct drm_mode_config_funcs {
+       struct drm_framebuffer *(*fb_create)(struct drm_device *dev, struct drm_file *file_priv, struct drm_mode_fb_cmd *mode_cmd);
+       int (*fb_changed)(struct drm_device *dev);
+};
+
+struct drm_mode_group {
+       uint32_t num_crtcs;
+       uint32_t num_encoders;
+       uint32_t num_connectors;
+
+       /* list of object IDs for this group */
+       uint32_t *id_list;
+};
+
+/**
+ * drm_mode_config - Mode configuration control structure
+ *
+ */
+struct drm_mode_config {
+       struct mutex mutex; /* protects configuration and IDR */
+       struct idr crtc_idr; /* use this idr for all IDs, fb, crtc, connector, modes - just makes life easier */
+       /* this is limited to one for now */
+       int num_fb;
+       struct list_head fb_list;
+       int num_connector;
+       struct list_head connector_list;
+       int num_encoder;
+       struct list_head encoder_list;
+
+       int num_crtc;
+       struct list_head crtc_list;
+
+       struct list_head property_list;
+
+       /* in-kernel framebuffers - hung off filp_head in drm_framebuffer */
+       struct list_head fb_kernel_list;
+
+       int min_width, min_height;
+       int max_width, max_height;
+       struct drm_mode_config_funcs *funcs;
+       unsigned long fb_base;
+
+       /* pointers to standard properties */
+       struct list_head property_blob_list;
+       struct drm_property *edid_property;
+       struct drm_property *dpms_property;
+
+       /* DVI-I properties */
+       struct drm_property *dvi_i_subconnector_property;
+       struct drm_property *dvi_i_select_subconnector_property;
+
+       /* TV properties */
+       struct drm_property *tv_subconnector_property;
+       struct drm_property *tv_select_subconnector_property;
+       struct drm_property *tv_mode_property;
+       struct drm_property *tv_left_margin_property;
+       struct drm_property *tv_right_margin_property;
+       struct drm_property *tv_top_margin_property;
+       struct drm_property *tv_bottom_margin_property;
+
+       /* Optional properties */
+       struct drm_property *scaling_mode_property;
+       struct drm_property *dithering_mode_property;
+};
+
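A hypothetical driver load path would wire this up roughly as follows (the size limits and example_mode_funcs are made-up values, not from this patch):

drm_mode_config_init(dev);
dev->mode_config.min_width  = 0;
dev->mode_config.min_height = 0;
dev->mode_config.max_width  = 2048;     /* hypothetical hardware limit */
dev->mode_config.max_height = 2048;
dev->mode_config.funcs = &example_mode_funcs;   /* fb_create/fb_changed hooks */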
+#define obj_to_crtc(x) container_of(x, struct drm_crtc, base)
+#define obj_to_connector(x) container_of(x, struct drm_connector, base)
+#define obj_to_encoder(x) container_of(x, struct drm_encoder, base)
+#define obj_to_mode(x) container_of(x, struct drm_display_mode, base)
+#define obj_to_fb(x) container_of(x, struct drm_framebuffer, base)
+#define obj_to_property(x) container_of(x, struct drm_property, base)
+#define obj_to_blob(x) container_of(x, struct drm_property_blob, base)
+
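These helpers pair with drm_mode_object_find(), declared further down; a sketch of the lookup pattern under the mode_config mutex (crtc_id is a hypothetical object id received from userspace):

struct drm_mode_object *obj;
struct drm_crtc *crtc = NULL;

mutex_lock(&dev->mode_config.mutex);
obj = drm_mode_object_find(dev, crtc_id, DRM_MODE_OBJECT_CRTC);
if (obj)
        crtc = obj_to_crtc(obj);
mutex_unlock(&dev->mode_config.mutex);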
+
+extern void drm_crtc_init(struct drm_device *dev,
+                         struct drm_crtc *crtc,
+                         const struct drm_crtc_funcs *funcs);
+extern void drm_crtc_cleanup(struct drm_crtc *crtc);
+
+extern void drm_connector_init(struct drm_device *dev,
+                           struct drm_connector *connector,
+                           const struct drm_connector_funcs *funcs,
+                           int connector_type);
+
+extern void drm_connector_cleanup(struct drm_connector *connector);
+
+extern void drm_encoder_init(struct drm_device *dev,
+                            struct drm_encoder *encoder,
+                            const struct drm_encoder_funcs *funcs,
+                            int encoder_type);
+
+extern void drm_encoder_cleanup(struct drm_encoder *encoder);
+
+extern char *drm_get_connector_name(struct drm_connector *connector);
+extern char *drm_get_dpms_name(int val);
+extern char *drm_get_dvi_i_subconnector_name(int val);
+extern char *drm_get_dvi_i_select_name(int val);
+extern char *drm_get_tv_subconnector_name(int val);
+extern char *drm_get_tv_select_name(int val);
+extern void drm_fb_release(struct file *filp);
+extern int drm_mode_group_init_legacy_group(struct drm_device *dev, struct drm_mode_group *group);
+extern struct edid *drm_get_edid(struct drm_connector *connector,
+                                struct i2c_adapter *adapter);
+extern unsigned char *drm_do_probe_ddc_edid(struct i2c_adapter *adapter);
+extern int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid);
+extern void drm_mode_probed_add(struct drm_connector *connector, struct drm_display_mode *mode);
+extern void drm_mode_remove(struct drm_connector *connector, struct drm_display_mode *mode);
+extern struct drm_display_mode *drm_mode_duplicate(struct drm_device *dev,
+                                                  struct drm_display_mode *mode);
+extern void drm_mode_debug_printmodeline(struct drm_display_mode *mode);
+extern void drm_mode_config_init(struct drm_device *dev);
+extern void drm_mode_config_cleanup(struct drm_device *dev);
+extern void drm_mode_set_name(struct drm_display_mode *mode);
+extern bool drm_mode_equal(struct drm_display_mode *mode1, struct drm_display_mode *mode2);
+extern int drm_mode_width(struct drm_display_mode *mode);
+extern int drm_mode_height(struct drm_display_mode *mode);
+
+/* for use by the fb module */
+extern int drm_mode_attachmode_crtc(struct drm_device *dev,
+                                   struct drm_crtc *crtc,
+                                   struct drm_display_mode *mode);
+extern int drm_mode_detachmode_crtc(struct drm_device *dev, struct drm_display_mode *mode);
+
+extern struct drm_display_mode *drm_mode_create(struct drm_device *dev);
+extern void drm_mode_destroy(struct drm_device *dev, struct drm_display_mode *mode);
+extern void drm_mode_list_concat(struct list_head *head,
+                                struct list_head *new);
+extern void drm_mode_validate_size(struct drm_device *dev,
+                                  struct list_head *mode_list,
+                                  int maxX, int maxY, int maxPitch);
+extern void drm_mode_prune_invalid(struct drm_device *dev,
+                                  struct list_head *mode_list, bool verbose);
+extern void drm_mode_sort(struct list_head *mode_list);
+extern int drm_mode_vrefresh(struct drm_display_mode *mode);
+extern void drm_mode_set_crtcinfo(struct drm_display_mode *p,
+                                 int adjust_flags);
+extern void drm_mode_connector_list_update(struct drm_connector *connector);
+extern int drm_mode_connector_update_edid_property(struct drm_connector *connector,
+                                               struct edid *edid);
+extern int drm_connector_property_set_value(struct drm_connector *connector,
+                                        struct drm_property *property,
+                                        uint64_t value);
+extern int drm_connector_property_get_value(struct drm_connector *connector,
+                                        struct drm_property *property,
+                                        uint64_t *value);
+extern struct drm_display_mode *drm_crtc_mode_create(struct drm_device *dev);
+extern void drm_framebuffer_set_object(struct drm_device *dev,
+                                      unsigned long handle);
+extern int drm_framebuffer_init(struct drm_device *dev,
+                               struct drm_framebuffer *fb,
+                               const struct drm_framebuffer_funcs *funcs);
+extern void drm_framebuffer_cleanup(struct drm_framebuffer *fb);
+extern int drmfb_probe(struct drm_device *dev, struct drm_crtc *crtc);
+extern int drmfb_remove(struct drm_device *dev, struct drm_framebuffer *fb);
+extern void drm_crtc_probe_connector_modes(struct drm_device *dev, int maxX, int maxY);
+extern bool drm_crtc_in_use(struct drm_crtc *crtc);
+
+extern int drm_connector_attach_property(struct drm_connector *connector,
+                                     struct drm_property *property, uint64_t init_val);
+extern struct drm_property *drm_property_create(struct drm_device *dev, int flags,
+                                               const char *name, int num_values);
+extern void drm_property_destroy(struct drm_device *dev, struct drm_property *property);
+extern int drm_property_add_enum(struct drm_property *property, int index,
+                                uint64_t value, const char *name);
+extern int drm_mode_create_dvi_i_properties(struct drm_device *dev);
+extern int drm_mode_create_tv_properties(struct drm_device *dev, int num_formats,
+                                    char *formats[]);
+extern int drm_mode_create_scaling_mode_property(struct drm_device *dev);
+extern int drm_mode_create_dithering_property(struct drm_device *dev);
+extern char *drm_get_encoder_name(struct drm_encoder *encoder);
+
+extern int drm_mode_connector_attach_encoder(struct drm_connector *connector,
+                                            struct drm_encoder *encoder);
+extern void drm_mode_connector_detach_encoder(struct drm_connector *connector,
+                                          struct drm_encoder *encoder);
+extern bool drm_mode_crtc_set_gamma_size(struct drm_crtc *crtc,
+                                        int gamma_size);
+extern void *drm_mode_object_find(struct drm_device *dev, uint32_t id, uint32_t type);
+/* IOCTLs */
+extern int drm_mode_getresources(struct drm_device *dev,
+                                void *data, struct drm_file *file_priv);
+
+extern int drm_mode_getcrtc(struct drm_device *dev,
+                           void *data, struct drm_file *file_priv);
+extern int drm_mode_getconnector(struct drm_device *dev,
+                             void *data, struct drm_file *file_priv);
+extern int drm_mode_setcrtc(struct drm_device *dev,
+                           void *data, struct drm_file *file_priv);
+extern int drm_mode_cursor_ioctl(struct drm_device *dev,
+                               void *data, struct drm_file *file_priv);
+extern int drm_mode_addfb(struct drm_device *dev,
+                         void *data, struct drm_file *file_priv);
+extern int drm_mode_rmfb(struct drm_device *dev,
+                        void *data, struct drm_file *file_priv);
+extern int drm_mode_getfb(struct drm_device *dev,
+                         void *data, struct drm_file *file_priv);
+extern int drm_mode_addmode_ioctl(struct drm_device *dev,
+                                 void *data, struct drm_file *file_priv);
+extern int drm_mode_rmmode_ioctl(struct drm_device *dev,
+                                void *data, struct drm_file *file_priv);
+extern int drm_mode_attachmode_ioctl(struct drm_device *dev,
+                                    void *data, struct drm_file *file_priv);
+extern int drm_mode_detachmode_ioctl(struct drm_device *dev,
+                                    void *data, struct drm_file *file_priv);
+
+extern int drm_mode_getproperty_ioctl(struct drm_device *dev,
+                                     void *data, struct drm_file *file_priv);
+extern int drm_mode_getblob_ioctl(struct drm_device *dev,
+                                 void *data, struct drm_file *file_priv);
+extern int drm_mode_connector_property_set_ioctl(struct drm_device *dev,
+                                             void *data, struct drm_file *file_priv);
+extern int drm_mode_hotplug_ioctl(struct drm_device *dev,
+                                 void *data, struct drm_file *file_priv);
+extern int drm_mode_replacefb(struct drm_device *dev,
+                             void *data, struct drm_file *file_priv);
+extern int drm_mode_getencoder(struct drm_device *dev,
+                              void *data, struct drm_file *file_priv);
+extern int drm_mode_gamma_get_ioctl(struct drm_device *dev,
+                                   void *data, struct drm_file *file_priv);
+extern int drm_mode_gamma_set_ioctl(struct drm_device *dev,
+                                   void *data, struct drm_file *file_priv);
+#endif /* __DRM_CRTC_H__ */
diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
new file mode 100644 (file)
index 0000000..4bc04cf
--- /dev/null
@@ -0,0 +1,124 @@
+/*
+ * Copyright © 2006 Keith Packard
+ * Copyright © 2007-2008 Dave Airlie
+ * Copyright © 2007-2008 Intel Corporation
+ *   Jesse Barnes <jesse.barnes@intel.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/*
+ * The DRM mode setting helper functions are common code for drivers to use if
+ * they wish.  Drivers are not forced to use this code in their
+ * implementations, but it would be useful if the code they do use at least
+ * provides a consistent interface and operation to userspace.
+ */
+
+#ifndef __DRM_CRTC_HELPER_H__
+#define __DRM_CRTC_HELPER_H__
+
+#include <linux/i2c.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include <linux/idr.h>
+
+#include <linux/fb.h>
+
+struct drm_crtc_helper_funcs {
+       /*
+        * Control power levels on the CRTC.  If the mode passed in is
+        * unsupported, the provider must use the next lowest power level.
+        */
+       void (*dpms)(struct drm_crtc *crtc, int mode);
+       void (*prepare)(struct drm_crtc *crtc);
+       void (*commit)(struct drm_crtc *crtc);
+
+       /* Provider can fixup or change mode timings before modeset occurs */
+       bool (*mode_fixup)(struct drm_crtc *crtc,
+                          struct drm_display_mode *mode,
+                          struct drm_display_mode *adjusted_mode);
+       /* Actually set the mode */
+       void (*mode_set)(struct drm_crtc *crtc, struct drm_display_mode *mode,
+                        struct drm_display_mode *adjusted_mode, int x, int y,
+                        struct drm_framebuffer *old_fb);
+
+       /* Move the crtc on the current fb to the given position *optional* */
+       void (*mode_set_base)(struct drm_crtc *crtc, int x, int y,
+                             struct drm_framebuffer *old_fb);
+};
+
+struct drm_encoder_helper_funcs {
+       void (*dpms)(struct drm_encoder *encoder, int mode);
+       void (*save)(struct drm_encoder *encoder);
+       void (*restore)(struct drm_encoder *encoder);
+
+       bool (*mode_fixup)(struct drm_encoder *encoder,
+                          struct drm_display_mode *mode,
+                          struct drm_display_mode *adjusted_mode);
+       void (*prepare)(struct drm_encoder *encoder);
+       void (*commit)(struct drm_encoder *encoder);
+       void (*mode_set)(struct drm_encoder *encoder,
+                        struct drm_display_mode *mode,
+                        struct drm_display_mode *adjusted_mode);
+       /* detect for DAC style encoders */
+       enum drm_connector_status (*detect)(struct drm_encoder *encoder,
+                                           struct drm_connector *connector);
+};
+
+struct drm_connector_helper_funcs {
+       int (*get_modes)(struct drm_connector *connector);
+       int (*mode_valid)(struct drm_connector *connector,
+                         struct drm_display_mode *mode);
+       struct drm_encoder *(*best_encoder)(struct drm_connector *connector);
+};
+
+extern void drm_helper_probe_single_connector_modes(struct drm_connector *connector, uint32_t maxX, uint32_t maxY);
+extern void drm_helper_disable_unused_functions(struct drm_device *dev);
+extern int drm_helper_hotplug_stage_two(struct drm_device *dev);
+extern bool drm_helper_initial_config(struct drm_device *dev, bool can_grow);
+extern int drm_crtc_helper_set_config(struct drm_mode_set *set);
+extern bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
+                                    struct drm_display_mode *mode,
+                                    int x, int y,
+                                    struct drm_framebuffer *old_fb);
+extern bool drm_helper_crtc_in_use(struct drm_crtc *crtc);
+
+extern int drm_helper_mode_fill_fb_struct(struct drm_framebuffer *fb,
+                                         struct drm_mode_fb_cmd *mode_cmd);
+
+static inline void drm_crtc_helper_add(struct drm_crtc *crtc,
+                                      const struct drm_crtc_helper_funcs *funcs)
+{
+       crtc->helper_private = (void *)funcs;
+}
+
+static inline void drm_encoder_helper_add(struct drm_encoder *encoder,
+                                         const struct drm_encoder_helper_funcs *funcs)
+{
+       encoder->helper_private = (void *)funcs;
+}
+
+static inline void drm_connector_helper_add(struct drm_connector *connector,
+                                           const struct drm_connector_helper_funcs *funcs)
+{
+       connector->helper_private = (void *)funcs;
+}
+
+extern int drm_helper_resume_force_mode(struct drm_device *dev);
+#endif
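Taken together, a hypothetical output init path registers the base objects from drm_crtc.h and then attaches the helper vtables above (all my_* names are placeholders; the encoder/connector type constants come from drm_mode.h, added later in this patch):

/* CRTC */
drm_crtc_init(dev, &my_crtc->base, &my_crtc_funcs);
drm_crtc_helper_add(&my_crtc->base, &my_crtc_helper_funcs);

/* Encoder */
drm_encoder_init(dev, &my_enc->base, &my_encoder_funcs, DRM_MODE_ENCODER_DAC);
drm_encoder_helper_add(&my_enc->base, &my_encoder_helper_funcs);

/* Connector */
drm_connector_init(dev, &my_conn->base, &my_connector_funcs, DRM_MODE_CONNECTOR_VGA);
drm_connector_helper_add(&my_conn->base, &my_connector_helper_funcs);
drm_mode_connector_attach_encoder(&my_conn->base, &my_enc->base);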
diff --git a/include/drm/drm_edid.h b/include/drm/drm_edid.h
new file mode 100644 (file)
index 0000000..c707c15
--- /dev/null
@@ -0,0 +1,202 @@
+/*
+ * Copyright © 2007-2008 Intel Corporation
+ *   Jesse Barnes <jesse.barnes@intel.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef __DRM_EDID_H__
+#define __DRM_EDID_H__
+
+#include <linux/types.h>
+
+#define EDID_LENGTH 128
+#define DDC_ADDR 0x50
+
+#ifdef BIG_ENDIAN
+#error "EDID structure is little endian, need big endian versions"
+#else
+
+struct est_timings {
+       u8 t1;
+       u8 t2;
+       u8 mfg_rsvd;
+} __attribute__((packed));
+
+struct std_timing {
+       u8 hsize; /* need to multiply by 8 then add 248 */
+       u8 vfreq:6; /* need to add 60 */
+       u8 aspect_ratio:2; /* 00=16:10, 01=4:3, 10=5:4, 11=16:9 */
+} __attribute__((packed));
+
+/* If detailed data is pixel timing */
+struct detailed_pixel_timing {
+       u8 hactive_lo;
+       u8 hblank_lo;
+       u8 hblank_hi:4;
+       u8 hactive_hi:4;
+       u8 vactive_lo;
+       u8 vblank_lo;
+       u8 vblank_hi:4;
+       u8 vactive_hi:4;
+       u8 hsync_offset_lo;
+       u8 hsync_pulse_width_lo;
+       u8 vsync_pulse_width_lo:4;
+       u8 vsync_offset_lo:4;
+       u8 hsync_pulse_width_hi:2;
+       u8 hsync_offset_hi:2;
+       u8 vsync_pulse_width_hi:2;
+       u8 vsync_offset_hi:2;
+       u8 width_mm_lo;
+       u8 height_mm_lo;
+       u8 height_mm_hi:4;
+       u8 width_mm_hi:4;
+       u8 hborder;
+       u8 vborder;
+       u8 unknown0:1;
+       u8 vsync_positive:1;
+       u8 hsync_positive:1;
+       u8 separate_sync:2;
+       u8 stereo:1;
+       u8 unknown6:1;
+       u8 interlaced:1;
+} __attribute__((packed));
+
+/* If it's not pixel timing, it'll be one of the below */
+struct detailed_data_string {
+       u8 str[13];
+} __attribute__((packed));
+
+struct detailed_data_monitor_range {
+       u8 min_vfreq;
+       u8 max_vfreq;
+       u8 min_hfreq_khz;
+       u8 max_hfreq_khz;
+       u8 pixel_clock_mhz; /* need to multiply by 10 */
+       u16 sec_gtf_toggle; /* A000=use above, 20=use below */ /* FIXME: byte order */
+       u8 hfreq_start_khz; /* need to multiply by 2 */
+       u8 c; /* need to divide by 2 */
+       u16 m; /* FIXME: byte order */
+       u8 k;
+       u8 j; /* need to divide by 2 */
+} __attribute__((packed));
+
+struct detailed_data_wpindex {
+       u8 white_y_lo:2;
+       u8 white_x_lo:2;
+       u8 pad:4;
+       u8 white_x_hi;
+       u8 white_y_hi;
+       u8 gamma; /* need to divide by 100 then add 1 */
+} __attribute__((packed));
+
+struct detailed_data_color_point {
+       u8 windex1;
+       u8 wpindex1[3];
+       u8 windex2;
+       u8 wpindex2[3];
+} __attribute__((packed));
+
+struct detailed_non_pixel {
+       u8 pad1;
+       u8 type; /* ff=serial, fe=string, fd=monitor range, fc=monitor name
+                   fb=color point data, fa=standard timing data,
+                   f9=undefined, f8=mfg. reserved */
+       u8 pad2;
+       union {
+               struct detailed_data_string str;
+               struct detailed_data_monitor_range range;
+               struct detailed_data_wpindex color;
+               struct std_timing timings[5];
+       } data;
+} __attribute__((packed));
+
+#define EDID_DETAIL_STD_MODES 0xfa
+#define EDID_DETAIL_MONITOR_CPDATA 0xfb
+#define EDID_DETAIL_MONITOR_NAME 0xfc
+#define EDID_DETAIL_MONITOR_RANGE 0xfd
+#define EDID_DETAIL_MONITOR_STRING 0xfe
+#define EDID_DETAIL_MONITOR_SERIAL 0xff
+
+struct detailed_timing {
+       u16 pixel_clock; /* need to multiply by 10 KHz */ /* FIXME: byte order */
+       union {
+               struct detailed_pixel_timing pixel_data;
+               struct detailed_non_pixel other_data;
+       } data;
+} __attribute__((packed));
+
+struct edid {
+       u8 header[8];
+       /* Vendor & product info */
+       u8 mfg_id[2];
+       u8 prod_code[2];
+       u32 serial; /* FIXME: byte order */
+       u8 mfg_week;
+       u8 mfg_year;
+       /* EDID version */
+       u8 version;
+       u8 revision;
+       /* Display info: */
+       /*   input definition */
+       u8 serration_vsync:1;
+       u8 sync_on_green:1;
+       u8 composite_sync:1;
+       u8 separate_syncs:1;
+       u8 blank_to_black:1;
+       u8 video_level:2;
+       u8 digital:1; /* bits below must be zero if set */
+       u8 width_cm;
+       u8 height_cm;
+       u8 gamma;
+       /*   feature support */
+       u8 default_gtf:1;
+       u8 preferred_timing:1;
+       u8 standard_color:1;
+       u8 display_type:2; /* 00=mono, 01=rgb, 10=non-rgb, 11=unknown */
+       u8 pm_active_off:1;
+       u8 pm_suspend:1;
+       u8 pm_standby:1;
+       /* Color characteristics */
+       u8 red_green_lo;
+       u8 black_white_lo;
+       u8 red_x;
+       u8 red_y;
+       u8 green_x;
+       u8 green_y;
+       u8 blue_x;
+       u8 blue_y;
+       u8 white_x;
+       u8 white_y;
+       /* Est. timings and mfg rsvd timings*/
+       struct est_timings established_timings;
+       /* Standard timings 1-8*/
+       struct std_timing standard_timings[8];
+       /* Detailed timings 1-4 */
+       struct detailed_timing detailed_timings[4];
+       /* Number of 128 byte ext. blocks */
+       u8 extensions;
+       /* Checksum */
+       u8 checksum;
+} __attribute__((packed));
+
+#endif /* little endian structs */
+
+#define EDID_PRODUCT_ID(e) ((e)->prod_code[0] | ((e)->prod_code[1] << 8))
+
+#endif /* __DRM_EDID_H__ */
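A connector's get_modes() helper typically ties this header to the EDID functions declared in drm_crtc.h; a minimal sketch, assuming a driver-owned DDC i2c adapter named example_ddc_adapter and that the returned EDID buffer is kfree()d by the caller:

static int example_get_modes(struct drm_connector *connector)
{
        struct edid *edid;
        int count = 0;

        edid = drm_get_edid(connector, &example_ddc_adapter);
        if (edid) {
                drm_mode_connector_update_edid_property(connector, edid);
                count = drm_add_edid_modes(connector, edid);
                kfree(edid);
        }
        return count;
}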
diff --git a/include/drm/drm_mode.h b/include/drm/drm_mode.h
new file mode 100644 (file)
index 0000000..601d2bd
--- /dev/null
@@ -0,0 +1,271 @@
+/*
+ * Copyright (c) 2007 Dave Airlie <airlied@linux.ie>
+ * Copyright (c) 2007 Jakob Bornecrantz <wallbraker@gmail.com>
+ * Copyright (c) 2008 Red Hat Inc.
+ * Copyright (c) 2007-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
+ * Copyright (c) 2007-2008 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#ifndef _DRM_MODE_H
+#define _DRM_MODE_H
+
+#if !defined(__KERNEL__) && !defined(_KERNEL)
+#include <stdint.h>
+#else
+#include <linux/kernel.h>
+#endif
+
+#define DRM_DISPLAY_INFO_LEN   32
+#define DRM_CONNECTOR_NAME_LEN 32
+#define DRM_DISPLAY_MODE_LEN   32
+#define DRM_PROP_NAME_LEN      32
+
+#define DRM_MODE_TYPE_BUILTIN  (1<<0)
+#define DRM_MODE_TYPE_CLOCK_C  ((1<<1) | DRM_MODE_TYPE_BUILTIN)
+#define DRM_MODE_TYPE_CRTC_C   ((1<<2) | DRM_MODE_TYPE_BUILTIN)
+#define DRM_MODE_TYPE_PREFERRED        (1<<3)
+#define DRM_MODE_TYPE_DEFAULT  (1<<4)
+#define DRM_MODE_TYPE_USERDEF  (1<<5)
+#define DRM_MODE_TYPE_DRIVER   (1<<6)
+
+/* Video mode flags */
+/* bit compatible with the xorg definitions. */
+#define DRM_MODE_FLAG_PHSYNC   (1<<0)
+#define DRM_MODE_FLAG_NHSYNC   (1<<1)
+#define DRM_MODE_FLAG_PVSYNC   (1<<2)
+#define DRM_MODE_FLAG_NVSYNC   (1<<3)
+#define DRM_MODE_FLAG_INTERLACE        (1<<4)
+#define DRM_MODE_FLAG_DBLSCAN  (1<<5)
+#define DRM_MODE_FLAG_CSYNC    (1<<6)
+#define DRM_MODE_FLAG_PCSYNC   (1<<7)
+#define DRM_MODE_FLAG_NCSYNC   (1<<8)
+#define DRM_MODE_FLAG_HSKEW    (1<<9) /* hskew provided */
+#define DRM_MODE_FLAG_BCAST    (1<<10)
+#define DRM_MODE_FLAG_PIXMUX   (1<<11)
+#define DRM_MODE_FLAG_DBLCLK   (1<<12)
+#define DRM_MODE_FLAG_CLKDIV2  (1<<13)
+
+/* DPMS flags */
+/* bit compatible with the xorg definitions. */
+#define DRM_MODE_DPMS_ON       0
+#define DRM_MODE_DPMS_STANDBY  1
+#define DRM_MODE_DPMS_SUSPEND  2
+#define DRM_MODE_DPMS_OFF      3
+
+/* Scaling mode options */
+#define DRM_MODE_SCALE_NON_GPU         0
+#define DRM_MODE_SCALE_FULLSCREEN      1
+#define DRM_MODE_SCALE_NO_SCALE                2
+#define DRM_MODE_SCALE_ASPECT          3
+
+/* Dithering mode options */
+#define DRM_MODE_DITHERING_OFF 0
+#define DRM_MODE_DITHERING_ON  1
+
+struct drm_mode_modeinfo {
+       uint32_t clock;
+       uint16_t hdisplay, hsync_start, hsync_end, htotal, hskew;
+       uint16_t vdisplay, vsync_start, vsync_end, vtotal, vscan;
+
+       uint32_t vrefresh; /* vertical refresh * 1000 */
+
+       uint32_t flags;
+       uint32_t type;
+       char name[DRM_DISPLAY_MODE_LEN];
+};
+
+struct drm_mode_card_res {
+       uint64_t fb_id_ptr;
+       uint64_t crtc_id_ptr;
+       uint64_t connector_id_ptr;
+       uint64_t encoder_id_ptr;
+       uint32_t count_fbs;
+       uint32_t count_crtcs;
+       uint32_t count_connectors;
+       uint32_t count_encoders;
+       uint32_t min_width, max_width;
+       uint32_t min_height, max_height;
+};
+
+struct drm_mode_crtc {
+       uint64_t set_connectors_ptr;
+       uint32_t count_connectors;
+
+       uint32_t crtc_id; /**< Id */
+       uint32_t fb_id; /**< Id of framebuffer */
+
+       uint32_t x, y; /**< Position on the framebuffer */
+
+       uint32_t gamma_size;
+       uint32_t mode_valid;
+       struct drm_mode_modeinfo mode;
+};
+
+#define DRM_MODE_ENCODER_NONE  0
+#define DRM_MODE_ENCODER_DAC   1
+#define DRM_MODE_ENCODER_TMDS  2
+#define DRM_MODE_ENCODER_LVDS  3
+#define DRM_MODE_ENCODER_TVDAC 4
+
+struct drm_mode_get_encoder {
+       uint32_t encoder_id;
+       uint32_t encoder_type;
+
+       uint32_t crtc_id; /**< Id of crtc */
+
+       uint32_t possible_crtcs;
+       uint32_t possible_clones;
+};
+
+/* This is for connectors with multiple signal types. */
+/* Try to match DRM_MODE_CONNECTOR_X as closely as possible. */
+#define DRM_MODE_SUBCONNECTOR_Automatic        0
+#define DRM_MODE_SUBCONNECTOR_Unknown  0
+#define DRM_MODE_SUBCONNECTOR_DVID     3
+#define DRM_MODE_SUBCONNECTOR_DVIA     4
+#define DRM_MODE_SUBCONNECTOR_Composite        5
+#define DRM_MODE_SUBCONNECTOR_SVIDEO   6
+#define DRM_MODE_SUBCONNECTOR_Component        8
+
+#define DRM_MODE_CONNECTOR_Unknown     0
+#define DRM_MODE_CONNECTOR_VGA         1
+#define DRM_MODE_CONNECTOR_DVII                2
+#define DRM_MODE_CONNECTOR_DVID                3
+#define DRM_MODE_CONNECTOR_DVIA                4
+#define DRM_MODE_CONNECTOR_Composite   5
+#define DRM_MODE_CONNECTOR_SVIDEO      6
+#define DRM_MODE_CONNECTOR_LVDS                7
+#define DRM_MODE_CONNECTOR_Component   8
+#define DRM_MODE_CONNECTOR_9PinDIN     9
+#define DRM_MODE_CONNECTOR_DisplayPort 10
+#define DRM_MODE_CONNECTOR_HDMIA       11
+#define DRM_MODE_CONNECTOR_HDMIB       12
+
+struct drm_mode_get_connector {
+
+       uint64_t encoders_ptr;
+       uint64_t modes_ptr;
+       uint64_t props_ptr;
+       uint64_t prop_values_ptr;
+
+       uint32_t count_modes;
+       uint32_t count_props;
+       uint32_t count_encoders;
+
+       uint32_t encoder_id; /**< Current Encoder */
+       uint32_t connector_id; /**< Id */
+       uint32_t connector_type;
+       uint32_t connector_type_id;
+
+       uint32_t connection;
+       uint32_t mm_width, mm_height; /**< width x height in millimeters */
+       uint32_t subpixel;
+};
+
+#define DRM_MODE_PROP_PENDING  (1<<0)
+#define DRM_MODE_PROP_RANGE    (1<<1)
+#define DRM_MODE_PROP_IMMUTABLE        (1<<2)
+#define DRM_MODE_PROP_ENUM     (1<<3) /* enumerated type with text strings */
+#define DRM_MODE_PROP_BLOB     (1<<4)
+
+struct drm_mode_property_enum {
+       uint64_t value;
+       char name[DRM_PROP_NAME_LEN];
+};
+
+struct drm_mode_get_property {
+       uint64_t values_ptr; /* values and blob lengths */
+       uint64_t enum_blob_ptr; /* enum and blob id ptrs */
+
+       uint32_t prop_id;
+       uint32_t flags;
+       char name[DRM_PROP_NAME_LEN];
+
+       uint32_t count_values;
+       uint32_t count_enum_blobs;
+};
+
+struct drm_mode_connector_set_property {
+       uint64_t value;
+       uint32_t prop_id;
+       uint32_t connector_id;
+};
+
+struct drm_mode_get_blob {
+       uint32_t blob_id;
+       uint32_t length;
+       uint64_t data;
+};
+
+struct drm_mode_fb_cmd {
+       uint32_t fb_id;
+       uint32_t width, height;
+       uint32_t pitch;
+       uint32_t bpp;
+       uint32_t depth;
+       /* driver specific handle */
+       uint32_t handle;
+};
+
+struct drm_mode_mode_cmd {
+       uint32_t connector_id;
+       struct drm_mode_modeinfo mode;
+};
+
+#define DRM_MODE_CURSOR_BO     (1<<0)
+#define DRM_MODE_CURSOR_MOVE   (1<<1)
+
+/*
+ * Depending on the value in flags, different members are used.
+ *
+ * CURSOR_BO uses
+ *    crtc
+ *    width
+ *    height
+ *    handle - if 0, turns the cursor off
+ *
+ * CURSOR_MOVE uses
+ *    crtc
+ *    x
+ *    y
+ */
+struct drm_mode_cursor {
+       uint32_t flags;
+       uint32_t crtc_id;
+       int32_t x;
+       int32_t y;
+       uint32_t width;
+       uint32_t height;
+       /* driver specific handle */
+       uint32_t handle;
+};
+
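For example, a userspace cursor move request would fill the structure like this (crtc_id is a placeholder; the cursor ioctl number itself lives in drm.h and is not part of this hunk):

struct drm_mode_cursor arg = {
        .flags   = DRM_MODE_CURSOR_MOVE,
        .crtc_id = crtc_id,
        .x       = 100,
        .y       = 100,
};
/* then passed to the mode-cursor ioctl on the DRM fd (defined in drm.h) */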
+struct drm_mode_crtc_lut {
+       uint32_t crtc_id;
+       uint32_t gamma_size;
+
+       /* pointers to arrays */
+       uint64_t red;
+       uint64_t green;
+       uint64_t blue;
+};
+
+#endif
index 480037331e4e39968c0609c0d91a27a6fd4c922c..ee5389d22c64745d72cc985d6024a5eec55176d4 100644 (file)
 
 /* SAREA area needs to be at least a page */
 #if defined(__alpha__)
-#define SAREA_MAX                       0x2000
+#define SAREA_MAX                       0x2000U
 #elif defined(__ia64__)
-#define SAREA_MAX                       0x10000        /* 64kB */
+#define SAREA_MAX                       0x10000U       /* 64kB */
 #else
 /* Intel 830M driver needs at least 8k SAREA */
-#define SAREA_MAX                       0x2000
+#define SAREA_MAX                       0x2000U
 #endif
 
 /** Maximum number of drawables in the SAREA */
index 152b34da927c1d8117c42d06d90a3adc0d214109..b3bcf72dc65623f412c3902a425ffdcd98b9b563 100644 (file)
@@ -113,8 +113,31 @@ typedef struct _drm_i915_sarea {
        int pipeB_y;
        int pipeB_w;
        int pipeB_h;
+
+       /* fill out some space for old userspace triple buffer */
+       drm_handle_t unused_handle;
+       uint32_t unused1, unused2, unused3;
+
+       /* buffer object handles for static buffers. May change
+        * over the lifetime of the client.
+        */
+       uint32_t front_bo_handle;
+       uint32_t back_bo_handle;
+       uint32_t unused_bo_handle;
+       uint32_t depth_bo_handle;
+
 } drm_i915_sarea_t;
 
+/* due to userspace building against these headers we need some compat here */
+#define planeA_x pipeA_x
+#define planeA_y pipeA_y
+#define planeA_w pipeA_w
+#define planeA_h pipeA_h
+#define planeB_x pipeB_x
+#define planeB_y pipeB_y
+#define planeB_w pipeB_w
+#define planeB_h pipeB_h
+
 /* Flags for perf_boxes
  */
 #define I915_BOX_RING_EMPTY    0x1
@@ -160,6 +183,7 @@ typedef struct _drm_i915_sarea {
 #define DRM_I915_GEM_SET_TILING        0x21
 #define DRM_I915_GEM_GET_TILING        0x22
 #define DRM_I915_GEM_GET_APERTURE 0x23
+#define DRM_I915_GEM_MMAP_GTT  0x24
 
 #define DRM_IOCTL_I915_INIT            DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
 #define DRM_IOCTL_I915_FLUSH           DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH)
@@ -177,6 +201,8 @@ typedef struct _drm_i915_sarea {
 #define DRM_IOCTL_I915_SET_VBLANK_PIPE DRM_IOW( DRM_COMMAND_BASE + DRM_I915_SET_VBLANK_PIPE, drm_i915_vblank_pipe_t)
 #define DRM_IOCTL_I915_GET_VBLANK_PIPE DRM_IOR( DRM_COMMAND_BASE + DRM_I915_GET_VBLANK_PIPE, drm_i915_vblank_pipe_t)
 #define DRM_IOCTL_I915_VBLANK_SWAP     DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_VBLANK_SWAP, drm_i915_vblank_swap_t)
+#define DRM_IOCTL_I915_GEM_INIT                DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_INIT, struct drm_i915_gem_init)
+#define DRM_IOCTL_I915_GEM_EXECBUFFER  DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER, struct drm_i915_gem_execbuffer)
 #define DRM_IOCTL_I915_GEM_PIN         DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_PIN, struct drm_i915_gem_pin)
 #define DRM_IOCTL_I915_GEM_UNPIN       DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_UNPIN, struct drm_i915_gem_unpin)
 #define DRM_IOCTL_I915_GEM_BUSY                DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_BUSY, struct drm_i915_gem_busy)
@@ -187,6 +213,7 @@ typedef struct _drm_i915_sarea {
 #define DRM_IOCTL_I915_GEM_PREAD       DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PREAD, struct drm_i915_gem_pread)
 #define DRM_IOCTL_I915_GEM_PWRITE      DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PWRITE, struct drm_i915_gem_pwrite)
 #define DRM_IOCTL_I915_GEM_MMAP                DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP, struct drm_i915_gem_mmap)
+#define DRM_IOCTL_I915_GEM_MMAP_GTT    DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP_GTT, struct drm_i915_gem_mmap_gtt)
 #define DRM_IOCTL_I915_GEM_SET_DOMAIN  DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SET_DOMAIN, struct drm_i915_gem_set_domain)
 #define DRM_IOCTL_I915_GEM_SW_FINISH   DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SW_FINISH, struct drm_i915_gem_sw_finish)
 #define DRM_IOCTL_I915_GEM_SET_TILING  DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_SET_TILING, struct drm_i915_gem_set_tiling)
@@ -196,7 +223,7 @@ typedef struct _drm_i915_sarea {
 /* Allow drivers to submit batchbuffers directly to hardware, relying
  * on the security mechanisms provided by hardware.
  */
-typedef struct _drm_i915_batchbuffer {
+typedef struct drm_i915_batchbuffer {
        int start;              /* agp offset */
        int used;               /* nr bytes in use */
        int DR1;                /* hw flags for GFX_OP_DRAWRECT_INFO */
@@ -382,6 +409,18 @@ struct drm_i915_gem_mmap {
        uint64_t addr_ptr;
 };
 
+struct drm_i915_gem_mmap_gtt {
+       /** Handle for the object being mapped. */
+       uint32_t handle;
+       uint32_t pad;
+       /**
+        * Fake offset to use for subsequent mmap call
+        *
+        * This is a fixed-size type for 32/64 compatibility.
+        */
+       uint64_t offset;
+};
+
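The intended userspace flow is to ask the kernel for the fake offset and then mmap() the DRM fd at that offset; a sketch, assuming drm_fd, handle and size are provided by the caller and <sys/ioctl.h>/<sys/mman.h> are included:

struct drm_i915_gem_mmap_gtt arg = { .handle = handle };
void *ptr = MAP_FAILED;

if (ioctl(drm_fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &arg) == 0)
        ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
                   drm_fd, arg.offset);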
 struct drm_i915_gem_set_domain {
        /** Handle for the object */
        uint32_t handle;
index e531783e5d78db542fb4e86c602698bf5824d72b..95ac82340c3bc0914ad08e02bc337a17b0416e39 100644 (file)
@@ -313,6 +313,7 @@ unifdef-y += ptrace.h
 unifdef-y += qnx4_fs.h
 unifdef-y += quota.h
 unifdef-y += random.h
+unifdef-y += irqnr.h
 unifdef-y += reboot.h
 unifdef-y += reiserfs_fs.h
 unifdef-y += reiserfs_xattr.h
index f6b8cf99b596a30820215766fb7e680dedf7e6e1..b16a957030f87e16341efe5b41b476a04e742fc2 100644 (file)
@@ -5,6 +5,7 @@
 #include <linux/workqueue.h>
 #include <linux/aio_abi.h>
 #include <linux/uio.h>
+#include <linux/rcupdate.h>
 
 #include <asm/atomic.h>
 
@@ -183,7 +184,7 @@ struct kioctx {
 
        /* This needs improving */
        unsigned long           user_id;
-       struct kioctx           *next;
+       struct hlist_node       list;
 
        wait_queue_head_t       wait;
 
@@ -199,6 +200,8 @@ struct kioctx {
        struct aio_ring_info    ring_info;
 
        struct delayed_work     wq;
+
+       struct rcu_head         rcu_head;
 };
 
 /* prototypes */
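With ->ioctx_list turned into an hlist and a struct rcu_head added to struct kioctx, readers can walk the list under rcu_read_lock() while updates serialize on mm->ioctx_lock. A hedged sketch of such a lookup (not the kernel's actual lookup_ioctx(), which additionally takes a reference and skips dead contexts; find_ioctx() is an illustrative name):

/* Sketch: lockless lookup of an AIO context by user_id. */
static struct kioctx *find_ioctx(struct mm_struct *mm, unsigned long ctx_id)
{
        struct kioctx *ctx = NULL, *iter;
        struct hlist_node *n;

        rcu_read_lock();
        hlist_for_each_entry_rcu(iter, n, &mm->ioctx_list, list) {
                if (iter->user_id == ctx_id) {
                        ctx = iter;
                        break;
                }
        }
        rcu_read_unlock();
        return ctx;
}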
index 6a642098e5c3524ad69fd15993268f35ef02cad9..18462c5b8fff91e0b4d5341dac07e4e263907690 100644 (file)
@@ -90,10 +90,11 @@ struct bio {
 
        unsigned int            bi_comp_cpu;    /* completion CPU */
 
+       atomic_t                bi_cnt;         /* pin count */
+
        struct bio_vec          *bi_io_vec;     /* the actual vec list */
 
        bio_end_io_t            *bi_end_io;
-       atomic_t                bi_cnt;         /* pin count */
 
        void                    *bi_private;
 #if defined(CONFIG_BLK_DEV_INTEGRITY)
@@ -101,6 +102,13 @@ struct bio {
 #endif
 
        bio_destructor_t        *bi_destructor; /* destructor */
+
+       /*
+        * We can inline a number of vecs at the end of the bio, to avoid
+        * double allocations for a small number of bio_vecs. This member
+        * MUST obviously be kept at the very end of the bio.
+        */
+       struct bio_vec          bi_inline_vecs[0];
 };
 
 /*
@@ -117,6 +125,7 @@ struct bio {
 #define BIO_CPU_AFFINE 8       /* complete bio on same CPU as submitted */
 #define BIO_NULL_MAPPED 9      /* contains invalid user pages */
 #define BIO_FS_INTEGRITY 10    /* fs owns integrity data, not block layer */
+#define BIO_QUIET      11      /* Make BIO Quiet */
 #define bio_flagged(bio, flag) ((bio)->bi_flags & (1 << (flag)))
 
 /*
@@ -211,6 +220,11 @@ static inline void *bio_data(struct bio *bio)
        return NULL;
 }
 
+static inline int bio_has_allocated_vec(struct bio *bio)
+{
+       return bio->bi_io_vec && bio->bi_io_vec != bio->bi_inline_vecs;
+}
+
 /*
  * will die
  */
@@ -332,7 +346,7 @@ struct bio_pair {
 extern struct bio_pair *bio_split(struct bio *bi, int first_sectors);
 extern void bio_pair_release(struct bio_pair *dbio);
 
-extern struct bio_set *bioset_create(int, int);
+extern struct bio_set *bioset_create(unsigned int, unsigned int);
 extern void bioset_free(struct bio_set *);
 
 extern struct bio *bio_alloc(gfp_t, int);
@@ -377,6 +391,7 @@ extern struct bio *bio_copy_user_iov(struct request_queue *,
 extern int bio_uncopy_user(struct bio *);
 void zero_fill_bio(struct bio *bio);
 extern struct bio_vec *bvec_alloc_bs(gfp_t, int, unsigned long *, struct bio_set *);
+extern void bvec_free_bs(struct bio_set *, struct bio_vec *, unsigned int);
 extern unsigned int bvec_nr_vecs(unsigned short idx);
 
 /*
@@ -395,13 +410,17 @@ static inline void bio_set_completion_cpu(struct bio *bio, unsigned int cpu)
  */
 #define BIO_POOL_SIZE 2
 #define BIOVEC_NR_POOLS 6
+#define BIOVEC_MAX_IDX (BIOVEC_NR_POOLS - 1)
 
 struct bio_set {
+       struct kmem_cache *bio_slab;
+       unsigned int front_pad;
+
        mempool_t *bio_pool;
 #if defined(CONFIG_BLK_DEV_INTEGRITY)
        mempool_t *bio_integrity_pool;
 #endif
-       mempool_t *bvec_pools[BIOVEC_NR_POOLS];
+       mempool_t *bvec_pool;
 };
 
 struct biovec_slab {
@@ -411,6 +430,7 @@ struct biovec_slab {
 };
 
 extern struct bio_set *fs_bio_set;
+extern struct biovec_slab bvec_slabs[BIOVEC_NR_POOLS] __read_mostly;
 
 /*
  * a small number of entries is fine, not going to be performance critical.
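The bio_set now carries its own slab (bio_slab) plus a front_pad, and struct bio gains a zero-length bi_inline_vecs[] array, so small vec lists share the bio's allocation. A hedged sketch of the intended object layout when allocating through a bio_set; the diagram is editorial, not code from the patch:

/*
 * One slab object allocated from a bio_set:
 *
 *   base ............ base + front_pad ........................ end
 *   [ driver-private pad ][ struct bio ][ bi_inline_vecs[0..N-1] ]
 *
 * When the requested vec count fits inline, bi_io_vec points at
 * bi_inline_vecs and bio_has_allocated_vec() returns false; only
 * larger requests fall back to the per-bio_set bvec_pool.
 */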
index 031a315c0509e5aa9eaa73c080b7e76a44dc95a0..7035cec583b6cd2854d14415139040032089b0ac 100644 (file)
@@ -26,7 +26,6 @@ struct scsi_ioctl_command;
 
 struct request_queue;
 struct elevator_queue;
-typedef struct elevator_queue elevator_t;
 struct request_pm_state;
 struct blk_trace;
 struct request;
@@ -313,7 +312,7 @@ struct request_queue
         */
        struct list_head        queue_head;
        struct request          *last_merge;
-       elevator_t              *elevator;
+       struct elevator_queue   *elevator;
 
        /*
         * the queue request freelist, one for reads and one for writes
@@ -449,6 +448,7 @@ struct request_queue
 #define QUEUE_FLAG_FAIL_IO     12      /* fake timeout */
 #define QUEUE_FLAG_STACKABLE   13      /* supports request stacking */
 #define QUEUE_FLAG_NONROT      14      /* non-rotational device (SSD) */
+#define QUEUE_FLAG_VIRT        QUEUE_FLAG_NONROT /* paravirt device */
 
 static inline int queue_is_locked(struct request_queue *q)
 {
@@ -522,22 +522,32 @@ enum {
         * TAG_FLUSH    : ordering by tag w/ pre and post flushes
         * TAG_FUA      : ordering by tag w/ pre flush and FUA write
         */
-       QUEUE_ORDERED_NONE      = 0x00,
-       QUEUE_ORDERED_DRAIN     = 0x01,
-       QUEUE_ORDERED_TAG       = 0x02,
-
-       QUEUE_ORDERED_PREFLUSH  = 0x10,
-       QUEUE_ORDERED_POSTFLUSH = 0x20,
-       QUEUE_ORDERED_FUA       = 0x40,
-
-       QUEUE_ORDERED_DRAIN_FLUSH = QUEUE_ORDERED_DRAIN |
-                       QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_POSTFLUSH,
-       QUEUE_ORDERED_DRAIN_FUA = QUEUE_ORDERED_DRAIN |
-                       QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_FUA,
-       QUEUE_ORDERED_TAG_FLUSH = QUEUE_ORDERED_TAG |
-                       QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_POSTFLUSH,
-       QUEUE_ORDERED_TAG_FUA   = QUEUE_ORDERED_TAG |
-                       QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_FUA,
+       QUEUE_ORDERED_BY_DRAIN          = 0x01,
+       QUEUE_ORDERED_BY_TAG            = 0x02,
+       QUEUE_ORDERED_DO_PREFLUSH       = 0x10,
+       QUEUE_ORDERED_DO_BAR            = 0x20,
+       QUEUE_ORDERED_DO_POSTFLUSH      = 0x40,
+       QUEUE_ORDERED_DO_FUA            = 0x80,
+
+       QUEUE_ORDERED_NONE              = 0x00,
+
+       QUEUE_ORDERED_DRAIN             = QUEUE_ORDERED_BY_DRAIN |
+                                         QUEUE_ORDERED_DO_BAR,
+       QUEUE_ORDERED_DRAIN_FLUSH       = QUEUE_ORDERED_DRAIN |
+                                         QUEUE_ORDERED_DO_PREFLUSH |
+                                         QUEUE_ORDERED_DO_POSTFLUSH,
+       QUEUE_ORDERED_DRAIN_FUA         = QUEUE_ORDERED_DRAIN |
+                                         QUEUE_ORDERED_DO_PREFLUSH |
+                                         QUEUE_ORDERED_DO_FUA,
+
+       QUEUE_ORDERED_TAG               = QUEUE_ORDERED_BY_TAG |
+                                         QUEUE_ORDERED_DO_BAR,
+       QUEUE_ORDERED_TAG_FLUSH         = QUEUE_ORDERED_TAG |
+                                         QUEUE_ORDERED_DO_PREFLUSH |
+                                         QUEUE_ORDERED_DO_POSTFLUSH,
+       QUEUE_ORDERED_TAG_FUA           = QUEUE_ORDERED_TAG |
+                                         QUEUE_ORDERED_DO_PREFLUSH |
+                                         QUEUE_ORDERED_DO_FUA,
 
        /*
         * Ordered operation sequence
@@ -585,7 +595,6 @@ enum {
 #define blk_fua_rq(rq)         ((rq)->cmd_flags & REQ_FUA)
 #define blk_discard_rq(rq)     ((rq)->cmd_flags & REQ_DISCARD)
 #define blk_bidi_rq(rq)                ((rq)->next_rq != NULL)
-#define blk_empty_barrier(rq)  (blk_barrier_rq(rq) && blk_fs_request(rq) && !(rq)->hard_nr_sectors)
 /* rq->queuelist of dequeued request must be list_empty() */
 #define blk_queued_rq(rq)      (!list_empty(&(rq)->queuelist))
 
@@ -855,10 +864,10 @@ extern void blk_queue_rq_timed_out(struct request_queue *, rq_timed_out_fn *);
 extern void blk_queue_rq_timeout(struct request_queue *, unsigned int);
 extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);
 extern int blk_queue_ordered(struct request_queue *, unsigned, prepare_flush_fn *);
-extern int blk_do_ordered(struct request_queue *, struct request **);
+extern bool blk_do_ordered(struct request_queue *, struct request **);
 extern unsigned blk_ordered_cur_seq(struct request_queue *);
 extern unsigned blk_ordered_req_seq(struct request *);
-extern void blk_ordered_complete_seq(struct request_queue *, unsigned, int);
+extern bool blk_ordered_complete_seq(struct request_queue *, unsigned, int);
 
 extern int blk_rq_map_sg(struct request_queue *, struct request *, struct scatterlist *);
 extern void blk_dump_rq_flags(struct request *, char *);
@@ -977,7 +986,6 @@ static inline void put_dev_sector(Sector p)
 
 struct work_struct;
 int kblockd_schedule_work(struct request_queue *q, struct work_struct *work);
-void kblockd_flush_work(struct work_struct *work);
 
 #define MODULE_ALIAS_BLOCKDEV(major,minor) \
        MODULE_ALIAS("block-major-" __stringify(major) "-" __stringify(minor))
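The ordered-mode constants are split into selectors (QUEUE_ORDERED_BY_*) and action flags (QUEUE_ORDERED_DO_*), with the old names rebuilt as combinations. A hedged sketch of how a driver with a volatile write cache might advertise drain-plus-flush ordering; my_prepare_flush() and my_driver_init_queue() are illustrative names and error handling is omitted:

/* Fill in a driver-specific cache-flush command for the barrier request. */
static void my_prepare_flush(struct request_queue *q, struct request *rq)
{
        /* e.g. build a SYNCHRONIZE CACHE / FLUSH CACHE command into rq */
}

static void my_driver_init_queue(struct request_queue *q)
{
        /* QUEUE_ORDERED_DRAIN_FLUSH == BY_DRAIN | DO_BAR |
         *                              DO_PREFLUSH | DO_POSTFLUSH */
        blk_queue_ordered(q, QUEUE_ORDERED_DRAIN_FLUSH, my_prepare_flush);
}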
index 777dbf695d449e4b0d03346a8b5c39877ee34d97..27b1bcffe40853e8e1f6c1f629525c94ec2a343e 100644 (file)
@@ -2,7 +2,6 @@
 #define _LINUX_BH_H
 
 extern void local_bh_disable(void);
-extern void __local_bh_enable(void);
 extern void _local_bh_enable(void);
 extern void local_bh_enable(void);
 extern void local_bh_enable_ip(unsigned long ip);
index 3ce64b90118c20346a704d1c727ec9450d7ccb95..8605f8a74df9ba031fb654095bd47b62095aac0f 100644 (file)
@@ -35,6 +35,7 @@ enum bh_state_bits {
        BH_Ordered,     /* ordered write */
        BH_Eopnotsupp,  /* operation not supported (barrier) */
        BH_Unwritten,   /* Buffer is allocated on disk but not written */
+       BH_Quiet,       /* Buffer error printks to be quiet */
 
        BH_PrivateStart,/* not a state bit, but the first bit available
                         * for private allocation by other entities
index 248e6e3b9b734d515e0c74f95aabd1de2dd57b6d..a67a90cf826882bb6ad47c119de04abac49cd91d 100644 (file)
@@ -153,4 +153,8 @@ void vcs_remove_sysfs(struct tty_struct *tty);
 #define VESA_HSYNC_SUSPEND      2
 #define VESA_POWERDOWN          3
 
+#ifdef CONFIG_VGA_CONSOLE
+extern bool vgacon_text_force(void);
+#endif
+
 #endif /* _LINUX_CONSOLE_H */
index 4aaa4afb1cb99f849bb345a8189153e65e94cbcd..096476f1fb356a2c17ab5d267be9a2295f55295a 100644 (file)
@@ -17,7 +17,7 @@ extern int debug_locks_off(void);
 ({                                                                     \
        int __ret = 0;                                                  \
                                                                        \
-       if (unlikely(c)) {                                              \
+       if (!oops_in_progress && unlikely(c)) {                         \
                if (debug_locks_off() && !debug_locks_silent)           \
                        WARN_ON(1);                                     \
                __ret = 1;                                              \
index 92f6f634e3e62ce77c38d46e6ecb88bf396ceea2..7a204256b1550f889f96a666808e805fe749c186 100644 (file)
@@ -28,7 +28,7 @@ typedef void (elevator_activate_req_fn) (struct request_queue *, struct request
 typedef void (elevator_deactivate_req_fn) (struct request_queue *, struct request *);
 
 typedef void *(elevator_init_fn) (struct request_queue *);
-typedef void (elevator_exit_fn) (elevator_t *);
+typedef void (elevator_exit_fn) (struct elevator_queue *);
 
 struct elevator_ops
 {
@@ -62,8 +62,8 @@ struct elevator_ops
 
 struct elv_fs_entry {
        struct attribute attr;
-       ssize_t (*show)(elevator_t *, char *);
-       ssize_t (*store)(elevator_t *, const char *, size_t);
+       ssize_t (*show)(struct elevator_queue *, char *);
+       ssize_t (*store)(struct elevator_queue *, const char *, size_t);
 };
 
 /*
@@ -130,7 +130,7 @@ extern ssize_t elv_iosched_show(struct request_queue *, char *);
 extern ssize_t elv_iosched_store(struct request_queue *, const char *, size_t);
 
 extern int elevator_init(struct request_queue *, char *);
-extern void elevator_exit(elevator_t *);
+extern void elevator_exit(struct elevator_queue *);
 extern int elv_rq_merge_ok(struct request *, struct bio *);
 
 /*
index 586ab56a3ec3500e601e786e0e858f1980fbc8e5..3bf5bb5a34f9fba43b9248caf434eb6059e6329e 100644 (file)
@@ -25,7 +25,8 @@ union ktime;
 #define FUTEX_WAKE_BITSET      10
 
 #define FUTEX_PRIVATE_FLAG     128
-#define FUTEX_CMD_MASK         ~FUTEX_PRIVATE_FLAG
+#define FUTEX_CLOCK_REALTIME   256
+#define FUTEX_CMD_MASK         ~(FUTEX_PRIVATE_FLAG | FUTEX_CLOCK_REALTIME)
 
 #define FUTEX_WAIT_PRIVATE     (FUTEX_WAIT | FUTEX_PRIVATE_FLAG)
 #define FUTEX_WAKE_PRIVATE     (FUTEX_WAKE | FUTEX_PRIVATE_FLAG)
@@ -164,6 +165,8 @@ union futex_key {
        } both;
 };
 
+#define FUTEX_KEY_INIT (union futex_key) { .both = { .ptr = NULL } }
+
 #ifdef CONFIG_FUTEX
 extern void exit_robust_list(struct task_struct *curr);
 extern void exit_pi_state_list(struct task_struct *curr);
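FUTEX_CLOCK_REALTIME occupies a second flag bit, so FUTEX_CMD_MASK now strips both flags when recovering the command. A hedged sketch of the decoding; futex_decode_op() is an illustrative helper, not a kernel function:

static inline int futex_decode_op(int op)
{
        int cmd = op & FUTEX_CMD_MASK;          /* FUTEX_WAIT, FUTEX_WAKE, ... */
        int is_private = op & FUTEX_PRIVATE_FLAG;       /* process-private fast path */
        int use_realtime = op & FUTEX_CLOCK_REALTIME;   /* absolute CLOCK_REALTIME timeout */

        (void)is_private;
        (void)use_realtime;
        return cmd;
}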
index 3df7742ce2469e4bdc3673ec904b77e9e4eab6f7..16948eaecae3c3d4969cbbeb23d8106b0aeeb5da 100644 (file)
@@ -126,6 +126,7 @@ struct blk_scsi_cmd_filter {
 struct disk_part_tbl {
        struct rcu_head rcu_head;
        int len;
+       struct hd_struct *last_lookup;
        struct hd_struct *part[];
 };
 
index 89a56d79e4c6c4987531a10ad8fed17f3d597bf7..f83288347dda3455e2deaa057112707accacaa93 100644 (file)
@@ -119,13 +119,17 @@ static inline void account_system_vtime(struct task_struct *tsk)
 }
 #endif
 
-#if defined(CONFIG_PREEMPT_RCU) && defined(CONFIG_NO_HZ)
+#if defined(CONFIG_NO_HZ) && !defined(CONFIG_CLASSIC_RCU)
 extern void rcu_irq_enter(void);
 extern void rcu_irq_exit(void);
+extern void rcu_nmi_enter(void);
+extern void rcu_nmi_exit(void);
 #else
 # define rcu_irq_enter() do { } while (0)
 # define rcu_irq_exit() do { } while (0)
-#endif /* CONFIG_PREEMPT_RCU */
+# define rcu_nmi_enter() do { } while (0)
+# define rcu_nmi_exit() do { } while (0)
+#endif /* #if defined(CONFIG_NO_HZ) && !defined(CONFIG_CLASSIC_RCU) */
 
 /*
  * It is safe to do non-atomic ops on ->hardirq_context,
@@ -135,7 +139,6 @@ extern void rcu_irq_exit(void);
  */
 #define __irq_enter()                                  \
        do {                                            \
-               rcu_irq_enter();                        \
                account_system_vtime(current);          \
                add_preempt_count(HARDIRQ_OFFSET);      \
                trace_hardirq_enter();                  \
@@ -154,7 +157,6 @@ extern void irq_enter(void);
                trace_hardirq_exit();                   \
                account_system_vtime(current);          \
                sub_preempt_count(HARDIRQ_OFFSET);      \
-               rcu_irq_exit();                         \
        } while (0)
 
 /*
@@ -166,11 +168,14 @@ extern void irq_exit(void);
        do {                                    \
                ftrace_nmi_enter();             \
                lockdep_off();                  \
+               rcu_nmi_enter();                \
                __irq_enter();                  \
        } while (0)
+
 #define nmi_exit()                             \
        do {                                    \
                __irq_exit();                   \
+               rcu_nmi_exit();                 \
                lockdep_on();                   \
                ftrace_nmi_exit();              \
        } while (0)
index 3eba43878dcb2da2e6d4ad6a8b029ed13e3f8e6a..bd37078c2d7d7ceb9f9f7b9bd2340724ae29c73d 100644 (file)
@@ -42,26 +42,6 @@ enum hrtimer_restart {
        HRTIMER_RESTART,        /* Timer must be restarted */
 };
 
-/*
- * hrtimer callback modes:
- *
- *     HRTIMER_CB_SOFTIRQ:             Callback must run in softirq context
- *     HRTIMER_CB_IRQSAFE_PERCPU:      Callback must run in hardirq context
- *                                     Special mode for tick emulation and
- *                                     scheduler timer. Such timers are per
- *                                     cpu and not allowed to be migrated on
- *                                     cpu unplug.
- *     HRTIMER_CB_IRQSAFE_UNLOCKED:    Callback should run in hardirq context
- *                                     with timer->base lock unlocked
- *                                     used for timers which call wakeup to
- *                                     avoid lock order problems with rq->lock
- */
-enum hrtimer_cb_mode {
-       HRTIMER_CB_SOFTIRQ,
-       HRTIMER_CB_IRQSAFE_PERCPU,
-       HRTIMER_CB_IRQSAFE_UNLOCKED,
-};
-
 /*
  * Values to track state of the timer
  *
@@ -70,7 +50,6 @@ enum hrtimer_cb_mode {
  * 0x00                inactive
  * 0x01                enqueued into rbtree
  * 0x02                callback function running
- * 0x04                callback pending (high resolution mode)
  *
  * Special cases:
  * 0x03                callback function running and enqueued
@@ -92,8 +71,7 @@ enum hrtimer_cb_mode {
 #define HRTIMER_STATE_INACTIVE 0x00
 #define HRTIMER_STATE_ENQUEUED 0x01
 #define HRTIMER_STATE_CALLBACK 0x02
-#define HRTIMER_STATE_PENDING  0x04
-#define HRTIMER_STATE_MIGRATE  0x08
+#define HRTIMER_STATE_MIGRATE  0x04
 
 /**
  * struct hrtimer - the basic hrtimer structure
@@ -109,8 +87,6 @@ enum hrtimer_cb_mode {
  * @function:  timer expiry callback function
  * @base:      pointer to the timer base (per cpu and per clock)
  * @state:     state information (See bit values above)
- * @cb_mode:   high resolution timer feature to select the callback execution
- *              mode
  * @cb_entry:  list head to enqueue an expired timer into the callback list
  * @start_site:        timer statistics field to store the site where the timer
  *             was started
@@ -129,7 +105,6 @@ struct hrtimer {
        struct hrtimer_clock_base       *base;
        unsigned long                   state;
        struct list_head                cb_entry;
-       enum hrtimer_cb_mode            cb_mode;
 #ifdef CONFIG_TIMER_STATS
        int                             start_pid;
        void                            *start_site;
@@ -188,15 +163,11 @@ struct hrtimer_clock_base {
 * @check_clocks:      Indicator; when set, re-evaluate the time source and
 *                     clock event devices to determine whether high
 *                     resolution mode can be activated.
- * @cb_pending:                Expired timers are moved from the rbtree to this
- *                     list in the timer interrupt. The list is processed
- *                     in the softirq.
  * @nr_events:         Total number of timer interrupt events
  */
 struct hrtimer_cpu_base {
        spinlock_t                      lock;
        struct hrtimer_clock_base       clock_base[HRTIMER_MAX_CLOCK_BASES];
-       struct list_head                cb_pending;
 #ifdef CONFIG_HIGH_RES_TIMERS
        ktime_t                         expires_next;
        int                             hres_active;
@@ -404,8 +375,7 @@ static inline int hrtimer_active(const struct hrtimer *timer)
  */
 static inline int hrtimer_is_queued(struct hrtimer *timer)
 {
-       return timer->state &
-               (HRTIMER_STATE_ENQUEUED | HRTIMER_STATE_PENDING);
+       return timer->state & HRTIMER_STATE_ENQUEUED;
 }
 
 /*
index f58a0cf8929a81fb14025ab8683ab32bf8ab539c..be3c484b5242555082763718f4818eacdb37f020 100644 (file)
@@ -14,6 +14,8 @@
 #include <linux/irqflags.h>
 #include <linux/smp.h>
 #include <linux/percpu.h>
+#include <linux/irqnr.h>
+
 #include <asm/atomic.h>
 #include <asm/ptrace.h>
 #include <asm/system.h>
@@ -251,9 +253,6 @@ enum
        BLOCK_SOFTIRQ,
        TASKLET_SOFTIRQ,
        SCHED_SOFTIRQ,
-#ifdef CONFIG_HIGH_RES_TIMERS
-       HRTIMER_SOFTIRQ,
-#endif
        RCU_SOFTIRQ,    /* Preferably, RCU should always be the last softirq */
 
        NR_SOFTIRQS
index 3dddfa703ebd95ab1253c56397104c2607f98e90..98564dc6447627f0d6c25033e3ba601db819daf1 100644 (file)
@@ -129,9 +129,14 @@ struct irq_chip {
        const char      *typename;
 };
 
+struct timer_rand_state;
+struct irq_2_iommu;
 /**
  * struct irq_desc - interrupt descriptor
  * @irq:               interrupt number for this descriptor
+ * @timer_rand_state:  pointer to timer rand state struct
+ * @kstat_irqs:                irq stats per cpu
+ * @irq_2_iommu:       iommu with this irq
  * @handle_irq:                highlevel irq-events handler [if NULL, __do_IRQ()]
  * @chip:              low level interrupt hardware access
  * @msi_desc:          MSI descriptor
@@ -143,8 +148,8 @@ struct irq_chip {
  * @depth:             disable-depth, for nested irq_disable() calls
  * @wake_depth:                enable depth, for multiple set_irq_wake() callers
  * @irq_count:         stats field to detect stalled irqs
- * @irqs_unhandled:    stats field for spurious unhandled interrupts
  * @last_unhandled:    aging timer for unhandled count
+ * @irqs_unhandled:    stats field for spurious unhandled interrupts
  * @lock:              locking for SMP
  * @affinity:          IRQ affinity on SMP
  * @cpu:               cpu index useful for balancing
@@ -154,6 +159,13 @@ struct irq_chip {
  */
 struct irq_desc {
        unsigned int            irq;
+#ifdef CONFIG_SPARSE_IRQ
+       struct timer_rand_state *timer_rand_state;
+       unsigned int            *kstat_irqs;
+# ifdef CONFIG_INTR_REMAP
+       struct irq_2_iommu      *irq_2_iommu;
+# endif
+#endif
        irq_flow_handler_t      handle_irq;
        struct irq_chip         *chip;
        struct msi_desc         *msi_desc;
@@ -165,8 +177,8 @@ struct irq_desc {
        unsigned int            depth;          /* nested irq disables */
        unsigned int            wake_depth;     /* nested wake enables */
        unsigned int            irq_count;      /* For detecting broken IRQs */
-       unsigned int            irqs_unhandled;
        unsigned long           last_unhandled; /* Aging timer for unhandled count */
+       unsigned int            irqs_unhandled;
        spinlock_t              lock;
 #ifdef CONFIG_SMP
        cpumask_t               affinity;
@@ -181,12 +193,51 @@ struct irq_desc {
        const char              *name;
 } ____cacheline_internodealigned_in_smp;
 
+extern void early_irq_init(void);
+extern void arch_early_irq_init(void);
+extern void arch_init_chip_data(struct irq_desc *desc, int cpu);
+extern void arch_init_copy_chip_data(struct irq_desc *old_desc,
+                                       struct irq_desc *desc, int cpu);
+extern void arch_free_chip_data(struct irq_desc *old_desc, struct irq_desc *desc);
 
+#ifndef CONFIG_SPARSE_IRQ
 extern struct irq_desc irq_desc[NR_IRQS];
 
 static inline struct irq_desc *irq_to_desc(unsigned int irq)
 {
-       return (irq < nr_irqs) ? irq_desc + irq : NULL;
+       return (irq < NR_IRQS) ? irq_desc + irq : NULL;
+}
+static inline struct irq_desc *irq_to_desc_alloc_cpu(unsigned int irq, int cpu)
+{
+       return irq_to_desc(irq);
+}
+
+#else
+
+extern struct irq_desc *irq_to_desc(unsigned int irq);
+extern struct irq_desc *irq_to_desc_alloc_cpu(unsigned int irq, int cpu);
+extern struct irq_desc *move_irq_desc(struct irq_desc *old_desc, int cpu);
+
+# define for_each_irq_desc(irq, desc)          \
+       for (irq = 0, desc = irq_to_desc(irq); irq < nr_irqs; irq++, desc = irq_to_desc(irq))
+# define for_each_irq_desc_reverse(irq, desc)                          \
+       for (irq = nr_irqs - 1, desc = irq_to_desc(irq); irq >= 0; irq--, desc = irq_to_desc(irq))
+
+#define kstat_irqs_this_cpu(DESC) \
+       ((DESC)->kstat_irqs[smp_processor_id()])
+#define kstat_incr_irqs_this_cpu(irqno, DESC) \
+       ((DESC)->kstat_irqs[smp_processor_id()]++)
+
+#endif
+
+static inline struct irq_desc *
+irq_remap_to_desc(unsigned int irq, struct irq_desc *desc)
+{
+#ifdef CONFIG_NUMA_MIGRATE_IRQ_DESC
+       return irq_to_desc(irq);
+#else
+       return desc;
+#endif
 }
 
 /*
@@ -380,6 +431,11 @@ extern int set_irq_msi(unsigned int irq, struct msi_desc *entry);
 #define get_irq_data(irq)      (irq_to_desc(irq)->handler_data)
 #define get_irq_msi(irq)       (irq_to_desc(irq)->msi_desc)
 
+#define get_irq_desc_chip(desc)                ((desc)->chip)
+#define get_irq_desc_chip_data(desc)   ((desc)->chip_data)
+#define get_irq_desc_data(desc)                ((desc)->handler_data)
+#define get_irq_desc_msi(desc)         ((desc)->msi_desc)
+
 #endif /* CONFIG_GENERIC_HARDIRQS */
 
 #endif /* !CONFIG_S390 */
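With CONFIG_SPARSE_IRQ the descriptor space can have holes, so descriptors must be reached through irq_to_desc()/for_each_irq_desc() rather than by indexing irq_desc[] directly. A hedged sketch of a walker built on the new iterator; count_active_irqs() is illustrative only:

static unsigned int count_active_irqs(void)
{
        unsigned int irq, n = 0;
        struct irq_desc *desc;

        for_each_irq_desc(irq, desc) {
                /* desc may be NULL for an irq that was never allocated */
                if (desc && desc->action)
                        n++;
        }
        return n;
}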
index 452c280c8115525c2c294bebb3104bd6e06645a9..95d2b74641f5dbe2ecb90bb68d430ab600d61b74 100644 (file)
@@ -1,24 +1,38 @@
 #ifndef _LINUX_IRQNR_H
 #define _LINUX_IRQNR_H
 
+/*
+ * Generic irq_desc iterators:
+ */
+#ifdef __KERNEL__
+
 #ifndef CONFIG_GENERIC_HARDIRQS
 #include <asm/irq.h>
 # define nr_irqs               NR_IRQS
 
 # define for_each_irq_desc(irq, desc)          \
        for (irq = 0; irq < nr_irqs; irq++)
+
+# define for_each_irq_desc_reverse(irq, desc)                          \
+       for (irq = nr_irqs - 1; irq >= 0; irq--)
 #else
+
 extern int nr_irqs;
 
+#ifndef CONFIG_SPARSE_IRQ
+
+struct irq_desc;
 # define for_each_irq_desc(irq, desc)          \
        for (irq = 0, desc = irq_desc; irq < nr_irqs; irq++, desc++)
-
-# define for_each_irq_desc_reverse(irq, desc)                          \
-       for (irq = nr_irqs - 1, desc = irq_desc + (nr_irqs - 1);        \
-            irq >= 0; irq--, desc--)
+# define for_each_irq_desc_reverse(irq, desc)                          \
+       for (irq = nr_irqs - 1, desc = irq_desc + (nr_irqs - 1);        \
+           irq >= 0; irq--, desc--)
+#endif
 #endif
 
-#define for_each_irq_nr(irq)                   \
-       for (irq = 0; irq < nr_irqs; irq++)
+#define for_each_irq_nr(irq)                   \
+       for (irq = 0; irq < nr_irqs; irq++)
+
+#endif /* __KERNEL__ */
 
 #endif
index 6002ae76785c9aeba493c0960aa5de508b139b49..ca9ff6411dfa778fed80eae8360b6136529f8f11 100644 (file)
@@ -141,6 +141,15 @@ extern int _cond_resched(void);
                (__x < 0) ? -__x : __x;         \
        })
 
+#ifdef CONFIG_PROVE_LOCKING
+void might_fault(void);
+#else
+static inline void might_fault(void)
+{
+       might_sleep();
+}
+#endif
+
 extern struct atomic_notifier_head panic_notifier_list;
 extern long (*panic_blink)(long time);
 NORET_TYPE void panic(const char * fmt, ...)
@@ -188,6 +197,8 @@ extern unsigned long long memparse(const char *ptr, char **retptr);
 extern int core_kernel_text(unsigned long addr);
 extern int __kernel_text_address(unsigned long addr);
 extern int kernel_text_address(unsigned long addr);
+extern int func_ptr_is_kernel_text(void *ptr);
+
 struct pid;
 extern struct pid *session_of_pgrp(struct pid *pgrp);
 
index 4a145caeee075d3209fa4e0d324dfed7f9a15fe7..4ee4b3d2316ffc78495e7bfa0dd3017f220c05c9 100644 (file)
@@ -28,7 +28,9 @@ struct cpu_usage_stat {
 
 struct kernel_stat {
        struct cpu_usage_stat   cpustat;
-       unsigned int irqs[NR_IRQS];
+#ifndef CONFIG_SPARSE_IRQ
+       unsigned int irqs[NR_IRQS];
+#endif
 };
 
 DECLARE_PER_CPU(struct kernel_stat, kstat);
@@ -39,6 +41,10 @@ DECLARE_PER_CPU(struct kernel_stat, kstat);
 
 extern unsigned long long nr_context_switches(void);
 
+#ifndef CONFIG_SPARSE_IRQ
+#define kstat_irqs_this_cpu(irq) \
+       (kstat_this_cpu.irqs[irq])
+
 struct irq_desc;
 
 static inline void kstat_incr_irqs_this_cpu(unsigned int irq,
@@ -46,11 +52,17 @@ static inline void kstat_incr_irqs_this_cpu(unsigned int irq,
 {
        kstat_this_cpu.irqs[irq]++;
 }
+#endif
+
 
+#ifndef CONFIG_SPARSE_IRQ
 static inline unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
 {
        return kstat_cpu(cpu).irqs[irq];
 }
+#else
+extern unsigned int kstat_irqs_cpu(unsigned int irq, int cpu);
+#endif
 
 /*
  * Number of interrupts per specific IRQ source, since bootup
index 29aec6e100203da5e8b0f40ab726360ca08b8e72..23bf02fb124ffae0dfb282453582385ea34d1919 100644 (file)
@@ -73,6 +73,8 @@ struct lock_class_key {
        struct lockdep_subclass_key     subkeys[MAX_LOCKDEP_SUBCLASSES];
 };
 
+#define LOCKSTAT_POINTS                4
+
 /*
  * The lock-class itself:
  */
@@ -119,7 +121,8 @@ struct lock_class {
        int                             name_version;
 
 #ifdef CONFIG_LOCK_STAT
-       unsigned long                   contention_point[4];
+       unsigned long                   contention_point[LOCKSTAT_POINTS];
+       unsigned long                   contending_point[LOCKSTAT_POINTS];
 #endif
 };
 
@@ -144,6 +147,7 @@ enum bounce_type {
 
 struct lock_class_stats {
        unsigned long                   contention_point[4];
+       unsigned long                   contending_point[4];
        struct lock_time                read_waittime;
        struct lock_time                write_waittime;
        struct lock_time                read_holdtime;
@@ -165,6 +169,7 @@ struct lockdep_map {
        const char                      *name;
 #ifdef CONFIG_LOCK_STAT
        int                             cpu;
+       unsigned long                   ip;
 #endif
 };
 
@@ -309,8 +314,15 @@ extern void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
 extern void lock_release(struct lockdep_map *lock, int nested,
                         unsigned long ip);
 
-extern void lock_set_subclass(struct lockdep_map *lock, unsigned int subclass,
-                             unsigned long ip);
+extern void lock_set_class(struct lockdep_map *lock, const char *name,
+                          struct lock_class_key *key, unsigned int subclass,
+                          unsigned long ip);
+
+static inline void lock_set_subclass(struct lockdep_map *lock,
+               unsigned int subclass, unsigned long ip)
+{
+       lock_set_class(lock, lock->name, lock->key, subclass, ip);
+}
 
 # define INIT_LOCKDEP                          .lockdep_recursion = 0,
 
@@ -328,6 +340,7 @@ static inline void lockdep_on(void)
 
 # define lock_acquire(l, s, t, r, c, n, i)     do { } while (0)
 # define lock_release(l, n, i)                 do { } while (0)
+# define lock_set_class(l, n, k, s, i)         do { } while (0)
 # define lock_set_subclass(l, s, i)            do { } while (0)
 # define lockdep_init()                                do { } while (0)
 # define lockdep_info()                                do { } while (0)
@@ -356,7 +369,7 @@ struct lock_class_key { };
 #ifdef CONFIG_LOCK_STAT
 
 extern void lock_contended(struct lockdep_map *lock, unsigned long ip);
-extern void lock_acquired(struct lockdep_map *lock);
+extern void lock_acquired(struct lockdep_map *lock, unsigned long ip);
 
 #define LOCK_CONTENDED(_lock, try, lock)                       \
 do {                                                           \
@@ -364,20 +377,20 @@ do {                                                              \
                lock_contended(&(_lock)->dep_map, _RET_IP_);    \
                lock(_lock);                                    \
        }                                                       \
-       lock_acquired(&(_lock)->dep_map);                       \
+       lock_acquired(&(_lock)->dep_map, _RET_IP_);                     \
 } while (0)
 
 #else /* CONFIG_LOCK_STAT */
 
 #define lock_contended(lockdep_map, ip) do {} while (0)
-#define lock_acquired(lockdep_map) do {} while (0)
+#define lock_acquired(lockdep_map, ip) do {} while (0)
 
 #define LOCK_CONTENDED(_lock, try, lock) \
        lock(_lock)
 
 #endif /* CONFIG_LOCK_STAT */
 
-#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_GENERIC_HARDIRQS)
+#ifdef CONFIG_GENERIC_HARDIRQS
 extern void early_init_irq_lock_class(void);
 #else
 static inline void early_init_irq_lock_class(void)
@@ -481,4 +494,22 @@ static inline void print_irqtrace_events(struct task_struct *curr)
 # define lock_map_release(l)                   do { } while (0)
 #endif
 
+#ifdef CONFIG_PROVE_LOCKING
+# define might_lock(lock)                                              \
+do {                                                                   \
+       typecheck(struct lockdep_map *, &(lock)->dep_map);              \
+       lock_acquire(&(lock)->dep_map, 0, 0, 0, 2, NULL, _THIS_IP_);    \
+       lock_release(&(lock)->dep_map, 0, _THIS_IP_);                   \
+} while (0)
+# define might_lock_read(lock)                                                 \
+do {                                                                   \
+       typecheck(struct lockdep_map *, &(lock)->dep_map);              \
+       lock_acquire(&(lock)->dep_map, 0, 0, 1, 2, NULL, _THIS_IP_);    \
+       lock_release(&(lock)->dep_map, 0, _THIS_IP_);                   \
+} while (0)
+#else
+# define might_lock(lock) do { } while (0)
+# define might_lock_read(lock) do { } while (0)
+#endif
+
 #endif /* __LINUX_LOCKDEP_H */
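might_lock()/might_lock_read() let a function tell lockdep that it may take a lock on some paths, so ordering violations are reported even on runs where the conditional acquisition is skipped. A hedged sketch; struct my_object, my_update() and the surrounding locking policy are illustrative only:

struct my_object {
        struct mutex lock;
        int value;
};

static void my_update(struct my_object *obj, int value, bool locked)
{
        /* Callers may or may not already hold obj->lock; annotate the
         * potential acquisition so lockdep sees it on every run. */
        might_lock(&obj->lock);

        if (!locked)
                mutex_lock(&obj->lock);
        obj->value = value;
        if (!locked)
                mutex_unlock(&obj->lock);
}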
index fe825471d5aaf9d59440a7f322989004736ea71f..9cfc9b627fdd745d4702a903e29db36f2676aa34 100644 (file)
@@ -232,8 +232,9 @@ struct mm_struct {
        struct core_state *core_state; /* coredumping support */
 
        /* aio bits */
-       rwlock_t                ioctx_list_lock;        /* aio lock */
-       struct kioctx           *ioctx_list;
+       spinlock_t              ioctx_lock;
+       struct hlist_head       ioctx_list;
+
 #ifdef CONFIG_MM_OWNER
        /*
         * "owner" points to a task that is regarded as the canonical
index 8f293922720735ee164cf4b5183f064d09e9f5a6..d2b8a1e8ca11000cf622717ab3302bf9a64888c3 100644 (file)
@@ -10,8 +10,11 @@ struct msi_msg {
 };
 
 /* Helper functions */
+struct irq_desc;
 extern void mask_msi_irq(unsigned int irq);
 extern void unmask_msi_irq(unsigned int irq);
+extern void read_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg);
+extern void write_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg);
 extern void read_msi_msg(unsigned int irq, struct msi_msg *msg);
 extern void write_msi_msg(unsigned int irq, struct msi_msg *msg);
 
index bc6da10ceee002ef14850e8025839e67d0d87c48..7a0e5c4f8072c53f4b0d6a7dfa681355324e065f 100644 (file)
@@ -144,6 +144,8 @@ extern int __must_check mutex_lock_killable(struct mutex *lock);
 /*
  * NOTE: mutex_trylock() follows the spin_trylock() convention,
  *       not the down_trylock() convention!
+ *
+ * Returns 1 if the mutex has been acquired successfully, and 0 on contention.
  */
 extern int mutex_trylock(struct mutex *lock);
 extern void mutex_unlock(struct mutex *lock);
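A hedged usage sketch of the documented convention: mutex_trylock() returns 1 when the lock was taken (like spin_trylock()) and 0 on contention, the opposite sense of down_trylock(). try_update_stats() and update_stats_locked() are illustrative names:

static void try_update_stats(struct mutex *stats_lock)
{
        if (mutex_trylock(stats_lock)) {        /* 1: lock acquired */
                update_stats_locked();
                mutex_unlock(stats_lock);
        }
        /* 0: contended -- skip this round instead of sleeping */
}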
index a8efcfeea7323365b65ac3b6098e053af3f14de7..3d327b67d7e2b5927cab4739a061dbf35a10f91e 100644 (file)
@@ -26,8 +26,7 @@ extern struct bus_type of_platform_bus_type;
 
 /*
  * An of_platform_driver driver is attached to a basic of_device on
- * the "platform bus" (of_platform_bus_type) (or ISA, EBUS and SBUS
- * busses on sparc).
+ * the "platform bus" (of_platform_bus_type).
  */
 struct of_platform_driver
 {
index a7c7213555492520e649a4920d9fc671e88460cd..4f71bf4e628c0796a398fa8ad0bd84b95efe1ee8 100644 (file)
@@ -45,7 +45,11 @@ struct k_itimer {
        int it_requeue_pending;         /* waiting to requeue this timer */
 #define REQUEUE_PENDING 1
        int it_sigev_notify;            /* notify word of sigevent struct */
-       struct task_struct *it_process; /* process to send signal to */
+       struct signal_struct *it_signal;
+       union {
+               struct pid *it_pid;     /* pid of process to send signal to */
+               struct task_struct *it_process; /* for clock_nanosleep */
+       };
        struct sigqueue *sigq;          /* signal queue entry. */
        union {
                struct {
index 36f125c0c6037f0270941e96e63790151a66fe34..adbf3bd3c6b3c8edea3e313a4abce0a736d02ebf 100644 (file)
@@ -8,6 +8,7 @@
 #define _LINUX_RANDOM_H
 
 #include <linux/ioctl.h>
+#include <linux/irqnr.h>
 
 /* ioctl()'s for the random number generator */
 
@@ -44,6 +45,56 @@ struct rand_pool_info {
 
 extern void rand_initialize_irq(int irq);
 
+struct timer_rand_state;
+#ifndef CONFIG_SPARSE_IRQ
+
+extern struct timer_rand_state *irq_timer_state[];
+
+static inline struct timer_rand_state *get_timer_rand_state(unsigned int irq)
+{
+       if (irq >= nr_irqs)
+               return NULL;
+
+       return irq_timer_state[irq];
+}
+
+static inline void set_timer_rand_state(unsigned int irq, struct timer_rand_state *state)
+{
+       if (irq >= nr_irqs)
+               return;
+
+       irq_timer_state[irq] = state;
+}
+
+#else
+
+#include <linux/irq.h>
+static inline struct timer_rand_state *get_timer_rand_state(unsigned int irq)
+{
+       struct irq_desc *desc;
+
+       desc = irq_to_desc(irq);
+
+       if (!desc)
+               return NULL;
+
+       return desc->timer_rand_state;
+}
+
+static inline void set_timer_rand_state(unsigned int irq, struct timer_rand_state *state)
+{
+       struct irq_desc *desc;
+
+       desc = irq_to_desc(irq);
+
+       if (!desc)
+               return;
+
+       desc->timer_rand_state = state;
+}
+#endif
+
+
 extern void add_input_randomness(unsigned int type, unsigned int code,
                                 unsigned int value);
 extern void add_interrupt_randomness(int irq);
index 5f89b62e6983192befd7cc827df50fb53cd0a173..301dda829e37499f5cdaa0950e3a11f0c4ed3b74 100644 (file)
@@ -41,7 +41,7 @@
 #include <linux/seqlock.h>
 
 #ifdef CONFIG_RCU_CPU_STALL_DETECTOR
-#define RCU_SECONDS_TILL_STALL_CHECK   ( 3 * HZ) /* for rcp->jiffies_stall */
+#define RCU_SECONDS_TILL_STALL_CHECK   (10 * HZ) /* for rcp->jiffies_stall */
 #define RCU_SECONDS_TILL_STALL_RECHECK (30 * HZ) /* for rcp->jiffies_stall */
 #endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
 
index 895dc9c1088c767ce4706814b806c20248423241..1168fbcea8d4bc6a42d4abccf5b05e47266aa418 100644 (file)
@@ -52,11 +52,15 @@ struct rcu_head {
        void (*func)(struct rcu_head *head);
 };
 
-#ifdef CONFIG_CLASSIC_RCU
+#if defined(CONFIG_CLASSIC_RCU)
 #include <linux/rcuclassic.h>
-#else /* #ifdef CONFIG_CLASSIC_RCU */
+#elif defined(CONFIG_TREE_RCU)
+#include <linux/rcutree.h>
+#elif defined(CONFIG_PREEMPT_RCU)
 #include <linux/rcupreempt.h>
-#endif /* #else #ifdef CONFIG_CLASSIC_RCU */
+#else
+#error "Unknown RCU implementation specified to kernel configuration"
+#endif /* #else #if defined(CONFIG_CLASSIC_RCU) */
 
 #define RCU_HEAD_INIT  { .next = NULL, .func = NULL }
 #define RCU_HEAD(head) struct rcu_head head = RCU_HEAD_INIT
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
new file mode 100644 (file)
index 0000000..d4368b7
--- /dev/null
@@ -0,0 +1,329 @@
+/*
+ * Read-Copy Update mechanism for mutual exclusion (tree-based version)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright IBM Corporation, 2008
+ *
+ * Author: Dipankar Sarma <dipankar@in.ibm.com>
+ *        Paul E. McKenney <paulmck@linux.vnet.ibm.com> Hierarchical algorithm
+ *
+ * Based on the original work by Paul McKenney <paulmck@us.ibm.com>
+ * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
+ *
+ * For detailed explanation of Read-Copy Update mechanism see -
+ *     Documentation/RCU
+ */
+
+#ifndef __LINUX_RCUTREE_H
+#define __LINUX_RCUTREE_H
+
+#include <linux/cache.h>
+#include <linux/spinlock.h>
+#include <linux/threads.h>
+#include <linux/percpu.h>
+#include <linux/cpumask.h>
+#include <linux/seqlock.h>
+
+/*
+ * Define shape of hierarchy based on NR_CPUS and CONFIG_RCU_FANOUT.
+ * In theory, it should be possible to add more levels straightforwardly.
+ * In practice, this has not been tested, so there is probably some
+ * bug somewhere.
+ */
+#define MAX_RCU_LVLS 3
+#define RCU_FANOUT           (CONFIG_RCU_FANOUT)
+#define RCU_FANOUT_SQ        (RCU_FANOUT * RCU_FANOUT)
+#define RCU_FANOUT_CUBE              (RCU_FANOUT_SQ * RCU_FANOUT)
+
+#if NR_CPUS <= RCU_FANOUT
+#  define NUM_RCU_LVLS       1
+#  define NUM_RCU_LVL_0              1
+#  define NUM_RCU_LVL_1              (NR_CPUS)
+#  define NUM_RCU_LVL_2              0
+#  define NUM_RCU_LVL_3              0
+#elif NR_CPUS <= RCU_FANOUT_SQ
+#  define NUM_RCU_LVLS       2
+#  define NUM_RCU_LVL_0              1
+#  define NUM_RCU_LVL_1              (((NR_CPUS) + RCU_FANOUT - 1) / RCU_FANOUT)
+#  define NUM_RCU_LVL_2              (NR_CPUS)
+#  define NUM_RCU_LVL_3              0
+#elif NR_CPUS <= RCU_FANOUT_CUBE
+#  define NUM_RCU_LVLS       3
+#  define NUM_RCU_LVL_0              1
+#  define NUM_RCU_LVL_1              (((NR_CPUS) + RCU_FANOUT_SQ - 1) / RCU_FANOUT_SQ)
+#  define NUM_RCU_LVL_2              (((NR_CPUS) + (RCU_FANOUT) - 1) / (RCU_FANOUT))
+#  define NUM_RCU_LVL_3              NR_CPUS
+#else
+# error "CONFIG_RCU_FANOUT insufficient for NR_CPUS"
+#endif /* #if (NR_CPUS) <= RCU_FANOUT */
+
+#define RCU_SUM (NUM_RCU_LVL_0 + NUM_RCU_LVL_1 + NUM_RCU_LVL_2 + NUM_RCU_LVL_3)
+#define NUM_RCU_NODES (RCU_SUM - NR_CPUS)
+
+/*
+ * Dynticks per-CPU state.
+ */
+struct rcu_dynticks {
+       int dynticks_nesting;   /* Track nesting level, sort of. */
+       int dynticks;           /* Even value for dynticks-idle, else odd. */
+       int dynticks_nmi;       /* Even value for either dynticks-idle or */
+                               /*  not in nmi handler, else odd.  So this */
+                               /*  remains even for nmi from irq handler. */
+};
+
+/*
+ * Definition for node within the RCU grace-period-detection hierarchy.
+ */
+struct rcu_node {
+       spinlock_t lock;
+       unsigned long qsmask;   /* CPUs or groups that need to switch in */
+                               /*  order for current grace period to proceed.*/
+       unsigned long qsmaskinit;
+                               /* Per-GP initialization for qsmask. */
+       unsigned long grpmask;  /* Mask to apply to parent qsmask. */
+       int     grplo;          /* lowest-numbered CPU or group here. */
+       int     grphi;          /* highest-numbered CPU or group here. */
+       u8      grpnum;         /* CPU/group number for next level up. */
+       u8      level;          /* root is at level 0. */
+       struct rcu_node *parent;
+} ____cacheline_internodealigned_in_smp;
+
+/* Index values for nxttail array in struct rcu_data. */
+#define RCU_DONE_TAIL          0       /* Also RCU_WAIT head. */
+#define RCU_WAIT_TAIL          1       /* Also RCU_NEXT_READY head. */
+#define RCU_NEXT_READY_TAIL    2       /* Also RCU_NEXT head. */
+#define RCU_NEXT_TAIL          3
+#define RCU_NEXT_SIZE          4
+
+/* Per-CPU data for read-copy update. */
+struct rcu_data {
+       /* 1) quiescent-state and grace-period handling : */
+       long            completed;      /* Track rsp->completed gp number */
+                                       /*  in order to detect GP end. */
+       long            gpnum;          /* Highest gp number that this CPU */
+                                       /*  is aware of having started. */
+       long            passed_quiesc_completed;
+                                       /* Value of completed at time of qs. */
+       bool            passed_quiesc;  /* User-mode/idle loop etc. */
+       bool            qs_pending;     /* Core waits for quiesc state. */
+       bool            beenonline;     /* CPU online at least once. */
+       struct rcu_node *mynode;        /* This CPU's leaf of hierarchy */
+       unsigned long grpmask;          /* Mask to apply to leaf qsmask. */
+
+       /* 2) batch handling */
+       /*
+        * If nxtlist is not NULL, it is partitioned as follows.
+        * Any of the partitions might be empty, in which case the
+        * pointer to that partition will be equal to the pointer for
+        * the following partition.  When the list is empty, all of
+        * the nxttail elements point to nxtlist, which is NULL.
+        *
+        * [*nxttail[RCU_NEXT_READY_TAIL], NULL = *nxttail[RCU_NEXT_TAIL]):
+        *      Entries that might have arrived after current GP ended
+        * [*nxttail[RCU_WAIT_TAIL], *nxttail[RCU_NEXT_READY_TAIL]):
+        *      Entries known to have arrived before current GP ended
+        * [*nxttail[RCU_DONE_TAIL], *nxttail[RCU_WAIT_TAIL]):
+        *      Entries that batch # <= ->completed - 1: waiting for current GP
+        * [nxtlist, *nxttail[RCU_DONE_TAIL]):
+        *      Entries that batch # <= ->completed
+        *      The grace period for these entries has completed, and
+        *      the other grace-period-completed entries may be moved
+        *      here temporarily in rcu_process_callbacks().
+        */
+       struct rcu_head *nxtlist;
+       struct rcu_head **nxttail[RCU_NEXT_SIZE];
+       long            qlen;           /* # of queued callbacks */
+       long            blimit;         /* Upper limit on a processed batch */
+
+#ifdef CONFIG_NO_HZ
+       /* 3) dynticks interface. */
+       struct rcu_dynticks *dynticks;  /* Shared per-CPU dynticks state. */
+       int dynticks_snap;              /* Per-GP tracking for dynticks. */
+       int dynticks_nmi_snap;          /* Per-GP tracking for dynticks_nmi. */
+#endif /* #ifdef CONFIG_NO_HZ */
+
+       /* 4) reasons this CPU needed to be kicked by force_quiescent_state */
+#ifdef CONFIG_NO_HZ
+       unsigned long dynticks_fqs;     /* Kicked due to dynticks idle. */
+#endif /* #ifdef CONFIG_NO_HZ */
+       unsigned long offline_fqs;      /* Kicked due to being offline. */
+       unsigned long resched_ipi;      /* Sent a resched IPI. */
+
+       /* 5) state to allow this CPU to force_quiescent_state on others */
+       long n_rcu_pending;             /* rcu_pending() calls since boot. */
+       long n_rcu_pending_force_qs;    /* when to force quiescent states. */
+
+       int cpu;
+};
+
+/* Values for signaled field in struct rcu_state. */
+#define RCU_GP_INIT            0       /* Grace period being initialized. */
+#define RCU_SAVE_DYNTICK       1       /* Need to scan dyntick state. */
+#define RCU_FORCE_QS           2       /* Need to force quiescent state. */
+#ifdef CONFIG_NO_HZ
+#define RCU_SIGNAL_INIT                RCU_SAVE_DYNTICK
+#else /* #ifdef CONFIG_NO_HZ */
+#define RCU_SIGNAL_INIT                RCU_FORCE_QS
+#endif /* #else #ifdef CONFIG_NO_HZ */
+
+#define RCU_JIFFIES_TILL_FORCE_QS       3      /* for rsp->jiffies_force_qs */
+#ifdef CONFIG_RCU_CPU_STALL_DETECTOR
+#define RCU_SECONDS_TILL_STALL_CHECK   (10 * HZ)  /* for rsp->jiffies_stall */
+#define RCU_SECONDS_TILL_STALL_RECHECK (30 * HZ)  /* for rsp->jiffies_stall */
+#define RCU_STALL_RAT_DELAY            2         /* Allow other CPUs time */
+                                                 /*  to take at least one */
+                                                 /*  scheduling clock irq */
+                                                 /*  before ratting on them. */
+
+#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
+
+/*
+ * RCU global state, including node hierarchy.  This hierarchy is
+ * represented in "heap" form in a dense array.  The root (first level)
+ * of the hierarchy is in ->node[0] (referenced by ->level[0]), the second
+ * level in ->node[1] through ->node[m] (->node[1] referenced by ->level[1]),
+ * and the third level in ->node[m+1] and following (->node[m+1] referenced
+ * by ->level[2]).  The number of levels is determined by the number of
+ * CPUs and by CONFIG_RCU_FANOUT.  Small systems will have a "hierarchy"
+ * consisting of a single rcu_node.
+ */
+struct rcu_state {
+       struct rcu_node node[NUM_RCU_NODES];    /* Hierarchy. */
+       struct rcu_node *level[NUM_RCU_LVLS];   /* Hierarchy levels. */
+       u32 levelcnt[MAX_RCU_LVLS + 1];         /* # nodes in each level. */
+       u8 levelspread[NUM_RCU_LVLS];           /* kids/node in each level. */
+       struct rcu_data *rda[NR_CPUS];          /* array of rdp pointers. */
+
+       /* The following fields are guarded by the root rcu_node's lock. */
+
+       u8      signaled ____cacheline_internodealigned_in_smp;
+                                               /* Force QS state. */
+       long    gpnum;                          /* Current gp number. */
+       long    completed;                      /* # of last completed gp. */
+       spinlock_t onofflock;                   /* exclude on/offline and */
+                                               /*  starting new GP. */
+       spinlock_t fqslock;                     /* Only one task forcing */
+                                               /*  quiescent states. */
+       unsigned long jiffies_force_qs;         /* Time at which to invoke */
+                                               /*  force_quiescent_state(). */
+       unsigned long n_force_qs;               /* Number of calls to */
+                                               /*  force_quiescent_state(). */
+       unsigned long n_force_qs_lh;            /* ~Number of calls leaving */
+                                               /*  due to lock unavailable. */
+       unsigned long n_force_qs_ngp;           /* Number of calls leaving */
+                                               /*  due to no GP active. */
+#ifdef CONFIG_RCU_CPU_STALL_DETECTOR
+       unsigned long gp_start;                 /* Time at which GP started, */
+                                               /*  but in jiffies. */
+       unsigned long jiffies_stall;            /* Time at which to check */
+                                               /*  for CPU stalls. */
+#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
+#ifdef CONFIG_NO_HZ
+       long dynticks_completed;                /* Value of completed @ snap. */
+#endif /* #ifdef CONFIG_NO_HZ */
+};
+
+extern struct rcu_state rcu_state;
+DECLARE_PER_CPU(struct rcu_data, rcu_data);
+
+extern struct rcu_state rcu_bh_state;
+DECLARE_PER_CPU(struct rcu_data, rcu_bh_data);
+
+/*
+ * Increment the quiescent state counter.
+ * The counter is a bit degenerated: We do not need to know
+ * how many quiescent states passed, just if there was at least
+ * one since the start of the grace period. Thus just a flag.
+ */
+static inline void rcu_qsctr_inc(int cpu)
+{
+       struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
+       rdp->passed_quiesc = 1;
+       rdp->passed_quiesc_completed = rdp->completed;
+}
+static inline void rcu_bh_qsctr_inc(int cpu)
+{
+       struct rcu_data *rdp = &per_cpu(rcu_bh_data, cpu);
+       rdp->passed_quiesc = 1;
+       rdp->passed_quiesc_completed = rdp->completed;
+}
+
+extern int rcu_pending(int cpu);
+extern int rcu_needs_cpu(int cpu);
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+extern struct lockdep_map rcu_lock_map;
+# define rcu_read_acquire()    \
+                       lock_acquire(&rcu_lock_map, 0, 0, 2, 1, NULL, _THIS_IP_)
+# define rcu_read_release()    lock_release(&rcu_lock_map, 1, _THIS_IP_)
+#else
+# define rcu_read_acquire()    do { } while (0)
+# define rcu_read_release()    do { } while (0)
+#endif
+
+static inline void __rcu_read_lock(void)
+{
+       preempt_disable();
+       __acquire(RCU);
+       rcu_read_acquire();
+}
+static inline void __rcu_read_unlock(void)
+{
+       rcu_read_release();
+       __release(RCU);
+       preempt_enable();
+}
+static inline void __rcu_read_lock_bh(void)
+{
+       local_bh_disable();
+       __acquire(RCU_BH);
+       rcu_read_acquire();
+}
+static inline void __rcu_read_unlock_bh(void)
+{
+       rcu_read_release();
+       __release(RCU_BH);
+       local_bh_enable();
+}
+
+#define __synchronize_sched() synchronize_rcu()
+
+#define call_rcu_sched(head, func) call_rcu(head, func)
+
+static inline void rcu_init_sched(void)
+{
+}
+
+extern void __rcu_init(void);
+extern void rcu_check_callbacks(int cpu, int user);
+extern void rcu_restart_cpu(int cpu);
+
+extern long rcu_batches_completed(void);
+extern long rcu_batches_completed_bh(void);
+
+#ifdef CONFIG_NO_HZ
+void rcu_enter_nohz(void);
+void rcu_exit_nohz(void);
+#else /* CONFIG_NO_HZ */
+static inline void rcu_enter_nohz(void)
+{
+}
+static inline void rcu_exit_nohz(void)
+{
+}
+#endif /* CONFIG_NO_HZ */
+
+#endif /* __LINUX_RCUTREE_H */
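A worked example of the hierarchy sizing above, under the purely hypothetical assumption of CONFIG_RCU_FANOUT = 64 and NR_CPUS = 4096:

/*
 *   RCU_FANOUT     = 64
 *   RCU_FANOUT_SQ  = 64 * 64 = 4096
 *   NR_CPUS (4096) <= RCU_FANOUT_SQ           => NUM_RCU_LVLS = 2
 *   NUM_RCU_LVL_0  = 1                           (root rcu_node)
 *   NUM_RCU_LVL_1  = (4096 + 64 - 1) / 64 = 64   (leaf rcu_nodes)
 *   NUM_RCU_LVL_2  = 4096                        (counts CPUs, not nodes)
 *   NUM_RCU_LVL_3  = 0
 *
 *   RCU_SUM       = 1 + 64 + 4096 + 0 = 4161
 *   NUM_RCU_NODES = RCU_SUM - NR_CPUS = 65       (1 root + 64 leaves)
 */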
index b18ec5533e8c594509e83a666cac089a774a060f..325af1de0351a0062fab92f2a6d8d2226879a2be 100644 (file)
@@ -7,9 +7,31 @@ struct device;
 struct dma_attrs;
 struct scatterlist;
 
+/*
+ * Maximum allowable number of contiguous slabs to map,
+ * must be a power of 2.  What is the appropriate value?
+ * The complexity of {map,unmap}_single is linearly dependent on this value.
+ */
+#define IO_TLB_SEGSIZE 128
+
+
+/*
+ * log of the size of each IO TLB slab.  The number of slabs is command line
+ * controllable.
+ */
+#define IO_TLB_SHIFT 11
+
 extern void
 swiotlb_init(void);
 
+extern void *swiotlb_alloc_boot(size_t bytes, unsigned long nslabs);
+extern void *swiotlb_alloc(unsigned order, unsigned long nslabs);
+
+extern dma_addr_t swiotlb_phys_to_bus(phys_addr_t address);
+extern phys_addr_t swiotlb_bus_to_phys(dma_addr_t address);
+
+extern int swiotlb_arch_range_needs_mapping(void *ptr, size_t size);
+
 extern void
 *swiotlb_alloc_coherent(struct device *hwdev, size_t size,
                        dma_addr_t *dma_handle, gfp_t flags);
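A short worked example of the two constants above, showing the resulting slab size and the largest single mapping they allow:

/*
 *   slab size            = 1 << IO_TLB_SHIFT  = 1 << 11 = 2048 bytes
 *   max contiguous slabs = IO_TLB_SEGSIZE     = 128
 *   largest map_single() = 128 * 2048 bytes   = 256 KiB
 */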
index 9007313b5b7168ae03ed85e63d80ce6afe0ce438..998a55d80acf1b1c62bd8d91755448f9a0edebd6 100644 (file)
 #ifndef _LINUX_TIMEX_H
 #define _LINUX_TIMEX_H
 
-#include <linux/compiler.h>
 #include <linux/time.h>
 
-#include <asm/param.h>
-
 #define NTP_API                4       /* NTP API version */
 
-/*
- * SHIFT_KG and SHIFT_KF establish the damping of the PLL and are chosen
- * for a slightly underdamped convergence characteristic. SHIFT_KH
- * establishes the damping of the FLL and is chosen by wisdom and black
- * art.
- *
- * MAXTC establishes the maximum time constant of the PLL. With the
- * SHIFT_KG and SHIFT_KF values given and a time constant range from
- * zero to MAXTC, the PLL will converge in 15 minutes to 16 hours,
- * respectively.
- */
-#define SHIFT_PLL      4       /* PLL frequency factor (shift) */
-#define SHIFT_FLL      2       /* FLL frequency factor (shift) */
-#define MAXTC          10      /* maximum time constant (shift) */
-
-/*
- * SHIFT_USEC defines the scaling (shift) of the time_freq and
- * time_tolerance variables, which represent the current frequency
- * offset and maximum frequency tolerance.
- */
-#define SHIFT_USEC 16          /* frequency offset scale (shift) */
-#define PPM_SCALE (NSEC_PER_USEC << (NTP_SCALE_SHIFT - SHIFT_USEC))
-#define PPM_SCALE_INV_SHIFT 19
-#define PPM_SCALE_INV ((1ll << (PPM_SCALE_INV_SHIFT + NTP_SCALE_SHIFT)) / \
-                      PPM_SCALE + 1)
-
-#define MAXPHASE 500000000l    /* max phase error (ns) */
-#define MAXFREQ 500000         /* max frequency error (ns/s) */
-#define MAXFREQ_SCALED ((s64)MAXFREQ << NTP_SCALE_SHIFT)
-#define MINSEC 256             /* min interval between updates (s) */
-#define MAXSEC 2048            /* max interval between updates (s) */
-#define NTP_PHASE_LIMIT ((MAXPHASE / NSEC_PER_USEC) << 5) /* beyond max. dispersion */
-
 /*
  * syscall interface - used (mainly by NTP daemon)
  * to discipline kernel clock oscillator
@@ -199,8 +163,45 @@ struct timex {
 #define TIME_BAD       TIME_ERROR /* bw compat */
 
 #ifdef __KERNEL__
+#include <linux/compiler.h>
+#include <linux/types.h>
+#include <linux/param.h>
+
 #include <asm/timex.h>
 
+/*
+ * SHIFT_KG and SHIFT_KF establish the damping of the PLL and are chosen
+ * for a slightly underdamped convergence characteristic. SHIFT_KH
+ * establishes the damping of the FLL and is chosen by wisdom and black
+ * art.
+ *
+ * MAXTC establishes the maximum time constant of the PLL. With the
+ * SHIFT_KG and SHIFT_KF values given and a time constant range from
+ * zero to MAXTC, the PLL will converge in 15 minutes to 16 hours,
+ * respectively.
+ */
+#define SHIFT_PLL      4       /* PLL frequency factor (shift) */
+#define SHIFT_FLL      2       /* FLL frequency factor (shift) */
+#define MAXTC          10      /* maximum time constant (shift) */
+
+/*
+ * SHIFT_USEC defines the scaling (shift) of the time_freq and
+ * time_tolerance variables, which represent the current frequency
+ * offset and maximum frequency tolerance.
+ */
+#define SHIFT_USEC 16          /* frequency offset scale (shift) */
+#define PPM_SCALE (NSEC_PER_USEC << (NTP_SCALE_SHIFT - SHIFT_USEC))
+#define PPM_SCALE_INV_SHIFT 19
+#define PPM_SCALE_INV ((1ll << (PPM_SCALE_INV_SHIFT + NTP_SCALE_SHIFT)) / \
+                      PPM_SCALE + 1)
+
+#define MAXPHASE 500000000l    /* max phase error (ns) */
+#define MAXFREQ 500000         /* max frequency error (ns/s) */
+#define MAXFREQ_SCALED ((s64)MAXFREQ << NTP_SCALE_SHIFT)
+#define MINSEC 256             /* min interval between updates (s) */
+#define MAXSEC 2048            /* max interval between updates (s) */
+#define NTP_PHASE_LIMIT ((MAXPHASE / NSEC_PER_USEC) << 5) /* beyond max. dispersion */
+
 /*
  * kernel variables
  * Note: maximum error = NTP synch distance = dispersion + delay / 2;
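
The hunk above moves the NTP PLL/FLL constants and the PPM_SCALE conversion macros under #ifdef __KERNEL__, so userspace sees only the syscall-facing part of <linux/timex.h>. As a rough user-space illustration of what PPM_SCALE is for (not kernel code; NSEC_PER_USEC and NTP_SCALE_SHIFT are assumed to be 1000 and 32, and the adjtimex conversion is heavily simplified):

#include <stdio.h>
#include <stdint.h>

#define NSEC_PER_USEC   1000LL
#define NTP_SCALE_SHIFT 32          /* assumed, as used by the kernel NTP code */
#define SHIFT_USEC      16          /* frequency offset scale (shift) */
#define PPM_SCALE (NSEC_PER_USEC << (NTP_SCALE_SHIFT - SHIFT_USEC))

int main(void)
{
    long user_freq = 5 << SHIFT_USEC;                   /* 5 ppm in scaled-ppm units */
    int64_t internal = (int64_t)user_freq * PPM_SCALE;  /* kernel-internal frequency units */

    printf("scaled ppm %ld -> internal %lld\n", user_freq, (long long)internal);
    return 0;
}
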
index 1d98330b1f2c174fa239f8dbfc0b5533b4f99dc6..121f349cb7ec0a5063711d6656d6c45e42521b06 100644 (file)
@@ -135,19 +135,14 @@ typedef           __s64           int64_t;
  *
  * Linux always considers sectors to be 512 bytes long independently
  * of the devices real block size.
+ *
+ * blkcnt_t is the type of the inode's block count.
  */
 #ifdef CONFIG_LBD
 typedef u64 sector_t;
-#else
-typedef unsigned long sector_t;
-#endif
-
-/*
- * The type of the inode's block count.
- */
-#ifdef CONFIG_LSF
 typedef u64 blkcnt_t;
 #else
+typedef unsigned long sector_t;
 typedef unsigned long blkcnt_t;
 #endif
 
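
The types.h hunk above folds the old CONFIG_LSF switch into CONFIG_LBD, so sector_t and blkcnt_t now widen together. A standalone sketch of the effect (CONFIG_LBD is faked as a plain preprocessor constant here, and u64 is stood in by unsigned long long):

#include <stdio.h>

/* Toggle this to mimic CONFIG_LBD=y; both types now change together. */
#define CONFIG_LBD 1

#if CONFIG_LBD
typedef unsigned long long sector_t;   /* stands in for u64 */
typedef unsigned long long blkcnt_t;
#else
typedef unsigned long sector_t;
typedef unsigned long blkcnt_t;
#endif

int main(void)
{
    printf("sizeof(sector_t)=%zu sizeof(blkcnt_t)=%zu\n",
           sizeof(sector_t), sizeof(blkcnt_t));
    return 0;
}
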
index fec6decfb983503ec8ae24bafa793f09b5f902b2..6b58367d145e8735eaebbf16bfec0733406389fb 100644 (file)
@@ -78,7 +78,7 @@ static inline unsigned long __copy_from_user_nocache(void *to,
                                                        \
                set_fs(KERNEL_DS);                      \
                pagefault_disable();                    \
-               ret = __get_user(retval, (__force typeof(retval) __user *)(addr));              \
+               ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval));            \
                pagefault_enable();                     \
                set_fs(old_fs);                         \
                ret;                                    \
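
The change above swaps __get_user() for __copy_from_user_inatomic() with sizeof(retval), so the destination no longer has to be one of the integer sizes __get_user() understands. A user-space sketch of the idea, with memcpy standing in for the in-atomic copy (illustrative only):

#include <stdio.h>
#include <string.h>

struct sample { int a; long b; };

/* Illustrative stand-in: copy sizeof(retval) bytes, whatever the type is. */
#define probe_read(retval, addr) \
    (memcpy(&(retval), (addr), sizeof(retval)), 0)

int main(void)
{
    struct sample src = { 1, 2 }, dst;
    int err = probe_read(dst, &src);   /* works for aggregates, not just ints */

    printf("err=%d a=%d b=%ld\n", err, dst.a, dst.b);
    return 0;
}
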
index 8a63c404ef449d391a08ffd5760f46325f76babf..13627191a60d194de08aaa4b410aa752cfd7cb21 100644 (file)
@@ -936,10 +936,90 @@ source "block/Kconfig"
 config PREEMPT_NOTIFIERS
        bool
 
+choice
+       prompt "RCU Implementation"
+       default CLASSIC_RCU
+
 config CLASSIC_RCU
-       def_bool !PREEMPT_RCU
+       bool "Classic RCU"
        help
          This option selects the classic RCU implementation that is
          designed for best read-side performance on non-realtime
-         systems.  Classic RCU is the default.  Note that the
-         PREEMPT_RCU symbol is used to select/deselect this option.
+         systems.
+
+         Select this option if you are unsure.
+
+config TREE_RCU
+       bool "Tree-based hierarchical RCU"
+       help
+         This option selects the RCU implementation that is
+         designed for very large SMP systems with hundreds or
+         thousands of CPUs.
+
+config PREEMPT_RCU
+       bool "Preemptible RCU"
+       depends on PREEMPT
+       help
+         This option reduces the latency of the kernel by making certain
+         RCU sections preemptible. Normally RCU code is non-preemptible; if
+         this option is selected then read-only RCU sections become
+         preemptible. This helps latency, but may expose bugs due to
+         now-naive assumptions about each RCU read-side critical section
+         remaining on a given CPU through its execution.
+
+endchoice
+
+config RCU_TRACE
+       bool "Enable tracing for RCU"
+       depends on TREE_RCU || PREEMPT_RCU
+       help
+         This option provides tracing in RCU which presents stats
+         in debugfs for debugging the RCU implementation.
+
+         Say Y here if you want to enable RCU tracing.
+         Say N if you are unsure.
+
+config RCU_FANOUT
+       int "Tree-based hierarchical RCU fanout value"
+       range 2 64 if 64BIT
+       range 2 32 if !64BIT
+       depends on TREE_RCU
+       default 64 if 64BIT
+       default 32 if !64BIT
+       help
+         This option controls the fanout of hierarchical implementations
+         of RCU, allowing RCU to work efficiently on machines with
+         large numbers of CPUs.  This value must be at least the cube
+         root of NR_CPUS, which allows NR_CPUS up to 32,768 for 32-bit
+         systems and up to 262,144 for 64-bit systems.
+
+         Select a specific number if testing RCU itself.
+         Take the default if unsure.
+
+config RCU_FANOUT_EXACT
+       bool "Disable tree-based hierarchical RCU auto-balancing"
+       depends on TREE_RCU
+       default n
+       help
+         This option forces use of the exact RCU_FANOUT value specified,
+         regardless of imbalances in the hierarchy.  This is useful for
+         testing RCU itself, and might one day be useful on systems with
+         strong NUMA behavior.
+
+         Without RCU_FANOUT_EXACT, the code will balance the hierarchy.
+
+         Say N if unsure.
+
+config TREE_RCU_TRACE
+       def_bool RCU_TRACE && TREE_RCU
+       select DEBUG_FS
+       help
+         This option provides tracing for the TREE_RCU implementation,
+         permitting the Makefile to trivially select kernel/rcutree_trace.c.
+
+config PREEMPT_RCU_TRACE
+       def_bool RCU_TRACE && PREEMPT_RCU
+       select DEBUG_FS
+       help
+         This option provides tracing for the PREEMPT_RCU implementation,
+         permitting the Makefile to trivially select kernel/rcupreempt_trace.c.
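
RCU_FANOUT's help text above requires the fanout to be at least the cube root of NR_CPUS; assuming the hierarchy is at most three levels deep (inferred from the stated limits, not spelled out in this hunk), the documented maxima follow directly:

#include <stdio.h>

/* Max CPUs a three-level tree with the given fanout can cover. */
static long max_cpus(long fanout) { return fanout * fanout * fanout; }

int main(void)
{
    printf("fanout 32 covers up to %ld CPUs\n", max_cpus(32));   /* 32768  */
    printf("fanout 64 covers up to %ld CPUs\n", max_cpus(64));   /* 262144 */
    return 0;
}
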
index 17e9757bfde2310c6bc7a8407fee6eba7cd4b0ad..2a7ce0f8e45353af2204c017c1921afdd0ba5c91 100644 (file)
@@ -540,6 +540,15 @@ void __init __weak thread_info_cache_init(void)
 {
 }
 
+void __init __weak arch_early_irq_init(void)
+{
+}
+
+void __init __weak early_irq_init(void)
+{
+       arch_early_irq_init();
+}
+
 asmlinkage void __init start_kernel(void)
 {
        char * command_line;
@@ -604,6 +613,8 @@ asmlinkage void __init start_kernel(void)
        sort_main_extable();
        trap_init();
        rcu_init();
+       /* init some links before init_ISA_irqs() */
+       early_irq_init();
        init_IRQ();
        pidhash_init();
        init_timers();
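
The init/main.c hunk adds early_irq_init()/arch_early_irq_init() as __weak no-ops so an architecture can override them while everyone else falls through to the stubs, and calls early_irq_init() just before init_IRQ(). A small user-space sketch of the same weak-symbol pattern (the function name here is made up for illustration):

#include <stdio.h>

/* Default, overridable definition - the kernel relies on the same __weak idea. */
__attribute__((weak)) void arch_hook(void)
{
    printf("default arch_hook (no arch override)\n");
}

/* An architecture would provide a strong definition instead, e.g.:
 * void arch_hook(void) { printf("arch-specific early setup\n"); }
 */

int main(void)
{
    arch_hook();
    return 0;
}
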
index 9fdba03dc1fcd119c31274fb3590480aa1db9f05..bf987b95b3560fbd0a7d8a820335100a55e1d105 100644 (file)
@@ -52,28 +52,3 @@ config PREEMPT
 
 endchoice
 
-config PREEMPT_RCU
-       bool "Preemptible RCU"
-       depends on PREEMPT
-       default n
-       help
-         This option reduces the latency of the kernel by making certain
-         RCU sections preemptible. Normally RCU code is non-preemptible, if
-         this option is selected then read-only RCU sections become
-         preemptible. This helps latency, but may expose bugs due to
-         now-naive assumptions about each RCU read-side critical section
-         remaining on a given CPU through its execution.
-
-         Say N if you are unsure.
-
-config RCU_TRACE
-       bool "Enable tracing for RCU - currently stats in debugfs"
-       depends on PREEMPT_RCU
-       select DEBUG_FS
-       default y
-       help
-         This option provides tracing in RCU which presents stats
-         in debugfs for debugging RCU implementation.
-
-         Say Y here if you want to enable RCU tracing
-         Say N if you are unsure.
index 027edda6351137b5f8c1ee085c2c5320d204e904..e1c5bf3365c0a4cdee8e0279f31b3c6742a9c6ef 100644 (file)
@@ -73,10 +73,10 @@ obj-$(CONFIG_GENERIC_HARDIRQS) += irq/
 obj-$(CONFIG_SECCOMP) += seccomp.o
 obj-$(CONFIG_RCU_TORTURE_TEST) += rcutorture.o
 obj-$(CONFIG_CLASSIC_RCU) += rcuclassic.o
+obj-$(CONFIG_TREE_RCU) += rcutree.o
 obj-$(CONFIG_PREEMPT_RCU) += rcupreempt.o
-ifeq ($(CONFIG_PREEMPT_RCU),y)
-obj-$(CONFIG_RCU_TRACE) += rcupreempt_trace.o
-endif
+obj-$(CONFIG_TREE_RCU_TRACE) += rcutree_trace.o
+obj-$(CONFIG_PREEMPT_RCU_TRACE) += rcupreempt_trace.o
 obj-$(CONFIG_RELAY) += relay.o
 obj-$(CONFIG_SYSCTL) += utsname_sysctl.o
 obj-$(CONFIG_TASK_DELAY_ACCT) += delayacct.o
index c7422ca920382b47445ace263bdba0fe6fbf6bf8..c9e5a1c14e081eb8dd9873e0eef89d964c344bc1 100644 (file)
@@ -1037,8 +1037,6 @@ NORET_TYPE void do_exit(long code)
                 * task into the wait for ever nirwana as well.
                 */
                tsk->flags |= PF_EXITPIDONE;
-               if (tsk->io_context)
-                       exit_io_context();
                set_current_state(TASK_UNINTERRUPTIBLE);
                schedule();
        }
@@ -1328,10 +1326,10 @@ static int wait_task_zombie(struct task_struct *p, int options,
                 * group, which consolidates times for all threads in the
                 * group including the group leader.
                 */
+               thread_group_cputime(p, &cputime);
                spin_lock_irq(&p->parent->sighand->siglock);
                psig = p->parent->signal;
                sig = p->signal;
-               thread_group_cputime(p, &cputime);
                psig->cutime =
                        cputime_add(psig->cutime,
                        cputime_add(cputime.utime,
index feb0317cf09ab5960d0288a31ec00d03ae5a6335..e136ed8d82ba56ab81283a48fe3d6cfb41c23c79 100644 (file)
@@ -67,3 +67,19 @@ int kernel_text_address(unsigned long addr)
                return 1;
        return module_text_address(addr) != NULL;
 }
+
+/*
+ * On some architectures (PPC64, IA64) function pointers
+ * are actually only tokens to some data that then holds the
+ * real function address. As a result, to find if a function
+ * pointer is part of the kernel text, we need to do some
+ * special dereferencing first.
+ */
+int func_ptr_is_kernel_text(void *ptr)
+{
+       unsigned long addr;
+       addr = (unsigned long) dereference_function_descriptor(ptr);
+       if (core_kernel_text(addr))
+               return 1;
+       return module_text_address(addr) != NULL;
+}
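
func_ptr_is_kernel_text() above dereferences the function descriptor first because on PPC64/IA64 a function pointer refers to a descriptor rather than to code. A toy illustration of that indirection (the struct layout is schematic, not the real OPD ABI):

#include <stdio.h>

/* Schematic OPD-style descriptor: entry point plus a data pointer. */
struct func_desc {
    void *entry;
    void *toc;
};

static void real_function(void) { }

/* On such ABIs, "dereferencing" the pointer yields the code entry address. */
static void *deref_descriptor(struct func_desc *fd) { return fd->entry; }

int main(void)
{
    struct func_desc fd = { (void *)real_function, NULL };

    printf("descriptor %p -> entry %p\n", (void *)&fd, deref_descriptor(&fd));
    return 0;
}
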
index 6144b36cd897512273f18c94f0d89958b21666c4..43cbf30669e6de76d4bc315864489deab3336eff 100644 (file)
@@ -415,8 +415,8 @@ static struct mm_struct * mm_init(struct mm_struct * mm, struct task_struct *p)
        set_mm_counter(mm, file_rss, 0);
        set_mm_counter(mm, anon_rss, 0);
        spin_lock_init(&mm->page_table_lock);
-       rwlock_init(&mm->ioctx_list_lock);
-       mm->ioctx_list = NULL;
+       spin_lock_init(&mm->ioctx_lock);
+       INIT_HLIST_HEAD(&mm->ioctx_list);
        mm->free_area_cache = TASK_UNMAPPED_BASE;
        mm->cached_hole_size = ~0UL;
        mm_init_owner(mm, p);
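
mm_init() now sets up a spinlock plus an hlist head for AIO contexts instead of the old rwlock and bare pointer. A compact user-space sketch of the same "init lock, init empty list head" pattern, using pthreads and toy list types rather than the kernel's hlist:

#include <pthread.h>
#include <stdio.h>

/* Toy stand-ins for hlist_head/hlist_node - not the kernel implementation. */
struct hnode { struct hnode *next; };
struct hhead { struct hnode *first; };

struct mm_like {
    pthread_spinlock_t ioctx_lock;
    struct hhead ioctx_list;
};

static void mm_like_init(struct mm_like *mm)
{
    pthread_spin_init(&mm->ioctx_lock, PTHREAD_PROCESS_PRIVATE);
    mm->ioctx_list.first = NULL;            /* empty list, like INIT_HLIST_HEAD */
}

int main(void)
{
    struct mm_like mm;

    mm_like_init(&mm);
    printf("ioctx list empty: %d\n", mm.ioctx_list.first == NULL);
    pthread_spin_destroy(&mm.ioctx_lock);
    return 0;
}
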
index 4fe790e89d0f34af1cc24359d32bca3b58e970ca..7c6cbabe52b3c0368e800790b638e82eb00aa657 100644 (file)
@@ -92,11 +92,12 @@ struct futex_pi_state {
  * A futex_q has a woken state, just like tasks have TASK_RUNNING.
  * It is considered woken when plist_node_empty(&q->list) || q->lock_ptr == 0.
  * The order of wakup is always to make the first condition true, then
- * wake up q->waiters, then make the second condition true.
+ * wake up q->waiter, then make the second condition true.
  */
 struct futex_q {
        struct plist_node list;
-       wait_queue_head_t waiters;
+       /* There can only be a single waiter */
+       wait_queue_head_t waiter;
 
        /* Which hash list lock to use: */
        spinlock_t *lock_ptr;
@@ -122,24 +123,6 @@ struct futex_hash_bucket {
 
 static struct futex_hash_bucket futex_queues[1<<FUTEX_HASHBITS];
 
-/*
- * Take mm->mmap_sem, when futex is shared
- */
-static inline void futex_lock_mm(struct rw_semaphore *fshared)
-{
-       if (fshared)
-               down_read(fshared);
-}
-
-/*
- * Release mm->mmap_sem, when the futex is shared
- */
-static inline void futex_unlock_mm(struct rw_semaphore *fshared)
-{
-       if (fshared)
-               up_read(fshared);
-}
-
 /*
  * We hash on the keys returned from get_futex_key (see below).
  */
@@ -161,6 +144,45 @@ static inline int match_futex(union futex_key *key1, union futex_key *key2)
                && key1->both.offset == key2->both.offset);
 }
 
+/*
+ * Take a reference to the resource addressed by a key.
+ * Can be called while holding spinlocks.
+ *
+ */
+static void get_futex_key_refs(union futex_key *key)
+{
+       if (!key->both.ptr)
+               return;
+
+       switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
+       case FUT_OFF_INODE:
+               atomic_inc(&key->shared.inode->i_count);
+               break;
+       case FUT_OFF_MMSHARED:
+               atomic_inc(&key->private.mm->mm_count);
+               break;
+       }
+}
+
+/*
+ * Drop a reference to the resource addressed by a key.
+ * The hash bucket spinlock must not be held.
+ */
+static void drop_futex_key_refs(union futex_key *key)
+{
+       if (!key->both.ptr)
+               return;
+
+       switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
+       case FUT_OFF_INODE:
+               iput(key->shared.inode);
+               break;
+       case FUT_OFF_MMSHARED:
+               mmdrop(key->private.mm);
+               break;
+       }
+}
+
 /**
  * get_futex_key - Get parameters which are the keys for a futex.
  * @uaddr: virtual address of the futex
@@ -179,12 +201,10 @@ static inline int match_futex(union futex_key *key1, union futex_key *key2)
  * For other futexes, it points to &current->mm->mmap_sem and
  * caller must have taken the reader lock. but NOT any spinlocks.
  */
-static int get_futex_key(u32 __user *uaddr, struct rw_semaphore *fshared,
-                        union futex_key *key)
+static int get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key)
 {
        unsigned long address = (unsigned long)uaddr;
        struct mm_struct *mm = current->mm;
-       struct vm_area_struct *vma;
        struct page *page;
        int err;
 
@@ -208,100 +228,50 @@ static int get_futex_key(u32 __user *uaddr, struct rw_semaphore *fshared,
                        return -EFAULT;
                key->private.mm = mm;
                key->private.address = address;
+               get_futex_key_refs(key);
                return 0;
        }
-       /*
-        * The futex is hashed differently depending on whether
-        * it's in a shared or private mapping.  So check vma first.
-        */
-       vma = find_extend_vma(mm, address);
-       if (unlikely(!vma))
-               return -EFAULT;
 
-       /*
-        * Permissions.
-        */
-       if (unlikely((vma->vm_flags & (VM_IO|VM_READ)) != VM_READ))
-               return (vma->vm_flags & VM_IO) ? -EPERM : -EACCES;
+again:
+       err = get_user_pages_fast(address, 1, 0, &page);
+       if (err < 0)
+               return err;
+
+       lock_page(page);
+       if (!page->mapping) {
+               unlock_page(page);
+               put_page(page);
+               goto again;
+       }
 
        /*
         * Private mappings are handled in a simple way.
         *
         * NOTE: When userspace waits on a MAP_SHARED mapping, even if
         * it's a read-only handle, it's expected that futexes attach to
-        * the object not the particular process.  Therefore we use
-        * VM_MAYSHARE here, not VM_SHARED which is restricted to shared
-        * mappings of _writable_ handles.
+        * the object not the particular process.
         */
-       if (likely(!(vma->vm_flags & VM_MAYSHARE))) {
-               key->both.offset |= FUT_OFF_MMSHARED; /* reference taken on mm */
+       if (PageAnon(page)) {
+               key->both.offset |= FUT_OFF_MMSHARED; /* ref taken on mm */
                key->private.mm = mm;
                key->private.address = address;
-               return 0;
+       } else {
+               key->both.offset |= FUT_OFF_INODE; /* inode-based key */
+               key->shared.inode = page->mapping->host;
+               key->shared.pgoff = page->index;
        }
 
-       /*
-        * Linear file mappings are also simple.
-        */
-       key->shared.inode = vma->vm_file->f_path.dentry->d_inode;
-       key->both.offset |= FUT_OFF_INODE; /* inode-based key. */
-       if (likely(!(vma->vm_flags & VM_NONLINEAR))) {
-               key->shared.pgoff = (((address - vma->vm_start) >> PAGE_SHIFT)
-                                    + vma->vm_pgoff);
-               return 0;
-       }
+       get_futex_key_refs(key);
 
-       /*
-        * We could walk the page table to read the non-linear
-        * pte, and get the page index without fetching the page
-        * from swap.  But that's a lot of code to duplicate here
-        * for a rare case, so we simply fetch the page.
-        */
-       err = get_user_pages(current, mm, address, 1, 0, 0, &page, NULL);
-       if (err >= 0) {
-               key->shared.pgoff =
-                       page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
-               put_page(page);
-               return 0;
-       }
-       return err;
-}
-
-/*
- * Take a reference to the resource addressed by a key.
- * Can be called while holding spinlocks.
- *
- */
-static void get_futex_key_refs(union futex_key *key)
-{
-       if (key->both.ptr == NULL)
-               return;
-       switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
-               case FUT_OFF_INODE:
-                       atomic_inc(&key->shared.inode->i_count);
-                       break;
-               case FUT_OFF_MMSHARED:
-                       atomic_inc(&key->private.mm->mm_count);
-                       break;
-       }
+       unlock_page(page);
+       put_page(page);
+       return 0;
 }
 
-/*
- * Drop a reference to the resource addressed by a key.
- * The hash bucket spinlock must not be held.
- */
-static void drop_futex_key_refs(union futex_key *key)
+static inline
+void put_futex_key(int fshared, union futex_key *key)
 {
-       if (!key->both.ptr)
-               return;
-       switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
-               case FUT_OFF_INODE:
-                       iput(key->shared.inode);
-                       break;
-               case FUT_OFF_MMSHARED:
-                       mmdrop(key->private.mm);
-                       break;
-       }
+       drop_futex_key_refs(key);
 }
 
 static u32 cmpxchg_futex_value_locked(u32 __user *uaddr, u32 uval, u32 newval)
@@ -328,10 +298,8 @@ static int get_futex_value_locked(u32 *dest, u32 __user *from)
 
 /*
  * Fault handling.
- * if fshared is non NULL, current->mm->mmap_sem is already held
  */
-static int futex_handle_fault(unsigned long address,
-                             struct rw_semaphore *fshared, int attempt)
+static int futex_handle_fault(unsigned long address, int attempt)
 {
        struct vm_area_struct * vma;
        struct mm_struct *mm = current->mm;
@@ -340,8 +308,7 @@ static int futex_handle_fault(unsigned long address,
        if (attempt > 2)
                return ret;
 
-       if (!fshared)
-               down_read(&mm->mmap_sem);
+       down_read(&mm->mmap_sem);
        vma = find_vma(mm, address);
        if (vma && address >= vma->vm_start &&
            (vma->vm_flags & VM_WRITE)) {
@@ -361,8 +328,7 @@ static int futex_handle_fault(unsigned long address,
                                current->min_flt++;
                }
        }
-       if (!fshared)
-               up_read(&mm->mmap_sem);
+       up_read(&mm->mmap_sem);
        return ret;
 }
 
@@ -385,6 +351,7 @@ static int refill_pi_state_cache(void)
        /* pi_mutex gets initialized later */
        pi_state->owner = NULL;
        atomic_set(&pi_state->refcount, 1);
+       pi_state->key = FUTEX_KEY_INIT;
 
        current->pi_state_cache = pi_state;
 
@@ -469,7 +436,7 @@ void exit_pi_state_list(struct task_struct *curr)
        struct list_head *next, *head = &curr->pi_state_list;
        struct futex_pi_state *pi_state;
        struct futex_hash_bucket *hb;
-       union futex_key key;
+       union futex_key key = FUTEX_KEY_INIT;
 
        if (!futex_cmpxchg_enabled)
                return;
@@ -614,7 +581,7 @@ static void wake_futex(struct futex_q *q)
         * The lock in wake_up_all() is a crucial memory barrier after the
         * plist_del() and also before assigning to q->lock_ptr.
         */
-       wake_up_all(&q->waiters);
+       wake_up(&q->waiter);
        /*
         * The waiting task can free the futex_q as soon as this is written,
         * without taking any locks.  This must come last.
@@ -726,20 +693,17 @@ double_lock_hb(struct futex_hash_bucket *hb1, struct futex_hash_bucket *hb2)
  * Wake up all waiters hashed on the physical page that is mapped
  * to this virtual address:
  */
-static int futex_wake(u32 __user *uaddr, struct rw_semaphore *fshared,
-                     int nr_wake, u32 bitset)
+static int futex_wake(u32 __user *uaddr, int fshared, int nr_wake, u32 bitset)
 {
        struct futex_hash_bucket *hb;
        struct futex_q *this, *next;
        struct plist_head *head;
-       union futex_key key;
+       union futex_key key = FUTEX_KEY_INIT;
        int ret;
 
        if (!bitset)
                return -EINVAL;
 
-       futex_lock_mm(fshared);
-
        ret = get_futex_key(uaddr, fshared, &key);
        if (unlikely(ret != 0))
                goto out;
@@ -767,7 +731,7 @@ static int futex_wake(u32 __user *uaddr, struct rw_semaphore *fshared,
 
        spin_unlock(&hb->lock);
 out:
-       futex_unlock_mm(fshared);
+       put_futex_key(fshared, &key);
        return ret;
 }
 
@@ -776,19 +740,16 @@ out:
  * to this virtual address:
  */
 static int
-futex_wake_op(u32 __user *uaddr1, struct rw_semaphore *fshared,
-             u32 __user *uaddr2,
+futex_wake_op(u32 __user *uaddr1, int fshared, u32 __user *uaddr2,
              int nr_wake, int nr_wake2, int op)
 {
-       union futex_key key1, key2;
+       union futex_key key1 = FUTEX_KEY_INIT, key2 = FUTEX_KEY_INIT;
        struct futex_hash_bucket *hb1, *hb2;
        struct plist_head *head;
        struct futex_q *this, *next;
        int ret, op_ret, attempt = 0;
 
 retryfull:
-       futex_lock_mm(fshared);
-
        ret = get_futex_key(uaddr1, fshared, &key1);
        if (unlikely(ret != 0))
                goto out;
@@ -833,18 +794,12 @@ retry:
                 */
                if (attempt++) {
                        ret = futex_handle_fault((unsigned long)uaddr2,
-                                                fshared, attempt);
+                                                attempt);
                        if (ret)
                                goto out;
                        goto retry;
                }
 
-               /*
-                * If we would have faulted, release mmap_sem,
-                * fault it in and start all over again.
-                */
-               futex_unlock_mm(fshared);
-
                ret = get_user(dummy, uaddr2);
                if (ret)
                        return ret;
@@ -880,7 +835,8 @@ retry:
        if (hb1 != hb2)
                spin_unlock(&hb2->lock);
 out:
-       futex_unlock_mm(fshared);
+       put_futex_key(fshared, &key2);
+       put_futex_key(fshared, &key1);
 
        return ret;
 }
@@ -889,19 +845,16 @@ out:
  * Requeue all waiters hashed on one physical page to another
  * physical page.
  */
-static int futex_requeue(u32 __user *uaddr1, struct rw_semaphore *fshared,
-                        u32 __user *uaddr2,
+static int futex_requeue(u32 __user *uaddr1, int fshared, u32 __user *uaddr2,
                         int nr_wake, int nr_requeue, u32 *cmpval)
 {
-       union futex_key key1, key2;
+       union futex_key key1 = FUTEX_KEY_INIT, key2 = FUTEX_KEY_INIT;
        struct futex_hash_bucket *hb1, *hb2;
        struct plist_head *head1;
        struct futex_q *this, *next;
        int ret, drop_count = 0;
 
  retry:
-       futex_lock_mm(fshared);
-
        ret = get_futex_key(uaddr1, fshared, &key1);
        if (unlikely(ret != 0))
                goto out;
@@ -924,12 +877,6 @@ static int futex_requeue(u32 __user *uaddr1, struct rw_semaphore *fshared,
                        if (hb1 != hb2)
                                spin_unlock(&hb2->lock);
 
-                       /*
-                        * If we would have faulted, release mmap_sem, fault
-                        * it in and start all over again.
-                        */
-                       futex_unlock_mm(fshared);
-
                        ret = get_user(curval, uaddr1);
 
                        if (!ret)
@@ -981,7 +928,8 @@ out_unlock:
                drop_futex_key_refs(&key1);
 
 out:
-       futex_unlock_mm(fshared);
+       put_futex_key(fshared, &key2);
+       put_futex_key(fshared, &key1);
        return ret;
 }
 
@@ -990,7 +938,7 @@ static inline struct futex_hash_bucket *queue_lock(struct futex_q *q)
 {
        struct futex_hash_bucket *hb;
 
-       init_waitqueue_head(&q->waiters);
+       init_waitqueue_head(&q->waiter);
 
        get_futex_key_refs(&q->key);
        hb = hash_futex(&q->key);
@@ -1103,8 +1051,7 @@ static void unqueue_me_pi(struct futex_q *q)
  * private futexes.
  */
 static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
-                               struct task_struct *newowner,
-                               struct rw_semaphore *fshared)
+                               struct task_struct *newowner, int fshared)
 {
        u32 newtid = task_pid_vnr(newowner) | FUTEX_WAITERS;
        struct futex_pi_state *pi_state = q->pi_state;
@@ -1183,7 +1130,7 @@ retry:
 handle_fault:
        spin_unlock(q->lock_ptr);
 
-       ret = futex_handle_fault((unsigned long)uaddr, fshared, attempt++);
+       ret = futex_handle_fault((unsigned long)uaddr, attempt++);
 
        spin_lock(q->lock_ptr);
 
@@ -1203,12 +1150,13 @@ handle_fault:
  * In case we must use restart_block to restart a futex_wait,
  * we encode in the 'flags' shared capability
  */
-#define FLAGS_SHARED  1
+#define FLAGS_SHARED           0x01
+#define FLAGS_CLOCKRT          0x02
 
 static long futex_wait_restart(struct restart_block *restart);
 
-static int futex_wait(u32 __user *uaddr, struct rw_semaphore *fshared,
-                     u32 val, ktime_t *abs_time, u32 bitset)
+static int futex_wait(u32 __user *uaddr, int fshared,
+                     u32 val, ktime_t *abs_time, u32 bitset, int clockrt)
 {
        struct task_struct *curr = current;
        DECLARE_WAITQUEUE(wait, curr);
@@ -1225,8 +1173,7 @@ static int futex_wait(u32 __user *uaddr, struct rw_semaphore *fshared,
        q.pi_state = NULL;
        q.bitset = bitset;
  retry:
-       futex_lock_mm(fshared);
-
+       q.key = FUTEX_KEY_INIT;
        ret = get_futex_key(uaddr, fshared, &q.key);
        if (unlikely(ret != 0))
                goto out_release_sem;
@@ -1258,12 +1205,6 @@ static int futex_wait(u32 __user *uaddr, struct rw_semaphore *fshared,
        if (unlikely(ret)) {
                queue_unlock(&q, hb);
 
-               /*
-                * If we would have faulted, release mmap_sem, fault it in and
-                * start all over again.
-                */
-               futex_unlock_mm(fshared);
-
                ret = get_user(uval, uaddr);
 
                if (!ret)
@@ -1277,12 +1218,6 @@ static int futex_wait(u32 __user *uaddr, struct rw_semaphore *fshared,
        /* Only actually queue if *uaddr contained val.  */
        queue_me(&q, hb);
 
-       /*
-        * Now the futex is queued and we have checked the data, we
-        * don't want to hold mmap_sem while we sleep.
-        */
-       futex_unlock_mm(fshared);
-
        /*
         * There might have been scheduling since the queue_me(), as we
         * cannot hold a spinlock across the get_user() in case it
@@ -1294,7 +1229,7 @@ static int futex_wait(u32 __user *uaddr, struct rw_semaphore *fshared,
 
        /* add_wait_queue is the barrier after __set_current_state. */
        __set_current_state(TASK_INTERRUPTIBLE);
-       add_wait_queue(&q.waiters, &wait);
+       add_wait_queue(&q.waiter, &wait);
        /*
         * !plist_node_empty() is safe here without any lock.
         * q.lock_ptr != 0 is not safe, because of ordering against wakeup.
@@ -1307,8 +1242,10 @@ static int futex_wait(u32 __user *uaddr, struct rw_semaphore *fshared,
                        slack = current->timer_slack_ns;
                        if (rt_task(current))
                                slack = 0;
-                       hrtimer_init_on_stack(&t.timer, CLOCK_MONOTONIC,
-                                               HRTIMER_MODE_ABS);
+                       hrtimer_init_on_stack(&t.timer,
+                                             clockrt ? CLOCK_REALTIME :
+                                             CLOCK_MONOTONIC,
+                                             HRTIMER_MODE_ABS);
                        hrtimer_init_sleeper(&t, current);
                        hrtimer_set_expires_range_ns(&t.timer, *abs_time, slack);
 
@@ -1363,6 +1300,8 @@ static int futex_wait(u32 __user *uaddr, struct rw_semaphore *fshared,
 
                if (fshared)
                        restart->futex.flags |= FLAGS_SHARED;
+               if (clockrt)
+                       restart->futex.flags |= FLAGS_CLOCKRT;
                return -ERESTART_RESTARTBLOCK;
        }
 
@@ -1370,7 +1309,7 @@ static int futex_wait(u32 __user *uaddr, struct rw_semaphore *fshared,
        queue_unlock(&q, hb);
 
  out_release_sem:
-       futex_unlock_mm(fshared);
+       put_futex_key(fshared, &q.key);
        return ret;
 }
 
@@ -1378,15 +1317,16 @@ static int futex_wait(u32 __user *uaddr, struct rw_semaphore *fshared,
 static long futex_wait_restart(struct restart_block *restart)
 {
        u32 __user *uaddr = (u32 __user *)restart->futex.uaddr;
-       struct rw_semaphore *fshared = NULL;
+       int fshared = 0;
        ktime_t t;
 
        t.tv64 = restart->futex.time;
        restart->fn = do_no_restart_syscall;
        if (restart->futex.flags & FLAGS_SHARED)
-               fshared = &current->mm->mmap_sem;
+               fshared = 1;
        return (long)futex_wait(uaddr, fshared, restart->futex.val, &t,
-                               restart->futex.bitset);
+                               restart->futex.bitset,
+                               restart->futex.flags & FLAGS_CLOCKRT);
 }
 
 
@@ -1396,7 +1336,7 @@ static long futex_wait_restart(struct restart_block *restart)
  * if there are waiters then it will block, it does PI, etc. (Due to
  * races the kernel might see a 0 value of the futex too.)
  */
-static int futex_lock_pi(u32 __user *uaddr, struct rw_semaphore *fshared,
+static int futex_lock_pi(u32 __user *uaddr, int fshared,
                         int detect, ktime_t *time, int trylock)
 {
        struct hrtimer_sleeper timeout, *to = NULL;
@@ -1419,8 +1359,7 @@ static int futex_lock_pi(u32 __user *uaddr, struct rw_semaphore *fshared,
 
        q.pi_state = NULL;
  retry:
-       futex_lock_mm(fshared);
-
+       q.key = FUTEX_KEY_INIT;
        ret = get_futex_key(uaddr, fshared, &q.key);
        if (unlikely(ret != 0))
                goto out_release_sem;
@@ -1509,7 +1448,6 @@ static int futex_lock_pi(u32 __user *uaddr, struct rw_semaphore *fshared,
                         * exit to complete.
                         */
                        queue_unlock(&q, hb);
-                       futex_unlock_mm(fshared);
                        cond_resched();
                        goto retry;
 
@@ -1541,12 +1479,6 @@ static int futex_lock_pi(u32 __user *uaddr, struct rw_semaphore *fshared,
         */
        queue_me(&q, hb);
 
-       /*
-        * Now the futex is queued and we have checked the data, we
-        * don't want to hold mmap_sem while we sleep.
-        */
-       futex_unlock_mm(fshared);
-
        WARN_ON(!q.pi_state);
        /*
         * Block on the PI mutex:
@@ -1559,7 +1491,6 @@ static int futex_lock_pi(u32 __user *uaddr, struct rw_semaphore *fshared,
                ret = ret ? 0 : -EWOULDBLOCK;
        }
 
-       futex_lock_mm(fshared);
        spin_lock(q.lock_ptr);
 
        if (!ret) {
@@ -1625,7 +1556,6 @@ static int futex_lock_pi(u32 __user *uaddr, struct rw_semaphore *fshared,
 
        /* Unqueue and drop the lock */
        unqueue_me_pi(&q);
-       futex_unlock_mm(fshared);
 
        if (to)
                destroy_hrtimer_on_stack(&to->timer);
@@ -1635,34 +1565,30 @@ static int futex_lock_pi(u32 __user *uaddr, struct rw_semaphore *fshared,
        queue_unlock(&q, hb);
 
  out_release_sem:
-       futex_unlock_mm(fshared);
+       put_futex_key(fshared, &q.key);
        if (to)
                destroy_hrtimer_on_stack(&to->timer);
        return ret;
 
  uaddr_faulted:
        /*
-        * We have to r/w  *(int __user *)uaddr, but we can't modify it
-        * non-atomically.  Therefore, if get_user below is not
-        * enough, we need to handle the fault ourselves, while
-        * still holding the mmap_sem.
-        *
-        * ... and hb->lock. :-) --ANK
+        * We have to r/w  *(int __user *)uaddr, and we have to modify it
+        * atomically.  Therefore, if we continue to fault after get_user()
+        * below, we need to handle the fault ourselves, while still holding
+        * the mmap_sem.  This can occur if the uaddr is under contention as
+        * we have to drop the mmap_sem in order to call get_user().
         */
        queue_unlock(&q, hb);
 
        if (attempt++) {
-               ret = futex_handle_fault((unsigned long)uaddr, fshared,
-                                        attempt);
+               ret = futex_handle_fault((unsigned long)uaddr, attempt);
                if (ret)
                        goto out_release_sem;
                goto retry_unlocked;
        }
 
-       futex_unlock_mm(fshared);
-
        ret = get_user(uval, uaddr);
-       if (!ret && (uval != -EFAULT))
+       if (!ret)
                goto retry;
 
        if (to)
@@ -1675,13 +1601,13 @@ static int futex_lock_pi(u32 __user *uaddr, struct rw_semaphore *fshared,
  * This is the in-kernel slowpath: we look up the PI state (if any),
  * and do the rt-mutex unlock.
  */
-static int futex_unlock_pi(u32 __user *uaddr, struct rw_semaphore *fshared)
+static int futex_unlock_pi(u32 __user *uaddr, int fshared)
 {
        struct futex_hash_bucket *hb;
        struct futex_q *this, *next;
        u32 uval;
        struct plist_head *head;
-       union futex_key key;
+       union futex_key key = FUTEX_KEY_INIT;
        int ret, attempt = 0;
 
 retry:
@@ -1692,10 +1618,6 @@ retry:
         */
        if ((uval & FUTEX_TID_MASK) != task_pid_vnr(current))
                return -EPERM;
-       /*
-        * First take all the futex related locks:
-        */
-       futex_lock_mm(fshared);
 
        ret = get_futex_key(uaddr, fshared, &key);
        if (unlikely(ret != 0))
@@ -1754,34 +1676,30 @@ retry_unlocked:
 out_unlock:
        spin_unlock(&hb->lock);
 out:
-       futex_unlock_mm(fshared);
+       put_futex_key(fshared, &key);
 
        return ret;
 
 pi_faulted:
        /*
-        * We have to r/w  *(int __user *)uaddr, but we can't modify it
-        * non-atomically.  Therefore, if get_user below is not
-        * enough, we need to handle the fault ourselves, while
-        * still holding the mmap_sem.
-        *
-        * ... and hb->lock. --ANK
+        * We have to r/w  *(int __user *)uaddr, and we have to modify it
+        * atomically.  Therefore, if we continue to fault after get_user()
+        * below, we need to handle the fault ourselves, while still holding
+        * the mmap_sem.  This can occur if the uaddr is under contention as
+        * we have to drop the mmap_sem in order to call get_user().
         */
        spin_unlock(&hb->lock);
 
        if (attempt++) {
-               ret = futex_handle_fault((unsigned long)uaddr, fshared,
-                                        attempt);
+               ret = futex_handle_fault((unsigned long)uaddr, attempt);
                if (ret)
                        goto out;
                uval = 0;
                goto retry_unlocked;
        }
 
-       futex_unlock_mm(fshared);
-
        ret = get_user(uval, uaddr);
-       if (!ret && (uval != -EFAULT))
+       if (!ret)
                goto retry;
 
        return ret;
@@ -1908,8 +1826,7 @@ retry:
                 * PI futexes happens in exit_pi_state():
                 */
                if (!pi && (uval & FUTEX_WAITERS))
-                       futex_wake(uaddr, &curr->mm->mmap_sem, 1,
-                                  FUTEX_BITSET_MATCH_ANY);
+                       futex_wake(uaddr, 1, 1, FUTEX_BITSET_MATCH_ANY);
        }
        return 0;
 }
@@ -2003,18 +1920,22 @@ void exit_robust_list(struct task_struct *curr)
 long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
                u32 __user *uaddr2, u32 val2, u32 val3)
 {
-       int ret = -ENOSYS;
+       int clockrt, ret = -ENOSYS;
        int cmd = op & FUTEX_CMD_MASK;
-       struct rw_semaphore *fshared = NULL;
+       int fshared = 0;
 
        if (!(op & FUTEX_PRIVATE_FLAG))
-               fshared = &current->mm->mmap_sem;
+               fshared = 1;
+
+       clockrt = op & FUTEX_CLOCK_REALTIME;
+       if (clockrt && cmd != FUTEX_WAIT_BITSET)
+               return -ENOSYS;
 
        switch (cmd) {
        case FUTEX_WAIT:
                val3 = FUTEX_BITSET_MATCH_ANY;
        case FUTEX_WAIT_BITSET:
-               ret = futex_wait(uaddr, fshared, val, timeout, val3);
+               ret = futex_wait(uaddr, fshared, val, timeout, val3, clockrt);
                break;
        case FUTEX_WAKE:
                val3 = FUTEX_BITSET_MATCH_ANY;
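
The futex rework above drops the mmap_sem-based fshared plumbing: keys are taken once via get_futex_key(), reference-counted with get/put_futex_key(), and shared futexes are resolved from the page itself, with anonymous pages keyed by the mm and file-backed pages by the inode and page index. A user-space sketch of that keying decision (flag values and the key layout are simplified stand-ins, not the kernel's union futex_key):

#include <stdio.h>
#include <stdint.h>

/* Illustrative flag bits and key layout. */
#define OFF_MMSHARED 0x1
#define OFF_INODE    0x2

struct toy_key {
    uintptr_t container;        /* mm or inode, depending on the mapping */
    uintptr_t index;            /* user address or page offset */
    unsigned  flags;
};

/* Anonymous page backing the futex word: key by the mm, as the PageAnon() branch does. */
static void key_anon_page(struct toy_key *k, uintptr_t mm, uintptr_t addr)
{
    k->container = mm;
    k->index = addr;
    k->flags = OFF_MMSHARED;
}

/* File-backed page: key by the inode and the page's index in the file. */
static void key_file_page(struct toy_key *k, uintptr_t inode, uintptr_t pgoff)
{
    k->container = inode;
    k->index = pgoff;
    k->flags = OFF_INODE;
}

int main(void)
{
    struct toy_key k;

    key_anon_page(&k, 0x1000, 0xbeef000);
    printf("anon:  container=%#lx index=%#lx flags=%#x\n",
           (unsigned long)k.container, (unsigned long)k.index, k.flags);

    key_file_page(&k, 0x2000, 42);
    printf("file:  container=%#lx index=%#lx flags=%#x\n",
           (unsigned long)k.container, (unsigned long)k.index, k.flags);
    return 0;
}
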
index 47e63349d1b2d262be2d8be568af8d3d93de3333..bda9cb92427673a4c2e47676596e854d57d7dca3 100644 (file)
@@ -442,22 +442,6 @@ static inline void debug_hrtimer_activate(struct hrtimer *timer) { }
 static inline void debug_hrtimer_deactivate(struct hrtimer *timer) { }
 #endif
 
-/*
- * Check, whether the timer is on the callback pending list
- */
-static inline int hrtimer_cb_pending(const struct hrtimer *timer)
-{
-       return timer->state & HRTIMER_STATE_PENDING;
-}
-
-/*
- * Remove a timer from the callback pending list
- */
-static inline void hrtimer_remove_cb_pending(struct hrtimer *timer)
-{
-       list_del_init(&timer->cb_entry);
-}
-
 /* High resolution timer related functions */
 #ifdef CONFIG_HIGH_RES_TIMERS
 
@@ -651,6 +635,8 @@ static inline void hrtimer_init_timer_hres(struct hrtimer *timer)
 {
 }
 
+static void __run_hrtimer(struct hrtimer *timer);
+
 /*
  * When High resolution timers are active, try to reprogram. Note, that in case
  * the state has HRTIMER_STATE_CALLBACK set, no reprogramming and no expiry
@@ -661,31 +647,14 @@ static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
                                            struct hrtimer_clock_base *base)
 {
        if (base->cpu_base->hres_active && hrtimer_reprogram(timer, base)) {
-
-               /* Timer is expired, act upon the callback mode */
-               switch(timer->cb_mode) {
-               case HRTIMER_CB_IRQSAFE_PERCPU:
-               case HRTIMER_CB_IRQSAFE_UNLOCKED:
-                       /*
-                        * This is solely for the sched tick emulation with
-                        * dynamic tick support to ensure that we do not
-                        * restart the tick right on the edge and end up with
-                        * the tick timer in the softirq ! The calling site
-                        * takes care of this. Also used for hrtimer sleeper !
-                        */
-                       debug_hrtimer_deactivate(timer);
-                       return 1;
-               case HRTIMER_CB_SOFTIRQ:
-                       /*
-                        * Move everything else into the softirq pending list !
-                        */
-                       list_add_tail(&timer->cb_entry,
-                                     &base->cpu_base->cb_pending);
-                       timer->state = HRTIMER_STATE_PENDING;
-                       return 1;
-               default:
-                       BUG();
-               }
+               /*
+                * XXX: recursion check?
+                * hrtimer_forward() should round up with timer granularity
+                * so that we never get into inf recursion here,
+                * it doesn't do that though
+                */
+               __run_hrtimer(timer);
+               return 1;
        }
        return 0;
 }
@@ -724,11 +693,6 @@ static int hrtimer_switch_to_hres(void)
        return 1;
 }
 
-static inline void hrtimer_raise_softirq(void)
-{
-       raise_softirq(HRTIMER_SOFTIRQ);
-}
-
 #else
 
 static inline int hrtimer_hres_active(void) { return 0; }
@@ -747,7 +711,6 @@ static inline int hrtimer_reprogram(struct hrtimer *timer,
 {
        return 0;
 }
-static inline void hrtimer_raise_softirq(void) { }
 
 #endif /* CONFIG_HIGH_RES_TIMERS */
 
@@ -890,10 +853,7 @@ static void __remove_hrtimer(struct hrtimer *timer,
                             struct hrtimer_clock_base *base,
                             unsigned long newstate, int reprogram)
 {
-       /* High res. callback list. NOP for !HIGHRES */
-       if (hrtimer_cb_pending(timer))
-               hrtimer_remove_cb_pending(timer);
-       else {
+       if (timer->state & HRTIMER_STATE_ENQUEUED) {
                /*
                 * Remove the timer from the rbtree and replace the
                 * first entry pointer if necessary.
@@ -953,7 +913,7 @@ hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim, unsigned long delta_n
 {
        struct hrtimer_clock_base *base, *new_base;
        unsigned long flags;
-       int ret, raise;
+       int ret;
 
        base = lock_hrtimer_base(timer, &flags);
 
@@ -988,26 +948,8 @@ hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim, unsigned long delta_n
        enqueue_hrtimer(timer, new_base,
                        new_base->cpu_base == &__get_cpu_var(hrtimer_bases));
 
-       /*
-        * The timer may be expired and moved to the cb_pending
-        * list. We can not raise the softirq with base lock held due
-        * to a possible deadlock with runqueue lock.
-        */
-       raise = timer->state == HRTIMER_STATE_PENDING;
-
-       /*
-        * We use preempt_disable to prevent this task from migrating after
-        * setting up the softirq and raising it. Otherwise, if me migrate
-        * we will raise the softirq on the wrong CPU.
-        */
-       preempt_disable();
-
        unlock_hrtimer_base(timer, &flags);
 
-       if (raise)
-               hrtimer_raise_softirq();
-       preempt_enable();
-
        return ret;
 }
 EXPORT_SYMBOL_GPL(hrtimer_start_range_ns);
@@ -1192,75 +1134,6 @@ int hrtimer_get_res(const clockid_t which_clock, struct timespec *tp)
 }
 EXPORT_SYMBOL_GPL(hrtimer_get_res);
 
-static void run_hrtimer_pending(struct hrtimer_cpu_base *cpu_base)
-{
-       spin_lock_irq(&cpu_base->lock);
-
-       while (!list_empty(&cpu_base->cb_pending)) {
-               enum hrtimer_restart (*fn)(struct hrtimer *);
-               struct hrtimer *timer;
-               int restart;
-               int emulate_hardirq_ctx = 0;
-
-               timer = list_entry(cpu_base->cb_pending.next,
-                                  struct hrtimer, cb_entry);
-
-               debug_hrtimer_deactivate(timer);
-               timer_stats_account_hrtimer(timer);
-
-               fn = timer->function;
-               /*
-                * A timer might have been added to the cb_pending list
-                * when it was migrated during a cpu-offline operation.
-                * Emulate hardirq context for such timers.
-                */
-               if (timer->cb_mode == HRTIMER_CB_IRQSAFE_PERCPU ||
-                   timer->cb_mode == HRTIMER_CB_IRQSAFE_UNLOCKED)
-                       emulate_hardirq_ctx = 1;
-
-               __remove_hrtimer(timer, timer->base, HRTIMER_STATE_CALLBACK, 0);
-               spin_unlock_irq(&cpu_base->lock);
-
-               if (unlikely(emulate_hardirq_ctx)) {
-                       local_irq_disable();
-                       restart = fn(timer);
-                       local_irq_enable();
-               } else
-                       restart = fn(timer);
-
-               spin_lock_irq(&cpu_base->lock);
-
-               timer->state &= ~HRTIMER_STATE_CALLBACK;
-               if (restart == HRTIMER_RESTART) {
-                       BUG_ON(hrtimer_active(timer));
-                       /*
-                        * Enqueue the timer, allow reprogramming of the event
-                        * device
-                        */
-                       enqueue_hrtimer(timer, timer->base, 1);
-               } else if (hrtimer_active(timer)) {
-                       /*
-                        * If the timer was rearmed on another CPU, reprogram
-                        * the event device.
-                        */
-                       struct hrtimer_clock_base *base = timer->base;
-
-                       if (base->first == &timer->node &&
-                           hrtimer_reprogram(timer, base)) {
-                               /*
-                                * Timer is expired. Thus move it from tree to
-                                * pending list again.
-                                */
-                               __remove_hrtimer(timer, base,
-                                                HRTIMER_STATE_PENDING, 0);
-                               list_add_tail(&timer->cb_entry,
-                                             &base->cpu_base->cb_pending);
-                       }
-               }
-       }
-       spin_unlock_irq(&cpu_base->lock);
-}
-
 static void __run_hrtimer(struct hrtimer *timer)
 {
        struct hrtimer_clock_base *base = timer->base;
@@ -1268,25 +1141,21 @@ static void __run_hrtimer(struct hrtimer *timer)
        enum hrtimer_restart (*fn)(struct hrtimer *);
        int restart;
 
+       WARN_ON(!irqs_disabled());
+
        debug_hrtimer_deactivate(timer);
        __remove_hrtimer(timer, base, HRTIMER_STATE_CALLBACK, 0);
        timer_stats_account_hrtimer(timer);
-
        fn = timer->function;
-       if (timer->cb_mode == HRTIMER_CB_IRQSAFE_PERCPU ||
-           timer->cb_mode == HRTIMER_CB_IRQSAFE_UNLOCKED) {
-               /*
-                * Used for scheduler timers, avoid lock inversion with
-                * rq->lock and tasklist_lock.
-                *
-                * These timers are required to deal with enqueue expiry
-                * themselves and are not allowed to migrate.
-                */
-               spin_unlock(&cpu_base->lock);
-               restart = fn(timer);
-               spin_lock(&cpu_base->lock);
-       } else
-               restart = fn(timer);
+
+       /*
+        * Because we run timers from hardirq context, there is no chance
+        * they get migrated to another cpu, therefore it's safe to unlock
+        * the timer base.
+        */
+       spin_unlock(&cpu_base->lock);
+       restart = fn(timer);
+       spin_lock(&cpu_base->lock);
 
        /*
         * Note: We clear the CALLBACK bit after enqueue_hrtimer to avoid
@@ -1311,7 +1180,7 @@ void hrtimer_interrupt(struct clock_event_device *dev)
        struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
        struct hrtimer_clock_base *base;
        ktime_t expires_next, now;
-       int i, raise = 0;
+       int i;
 
        BUG_ON(!cpu_base->hres_active);
        cpu_base->nr_events++;
@@ -1360,16 +1229,6 @@ void hrtimer_interrupt(struct clock_event_device *dev)
                                break;
                        }
 
-                       /* Move softirq callbacks to the pending list */
-                       if (timer->cb_mode == HRTIMER_CB_SOFTIRQ) {
-                               __remove_hrtimer(timer, base,
-                                                HRTIMER_STATE_PENDING, 0);
-                               list_add_tail(&timer->cb_entry,
-                                             &base->cpu_base->cb_pending);
-                               raise = 1;
-                               continue;
-                       }
-
                        __run_hrtimer(timer);
                }
                spin_unlock(&cpu_base->lock);
@@ -1383,10 +1242,6 @@ void hrtimer_interrupt(struct clock_event_device *dev)
                if (tick_program_event(expires_next, 0))
                        goto retry;
        }
-
-       /* Raise softirq ? */
-       if (raise)
-               raise_softirq(HRTIMER_SOFTIRQ);
 }
 
 /**
@@ -1413,11 +1268,6 @@ void hrtimer_peek_ahead_timers(void)
        local_irq_restore(flags);
 }
 
-static void run_hrtimer_softirq(struct softirq_action *h)
-{
-       run_hrtimer_pending(&__get_cpu_var(hrtimer_bases));
-}
-
 #endif /* CONFIG_HIGH_RES_TIMERS */
 
 /*
@@ -1429,8 +1279,6 @@ static void run_hrtimer_softirq(struct softirq_action *h)
  */
 void hrtimer_run_pending(void)
 {
-       struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
-
        if (hrtimer_hres_active())
                return;
 
@@ -1444,8 +1292,6 @@ void hrtimer_run_pending(void)
         */
        if (tick_check_oneshot_change(!hrtimer_is_hres_enabled()))
                hrtimer_switch_to_hres();
-
-       run_hrtimer_pending(cpu_base);
 }
 
 /*
@@ -1482,14 +1328,6 @@ void hrtimer_run_queues(void)
                                        hrtimer_get_expires_tv64(timer))
                                break;
 
-                       if (timer->cb_mode == HRTIMER_CB_SOFTIRQ) {
-                               __remove_hrtimer(timer, base,
-                                       HRTIMER_STATE_PENDING, 0);
-                               list_add_tail(&timer->cb_entry,
-                                       &base->cpu_base->cb_pending);
-                               continue;
-                       }
-
                        __run_hrtimer(timer);
                }
                spin_unlock(&cpu_base->lock);
@@ -1516,9 +1354,6 @@ void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, struct task_struct *task)
 {
        sl->timer.function = hrtimer_wakeup;
        sl->task = task;
-#ifdef CONFIG_HIGH_RES_TIMERS
-       sl->timer.cb_mode = HRTIMER_CB_IRQSAFE_UNLOCKED;
-#endif
 }
 
 static int __sched do_nanosleep(struct hrtimer_sleeper *t, enum hrtimer_mode mode)
@@ -1655,36 +1490,22 @@ static void __cpuinit init_hrtimers_cpu(int cpu)
        for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++)
                cpu_base->clock_base[i].cpu_base = cpu_base;
 
-       INIT_LIST_HEAD(&cpu_base->cb_pending);
        hrtimer_init_hres(cpu_base);
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
 
-static int migrate_hrtimer_list(struct hrtimer_clock_base *old_base,
-                               struct hrtimer_clock_base *new_base, int dcpu)
+static void migrate_hrtimer_list(struct hrtimer_clock_base *old_base,
+                               struct hrtimer_clock_base *new_base)
 {
        struct hrtimer *timer;
        struct rb_node *node;
-       int raise = 0;
 
        while ((node = rb_first(&old_base->active))) {
                timer = rb_entry(node, struct hrtimer, node);
                BUG_ON(hrtimer_callback_running(timer));
                debug_hrtimer_deactivate(timer);
 
-               /*
-                * Should not happen. Per CPU timers should be
-                * canceled _before_ the migration code is called
-                */
-               if (timer->cb_mode == HRTIMER_CB_IRQSAFE_PERCPU) {
-                       __remove_hrtimer(timer, old_base,
-                                        HRTIMER_STATE_INACTIVE, 0);
-                       WARN(1, "hrtimer (%p %p)active but cpu %d dead\n",
-                            timer, timer->function, dcpu);
-                       continue;
-               }
-
                /*
                 * Mark it as STATE_MIGRATE not INACTIVE otherwise the
                 * timer could be seen as !active and just vanish away
@@ -1693,69 +1514,34 @@ static int migrate_hrtimer_list(struct hrtimer_clock_base *old_base,
                __remove_hrtimer(timer, old_base, HRTIMER_STATE_MIGRATE, 0);
                timer->base = new_base;
                /*
-                * Enqueue the timer. Allow reprogramming of the event device
+                * Enqueue the timers on the new cpu, but do not reprogram
+                * the timer as that would cause a deadlock between
+                * hrtimer_enqueue_reprogram() running the timer and us still
+                * holding a nested base lock.
+                *
+                * Instead we tickle the hrtimer interrupt after the migration
+                * is done, which will run all expired timers and re-program
+                * the timer device.
                 */
-               enqueue_hrtimer(timer, new_base, 1);
+               enqueue_hrtimer(timer, new_base, 0);
 
-#ifdef CONFIG_HIGH_RES_TIMERS
-               /*
-                * Happens with high res enabled when the timer was
-                * already expired and the callback mode is
-                * HRTIMER_CB_IRQSAFE_UNLOCKED (hrtimer_sleeper). The
-                * enqueue code does not move them to the soft irq
-                * pending list for performance/latency reasons, but
-                * in the migration state, we need to do that
-                * otherwise we end up with a stale timer.
-                */
-               if (timer->state == HRTIMER_STATE_MIGRATE) {
-                       timer->state = HRTIMER_STATE_PENDING;
-                       list_add_tail(&timer->cb_entry,
-                                     &new_base->cpu_base->cb_pending);
-                       raise = 1;
-               }
-#endif
                /* Clear the migration state bit */
                timer->state &= ~HRTIMER_STATE_MIGRATE;
        }
-       return raise;
-}
-
-#ifdef CONFIG_HIGH_RES_TIMERS
-static int migrate_hrtimer_pending(struct hrtimer_cpu_base *old_base,
-                                  struct hrtimer_cpu_base *new_base)
-{
-       struct hrtimer *timer;
-       int raise = 0;
-
-       while (!list_empty(&old_base->cb_pending)) {
-               timer = list_entry(old_base->cb_pending.next,
-                                  struct hrtimer, cb_entry);
-
-               __remove_hrtimer(timer, timer->base, HRTIMER_STATE_PENDING, 0);
-               timer->base = &new_base->clock_base[timer->base->index];
-               list_add_tail(&timer->cb_entry, &new_base->cb_pending);
-               raise = 1;
-       }
-       return raise;
-}
-#else
-static int migrate_hrtimer_pending(struct hrtimer_cpu_base *old_base,
-                                  struct hrtimer_cpu_base *new_base)
-{
-       return 0;
 }
-#endif
 
-static void migrate_hrtimers(int cpu)
+static int migrate_hrtimers(int scpu)
 {
        struct hrtimer_cpu_base *old_base, *new_base;
-       int i, raise = 0;
+       int dcpu, i;
 
-       BUG_ON(cpu_online(cpu));
-       old_base = &per_cpu(hrtimer_bases, cpu);
+       BUG_ON(cpu_online(scpu));
+       old_base = &per_cpu(hrtimer_bases, scpu);
        new_base = &get_cpu_var(hrtimer_bases);
 
-       tick_cancel_sched_timer(cpu);
+       dcpu = smp_processor_id();
+
+       tick_cancel_sched_timer(scpu);
        /*
         * The caller is globally serialized and nobody else
         * takes two locks at once, deadlock is not possible.
@@ -1764,41 +1550,47 @@ static void migrate_hrtimers(int cpu)
        spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);
 
        for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
-               if (migrate_hrtimer_list(&old_base->clock_base[i],
-                                        &new_base->clock_base[i], cpu))
-                       raise = 1;
+               migrate_hrtimer_list(&old_base->clock_base[i],
+                                    &new_base->clock_base[i]);
        }
 
-       if (migrate_hrtimer_pending(old_base, new_base))
-               raise = 1;
-
        spin_unlock(&old_base->lock);
        spin_unlock_irq(&new_base->lock);
        put_cpu_var(hrtimer_bases);
 
-       if (raise)
-               hrtimer_raise_softirq();
+       return dcpu;
+}
+
+static void tickle_timers(void *arg)
+{
+       hrtimer_peek_ahead_timers();
 }
+
 #endif /* CONFIG_HOTPLUG_CPU */
 
 static int __cpuinit hrtimer_cpu_notify(struct notifier_block *self,
                                        unsigned long action, void *hcpu)
 {
-       unsigned int cpu = (long)hcpu;
+       int scpu = (long)hcpu;
 
        switch (action) {
 
        case CPU_UP_PREPARE:
        case CPU_UP_PREPARE_FROZEN:
-               init_hrtimers_cpu(cpu);
+               init_hrtimers_cpu(scpu);
                break;
 
 #ifdef CONFIG_HOTPLUG_CPU
        case CPU_DEAD:
        case CPU_DEAD_FROZEN:
-               clockevents_notify(CLOCK_EVT_NOTIFY_CPU_DEAD, &cpu);
-               migrate_hrtimers(cpu);
+       {
+               int dcpu;
+
+               clockevents_notify(CLOCK_EVT_NOTIFY_CPU_DEAD, &scpu);
+               dcpu = migrate_hrtimers(scpu);
+               smp_call_function_single(dcpu, tickle_timers, NULL, 0);
                break;
+       }
 #endif
 
        default:
@@ -1817,9 +1609,6 @@ void __init hrtimers_init(void)
        hrtimer_cpu_notify(&hrtimers_nb, (unsigned long)CPU_UP_PREPARE,
                          (void *)(long)smp_processor_id());
        register_cpu_notifier(&hrtimers_nb);
-#ifdef CONFIG_HIGH_RES_TIMERS
-       open_softirq(HRTIMER_SOFTIRQ, run_hrtimer_softirq);
-#endif
 }
 
 /**
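
The hrtimer hunk above reworks CPU-hotplug handling: instead of raising a softirq, migrate_hrtimers() pulls the dead CPU's timers over to the CPU running the CPU_DEAD notifier and returns that CPU number, and tickle_timers() is then run there via smp_call_function_single() to re-evaluate expiries. The following is only a user-space analogy of that drain-then-recheck pattern (pthread mutexes stand in for the per-CPU base locks; every name here is invented), not kernel code:

#include <pthread.h>
#include <stdio.h>

struct timer { struct timer *next; long expires; };

struct base {
	pthread_mutex_t lock;
	struct timer *head;
};

/* Splice every timer from @src into @dst; caller guarantees @src is dead. */
static void migrate(struct base *dst, struct base *src)
{
	pthread_mutex_lock(&dst->lock);
	pthread_mutex_lock(&src->lock);	/* nested: no other path takes both */

	while (src->head) {
		struct timer *t = src->head;

		src->head = t->next;
		t->next = dst->head;
		dst->head = t;
	}

	pthread_mutex_unlock(&src->lock);
	pthread_mutex_unlock(&dst->lock);
}

/* Stand-in for hrtimer_peek_ahead_timers(): report the earliest expiry. */
static void tickle(struct base *b)
{
	long soonest = -1;
	struct timer *t;

	pthread_mutex_lock(&b->lock);
	for (t = b->head; t; t = t->next)
		if (soonest < 0 || t->expires < soonest)
			soonest = t->expires;
	pthread_mutex_unlock(&b->lock);
	printf("earliest expiry now %ld\n", soonest);
}

int main(void)
{
	struct timer a = { NULL, 100 };
	struct timer b = { &a, 42 };
	struct base dead = { PTHREAD_MUTEX_INITIALIZER, &b };
	struct base cur  = { PTHREAD_MUTEX_INITIALIZER, NULL };

	migrate(&cur, &dead);
	tickle(&cur);	/* the kernel pokes the target CPU via smp_call_function_single() */
	return 0;
}

Taking the destination lock first and the source lock nested underneath mirrors the spin_lock_nested(..., SINGLE_DEPTH_NESTING) call in the patch, which is safe only because CPU hotplug serializes callers.
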
index 681c52dbfe229bbffc32db74c395f3f149738ed4..4dd5b1edac984bca7326297ecee3daf0239301f1 100644 (file)
@@ -3,3 +3,4 @@ obj-y := handle.o manage.o spurious.o resend.o chip.o devres.o
 obj-$(CONFIG_GENERIC_IRQ_PROBE) += autoprobe.o
 obj-$(CONFIG_PROC_FS) += proc.o
 obj-$(CONFIG_GENERIC_PENDING_IRQ) += migration.o
+obj-$(CONFIG_NUMA_MIGRATE_IRQ_DESC) += numa_migrate.o
index cc0f7321b8cede4192a4ceb9ffc97311ec9cc7d0..650ce4102a6333d9ddf097692583e55659a25333 100644 (file)
@@ -40,6 +40,9 @@ unsigned long probe_irq_on(void)
         * flush such a longstanding irq before considering it as spurious.
         */
        for_each_irq_desc_reverse(i, desc) {
+               if (!desc)
+                       continue;
+
                spin_lock_irq(&desc->lock);
                if (!desc->action && !(desc->status & IRQ_NOPROBE)) {
                        /*
@@ -68,6 +71,9 @@ unsigned long probe_irq_on(void)
         * happened in the previous stage, it may have masked itself)
         */
        for_each_irq_desc_reverse(i, desc) {
+               if (!desc)
+                       continue;
+
                spin_lock_irq(&desc->lock);
                if (!desc->action && !(desc->status & IRQ_NOPROBE)) {
                        desc->status |= IRQ_AUTODETECT | IRQ_WAITING;
@@ -86,6 +92,9 @@ unsigned long probe_irq_on(void)
         * Now filter out any obviously spurious interrupts
         */
        for_each_irq_desc(i, desc) {
+               if (!desc)
+                       continue;
+
                spin_lock_irq(&desc->lock);
                status = desc->status;
 
@@ -124,6 +133,9 @@ unsigned int probe_irq_mask(unsigned long val)
        int i;
 
        for_each_irq_desc(i, desc) {
+               if (!desc)
+                       continue;
+
                spin_lock_irq(&desc->lock);
                status = desc->status;
 
@@ -166,6 +178,9 @@ int probe_irq_off(unsigned long val)
        unsigned int status;
 
        for_each_irq_desc(i, desc) {
+               if (!desc)
+                       continue;
+
                spin_lock_irq(&desc->lock);
                status = desc->status;
 
index 10b5092e9bfe749da57b2ab408e3f6d38ae81d75..6eb3c7952b6496fc9c5f8da49b982d79866e1504 100644 (file)
  */
 void dynamic_irq_init(unsigned int irq)
 {
-       struct irq_desc *desc = irq_to_desc(irq);
+       struct irq_desc *desc;
        unsigned long flags;
 
+       desc = irq_to_desc(irq);
        if (!desc) {
                WARN(1, KERN_ERR "Trying to initialize invalid IRQ%d\n", irq);
                return;
@@ -124,6 +125,7 @@ int set_irq_type(unsigned int irq, unsigned int type)
                return -ENODEV;
        }
 
+       type &= IRQ_TYPE_SENSE_MASK;
        if (type == IRQ_TYPE_NONE)
                return 0;
 
@@ -352,6 +354,7 @@ handle_level_irq(unsigned int irq, struct irq_desc *desc)
 
        spin_lock(&desc->lock);
        mask_ack_irq(desc, irq);
+       desc = irq_remap_to_desc(irq, desc);
 
        if (unlikely(desc->status & IRQ_INPROGRESS))
                goto out_unlock;
@@ -429,6 +432,7 @@ handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc)
        desc->status &= ~IRQ_INPROGRESS;
 out:
        desc->chip->eoi(irq);
+       desc = irq_remap_to_desc(irq, desc);
 
        spin_unlock(&desc->lock);
 }
@@ -465,12 +469,14 @@ handle_edge_irq(unsigned int irq, struct irq_desc *desc)
                    !desc->action)) {
                desc->status |= (IRQ_PENDING | IRQ_MASKED);
                mask_ack_irq(desc, irq);
+               desc = irq_remap_to_desc(irq, desc);
                goto out_unlock;
        }
        kstat_incr_irqs_this_cpu(irq, desc);
 
        /* Start handling the irq */
        desc->chip->ack(irq);
+       desc = irq_remap_to_desc(irq, desc);
 
        /* Mark the IRQ currently in progress.*/
        desc->status |= IRQ_INPROGRESS;
@@ -531,8 +537,10 @@ handle_percpu_irq(unsigned int irq, struct irq_desc *desc)
        if (!noirqdebug)
                note_interrupt(irq, desc, action_ret);
 
-       if (desc->chip->eoi)
+       if (desc->chip->eoi) {
                desc->chip->eoi(irq);
+               desc = irq_remap_to_desc(irq, desc);
+       }
 }
 
 void
@@ -567,8 +575,10 @@ __set_irq_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
 
        /* Uninstall? */
        if (handle == handle_bad_irq) {
-               if (desc->chip != &no_irq_chip)
+               if (desc->chip != &no_irq_chip) {
                        mask_ack_irq(desc, irq);
+                       desc = irq_remap_to_desc(irq, desc);
+               }
                desc->status |= IRQ_DISABLED;
                desc->depth = 1;
        }
index c815b42d0f5bf12baed5e25eb92c7f2d5e4b4472..6492400cb50dfb0659a9627dcc342485a5a21021 100644 (file)
 #include <linux/random.h>
 #include <linux/interrupt.h>
 #include <linux/kernel_stat.h>
+#include <linux/rculist.h>
+#include <linux/hash.h>
 
 #include "internals.h"
 
+/*
+ * lockdep: we want to handle all irq_desc locks as a single lock-class:
+ */
+struct lock_class_key irq_desc_lock_class;
+
 /**
  * handle_bad_irq - handle spurious and unhandled irqs
  * @irq:       the interrupt number
@@ -49,6 +56,155 @@ void handle_bad_irq(unsigned int irq, struct irq_desc *desc)
 int nr_irqs = NR_IRQS;
 EXPORT_SYMBOL_GPL(nr_irqs);
 
+void __init __attribute__((weak)) arch_early_irq_init(void)
+{
+}
+
+#ifdef CONFIG_SPARSE_IRQ
+static struct irq_desc irq_desc_init = {
+       .irq        = -1,
+       .status     = IRQ_DISABLED,
+       .chip       = &no_irq_chip,
+       .handle_irq = handle_bad_irq,
+       .depth      = 1,
+       .lock       = __SPIN_LOCK_UNLOCKED(irq_desc_init.lock),
+#ifdef CONFIG_SMP
+       .affinity   = CPU_MASK_ALL
+#endif
+};
+
+void init_kstat_irqs(struct irq_desc *desc, int cpu, int nr)
+{
+       unsigned long bytes;
+       char *ptr;
+       int node;
+
+       /* Compute how many bytes we need per irq and allocate them */
+       bytes = nr * sizeof(unsigned int);
+
+       node = cpu_to_node(cpu);
+       ptr = kzalloc_node(bytes, GFP_ATOMIC, node);
+       printk(KERN_DEBUG "  alloc kstat_irqs on cpu %d node %d\n", cpu, node);
+
+       if (ptr)
+               desc->kstat_irqs = (unsigned int *)ptr;
+}
+
+void __attribute__((weak)) arch_init_chip_data(struct irq_desc *desc, int cpu)
+{
+}
+
+static void init_one_irq_desc(int irq, struct irq_desc *desc, int cpu)
+{
+       memcpy(desc, &irq_desc_init, sizeof(struct irq_desc));
+       desc->irq = irq;
+#ifdef CONFIG_SMP
+       desc->cpu = cpu;
+#endif
+       lockdep_set_class(&desc->lock, &irq_desc_lock_class);
+       init_kstat_irqs(desc, cpu, nr_cpu_ids);
+       if (!desc->kstat_irqs) {
+               printk(KERN_ERR "can not alloc kstat_irqs\n");
+               BUG_ON(1);
+       }
+       arch_init_chip_data(desc, cpu);
+}
+
+/*
+ * Protect the sparse_irqs:
+ */
+DEFINE_SPINLOCK(sparse_irq_lock);
+
+struct irq_desc *irq_desc_ptrs[NR_IRQS] __read_mostly;
+
+static struct irq_desc irq_desc_legacy[NR_IRQS_LEGACY] __cacheline_aligned_in_smp = {
+       [0 ... NR_IRQS_LEGACY-1] = {
+               .irq        = -1,
+               .status     = IRQ_DISABLED,
+               .chip       = &no_irq_chip,
+               .handle_irq = handle_bad_irq,
+               .depth      = 1,
+               .lock       = __SPIN_LOCK_UNLOCKED(irq_desc_init.lock),
+#ifdef CONFIG_SMP
+               .affinity   = CPU_MASK_ALL
+#endif
+       }
+};
+
+/* FIXME: use bootmem alloc ...*/
+static unsigned int kstat_irqs_legacy[NR_IRQS_LEGACY][NR_CPUS];
+
+void __init early_irq_init(void)
+{
+       struct irq_desc *desc;
+       int legacy_count;
+       int i;
+
+       desc = irq_desc_legacy;
+       legacy_count = ARRAY_SIZE(irq_desc_legacy);
+
+       for (i = 0; i < legacy_count; i++) {
+               desc[i].irq = i;
+               desc[i].kstat_irqs = kstat_irqs_legacy[i];
+
+               irq_desc_ptrs[i] = desc + i;
+       }
+
+       for (i = legacy_count; i < NR_IRQS; i++)
+               irq_desc_ptrs[i] = NULL;
+
+       arch_early_irq_init();
+}
+
+struct irq_desc *irq_to_desc(unsigned int irq)
+{
+       return (irq < NR_IRQS) ? irq_desc_ptrs[irq] : NULL;
+}
+
+struct irq_desc *irq_to_desc_alloc_cpu(unsigned int irq, int cpu)
+{
+       struct irq_desc *desc;
+       unsigned long flags;
+       int node;
+
+       if (irq >= NR_IRQS) {
+               printk(KERN_WARNING "irq >= NR_IRQS in irq_to_desc_alloc: %d %d\n",
+                               irq, NR_IRQS);
+               WARN_ON(1);
+               return NULL;
+       }
+
+       desc = irq_desc_ptrs[irq];
+       if (desc)
+               return desc;
+
+       spin_lock_irqsave(&sparse_irq_lock, flags);
+
+       /* We have to check it to avoid races with another CPU */
+       desc = irq_desc_ptrs[irq];
+       if (desc)
+               goto out_unlock;
+
+       node = cpu_to_node(cpu);
+       desc = kzalloc_node(sizeof(*desc), GFP_ATOMIC, node);
+       printk(KERN_DEBUG "  alloc irq_desc for %d on cpu %d node %d\n",
+                irq, cpu, node);
+       if (!desc) {
+               printk(KERN_ERR "can not alloc irq_desc\n");
+               BUG_ON(1);
+       }
+       init_one_irq_desc(irq, desc, cpu);
+
+       irq_desc_ptrs[irq] = desc;
+
+out_unlock:
+       spin_unlock_irqrestore(&sparse_irq_lock, flags);
+
+       return desc;
+}
+
+#else
+
 struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
        [0 ... NR_IRQS-1] = {
                .status = IRQ_DISABLED,
@@ -62,6 +218,8 @@ struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
        }
 };
 
+#endif
+
 /*
  * What should we do if we get a hw irq event on an illegal vector?
  * Each architecture has to answer this themself.
@@ -179,8 +337,11 @@ unsigned int __do_IRQ(unsigned int irq)
                /*
                 * No locking required for CPU-local interrupts:
                 */
-               if (desc->chip->ack)
+               if (desc->chip->ack) {
                        desc->chip->ack(irq);
+                       /* get new one */
+                       desc = irq_remap_to_desc(irq, desc);
+               }
                if (likely(!(desc->status & IRQ_DISABLED))) {
                        action_ret = handle_IRQ_event(irq, desc->action);
                        if (!noirqdebug)
@@ -191,8 +352,10 @@ unsigned int __do_IRQ(unsigned int irq)
        }
 
        spin_lock(&desc->lock);
-       if (desc->chip->ack)
+       if (desc->chip->ack) {
                desc->chip->ack(irq);
+               desc = irq_remap_to_desc(irq, desc);
+       }
        /*
         * REPLAY is when Linux resends an IRQ that was dropped earlier
         * WAITING is used by probe to mark irqs that are being tested
@@ -259,19 +422,25 @@ out:
 }
 #endif
 
-
-#ifdef CONFIG_TRACE_IRQFLAGS
-/*
- * lockdep: we want to handle all irq_desc locks as a single lock-class:
- */
-static struct lock_class_key irq_desc_lock_class;
-
 void early_init_irq_lock_class(void)
 {
        struct irq_desc *desc;
        int i;
 
-       for_each_irq_desc(i, desc)
+       for_each_irq_desc(i, desc) {
+               if (!desc)
+                       continue;
+
                lockdep_set_class(&desc->lock, &irq_desc_lock_class);
+       }
+}
+
+#ifdef CONFIG_SPARSE_IRQ
+unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
+{
+       struct irq_desc *desc = irq_to_desc(irq);
+       return desc->kstat_irqs[cpu];
 }
 #endif
+EXPORT_SYMBOL(kstat_irqs_cpu);
+
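
The sparse-IRQ code added above allocates irq_desc structures on demand and publishes them through irq_desc_ptrs[]; irq_to_desc_alloc_cpu() re-reads the slot after taking sparse_irq_lock so that two CPUs racing to create the same descriptor end up sharing one. A minimal user-space sketch of that check / lock / re-check idiom follows (names invented, and ignoring the memory-ordering details the kernel relies on; slots are only ever filled, never cleared):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

#define NSLOTS 16

static void *table[NSLOTS];
static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;

static void *slot_alloc(unsigned int i)
{
	void *p;

	if (i >= NSLOTS)
		return NULL;

	p = table[i];			/* fast path: already populated */
	if (p)
		return p;

	pthread_mutex_lock(&table_lock);
	p = table[i];			/* re-check: another thread may have won */
	if (!p) {
		p = calloc(1, 64);
		if (!p)
			abort();	/* the kernel version BUGs here as well */
		table[i] = p;
	}
	pthread_mutex_unlock(&table_lock);
	return p;
}

int main(void)
{
	printf("%p %p\n", slot_alloc(3), slot_alloc(3));	/* same pointer twice */
	return 0;
}
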
index 64c1c7253dae091b931652aa235ba3c44b6060c5..e6d0a43cc1255c2abb3778fcec5f5b1d4e5290ec 100644 (file)
@@ -13,6 +13,11 @@ extern void compat_irq_chip_set_default_handler(struct irq_desc *desc);
 extern int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
                unsigned long flags);
 
+extern struct lock_class_key irq_desc_lock_class;
+extern void init_kstat_irqs(struct irq_desc *desc, int cpu, int nr);
+extern spinlock_t sparse_irq_lock;
+extern struct irq_desc *irq_desc_ptrs[NR_IRQS];
+
 #ifdef CONFIG_PROC_FS
 extern void register_irq_proc(unsigned int irq, struct irq_desc *desc);
 extern void register_handler_proc(unsigned int irq, struct irqaction *action);
index 801addda3c43d4a7a767b27468fee05232cb6f89..540f6c49f3fa156b2bd0d61ad2c5090ea0e46013 100644 (file)
@@ -370,16 +370,18 @@ int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
                return 0;
        }
 
-       ret = chip->set_type(irq, flags & IRQF_TRIGGER_MASK);
+       /* caller masked out all except trigger mode flags */
+       ret = chip->set_type(irq, flags);
 
        if (ret)
                pr_err("setting trigger mode %d for irq %u failed (%pF)\n",
-                               (int)(flags & IRQF_TRIGGER_MASK),
-                               irq, chip->set_type);
+                               (int)flags, irq, chip->set_type);
        else {
+               if (flags & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
+                       flags |= IRQ_LEVEL;
                /* note that IRQF_TRIGGER_MASK == IRQ_TYPE_SENSE_MASK */
-               desc->status &= ~IRQ_TYPE_SENSE_MASK;
-               desc->status |= flags & IRQ_TYPE_SENSE_MASK;
+               desc->status &= ~(IRQ_LEVEL | IRQ_TYPE_SENSE_MASK);
+               desc->status |= flags;
        }
 
        return ret;
@@ -459,7 +461,8 @@ __setup_irq(unsigned int irq, struct irq_desc * desc, struct irqaction *new)
 
                /* Setup the type (level, edge polarity) if configured: */
                if (new->flags & IRQF_TRIGGER_MASK) {
-                       ret = __irq_set_trigger(desc, irq, new->flags);
+                       ret = __irq_set_trigger(desc, irq,
+                                       new->flags & IRQF_TRIGGER_MASK);
 
                        if (ret) {
                                spin_unlock_irqrestore(&desc->lock, flags);
@@ -673,6 +676,18 @@ int request_irq(unsigned int irq, irq_handler_t handler,
        struct irq_desc *desc;
        int retval;
 
+       /*
+        * handle_IRQ_event() always ignores IRQF_DISABLED except for
+        * the _first_ irqaction (sigh).  That can cause oopsing, but
+        * the behavior is classified as "will not fix" so we need to
+        * start nudging drivers away from using that idiom.
+        */
+       if ((irqflags & (IRQF_SHARED|IRQF_DISABLED))
+                       == (IRQF_SHARED|IRQF_DISABLED))
+               pr_warning("IRQ %d/%s: IRQF_DISABLED is not "
+                               "guaranteed on shared IRQs\n",
+                               irq, devname);
+
 #ifdef CONFIG_LOCKDEP
        /*
         * Lockdep wants atomic interrupt handlers:

diff --git a/kernel/irq/numa_migrate.c b/kernel/irq/numa_migrate.c
new file mode 100644 (file)
index 0000000..089c374
--- /dev/null
@@ -0,0 +1,122 @@
+/*
+ * NUMA irq-desc migration code
+ *
+ * Migrate IRQ data structures (irq_desc, chip_data, etc.) over to
+ * the new "home node" of the IRQ.
+ */
+
+#include <linux/irq.h>
+#include <linux/module.h>
+#include <linux/random.h>
+#include <linux/interrupt.h>
+#include <linux/kernel_stat.h>
+
+#include "internals.h"
+
+static void init_copy_kstat_irqs(struct irq_desc *old_desc,
+                                struct irq_desc *desc,
+                                int cpu, int nr)
+{
+       unsigned long bytes;
+
+       init_kstat_irqs(desc, cpu, nr);
+
+       if (desc->kstat_irqs != old_desc->kstat_irqs) {
+               /* Compute how many bytes we need per irq and allocate them */
+               bytes = nr * sizeof(unsigned int);
+
+               memcpy(desc->kstat_irqs, old_desc->kstat_irqs, bytes);
+       }
+}
+
+static void free_kstat_irqs(struct irq_desc *old_desc, struct irq_desc *desc)
+{
+       if (old_desc->kstat_irqs == desc->kstat_irqs)
+               return;
+
+       kfree(old_desc->kstat_irqs);
+       old_desc->kstat_irqs = NULL;
+}
+
+static void init_copy_one_irq_desc(int irq, struct irq_desc *old_desc,
+                struct irq_desc *desc, int cpu)
+{
+       memcpy(desc, old_desc, sizeof(struct irq_desc));
+       desc->cpu = cpu;
+       lockdep_set_class(&desc->lock, &irq_desc_lock_class);
+       init_copy_kstat_irqs(old_desc, desc, cpu, nr_cpu_ids);
+       arch_init_copy_chip_data(old_desc, desc, cpu);
+}
+
+static void free_one_irq_desc(struct irq_desc *old_desc, struct irq_desc *desc)
+{
+       free_kstat_irqs(old_desc, desc);
+       arch_free_chip_data(old_desc, desc);
+}
+
+static struct irq_desc *__real_move_irq_desc(struct irq_desc *old_desc,
+                                               int cpu)
+{
+       struct irq_desc *desc;
+       unsigned int irq;
+       unsigned long flags;
+       int node;
+
+       irq = old_desc->irq;
+
+       spin_lock_irqsave(&sparse_irq_lock, flags);
+
+       /* We have to check it to avoid races with another CPU */
+       desc = irq_desc_ptrs[irq];
+
+       if (desc && old_desc != desc)
+               goto out_unlock;
+
+       node = cpu_to_node(cpu);
+       desc = kzalloc_node(sizeof(*desc), GFP_ATOMIC, node);
+       printk(KERN_DEBUG "  move irq_desc for %d to cpu %d node %d\n",
+                irq, cpu, node);
+       if (!desc) {
+               printk(KERN_ERR "can not get new irq_desc for moving\n");
+               /* still use old one */
+               desc = old_desc;
+               goto out_unlock;
+       }
+       init_copy_one_irq_desc(irq, old_desc, desc, cpu);
+
+       irq_desc_ptrs[irq] = desc;
+
+       /* free the old one */
+       free_one_irq_desc(old_desc, desc);
+       kfree(old_desc);
+
+out_unlock:
+       spin_unlock_irqrestore(&sparse_irq_lock, flags);
+
+       return desc;
+}
+
+struct irq_desc *move_irq_desc(struct irq_desc *desc, int cpu)
+{
+       int old_cpu;
+       int node, old_node;
+
+       /* these are all statically allocated; do not move them */
+       if (desc->irq < NR_IRQS_LEGACY)
+               return desc;
+
+       old_cpu = desc->cpu;
+       printk(KERN_DEBUG
+                "try to move irq_desc from cpu %d to %d\n", old_cpu, cpu);
+       if (old_cpu != cpu) {
+               node = cpu_to_node(cpu);
+               old_node = cpu_to_node(old_cpu);
+               if (old_node != node)
+                       desc = __real_move_irq_desc(desc, cpu);
+               else
+                       desc->cpu = cpu;
+       }
+
+       return desc;
+}
+
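
move_irq_desc() above follows a copy-publish-free pattern: allocate a copy on the new home node, publish it in irq_desc_ptrs[] under sparse_irq_lock, and only then free the old descriptor, falling back to the old one if the allocation fails or somebody else already moved it. A rough user-space sketch of just that step (names invented; real readers are kept away from the stale copy by the IRQ layer, which plain malloc/free here does not model):

#include <pthread.h>
#include <stdlib.h>
#include <string.h>

struct obj { int home; char payload[56]; };

static struct obj *published;
static pthread_mutex_t publish_lock = PTHREAD_MUTEX_INITIALIZER;

static struct obj *move_obj(struct obj *old, int new_home)
{
	struct obj *fresh;

	pthread_mutex_lock(&publish_lock);

	if (published != old) {		/* somebody else already moved it */
		fresh = published;
		goto out;
	}

	fresh = malloc(sizeof(*fresh));
	if (!fresh) {			/* keep limping along with the old one */
		fresh = old;
		goto out;
	}

	memcpy(fresh, old, sizeof(*fresh));
	fresh->home = new_home;
	published = fresh;		/* readers now see the new copy */
	free(old);
out:
	pthread_mutex_unlock(&publish_lock);
	return fresh;
}

int main(void)
{
	published = calloc(1, sizeof(*published));
	published = move_obj(published, 1);
	free(published);
	return 0;
}
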
index d257e7d6a8a4ba4b10504dcd8490c812fe37504c..f6b3440f05bc50dbe68cdb7026e4e287e227b934 100644 (file)
@@ -243,7 +243,11 @@ void init_irq_proc(void)
        /*
         * Create entries for all existing IRQs.
         */
-       for_each_irq_desc(irq, desc)
+       for_each_irq_desc(irq, desc) {
+               if (!desc)
+                       continue;
+
                register_irq_proc(irq, desc);
+       }
 }
 
index dd364c11e56e0f82e0e923fcc3b60d17524e991d..3738107531fd29190b62e628d4d7da8f1081ec96 100644 (file)
@@ -91,6 +91,9 @@ static int misrouted_irq(int irq)
        int i, ok = 0;
 
        for_each_irq_desc(i, desc) {
+               if (!desc)
+                       continue;
+
                if (!i)
                         continue;
 
@@ -112,6 +115,8 @@ static void poll_spurious_irqs(unsigned long dummy)
        for_each_irq_desc(i, desc) {
                unsigned int status;
 
+               if (!desc)
+                       continue;
                if (!i)
                         continue;
 
index 74b1878b8bb8170a21957e74600465437aa225bd..06b0c3568f0b230a8c6b669d055c8c442a7eb790 100644 (file)
@@ -137,16 +137,16 @@ static inline struct lock_class *hlock_class(struct held_lock *hlock)
 #ifdef CONFIG_LOCK_STAT
 static DEFINE_PER_CPU(struct lock_class_stats[MAX_LOCKDEP_KEYS], lock_stats);
 
-static int lock_contention_point(struct lock_class *class, unsigned long ip)
+static int lock_point(unsigned long points[], unsigned long ip)
 {
        int i;
 
-       for (i = 0; i < ARRAY_SIZE(class->contention_point); i++) {
-               if (class->contention_point[i] == 0) {
-                       class->contention_point[i] = ip;
+       for (i = 0; i < LOCKSTAT_POINTS; i++) {
+               if (points[i] == 0) {
+                       points[i] = ip;
                        break;
                }
-               if (class->contention_point[i] == ip)
+               if (points[i] == ip)
                        break;
        }
 
@@ -186,6 +186,9 @@ struct lock_class_stats lock_stats(struct lock_class *class)
                for (i = 0; i < ARRAY_SIZE(stats.contention_point); i++)
                        stats.contention_point[i] += pcs->contention_point[i];
 
+               for (i = 0; i < ARRAY_SIZE(stats.contending_point); i++)
+                       stats.contending_point[i] += pcs->contending_point[i];
+
                lock_time_add(&pcs->read_waittime, &stats.read_waittime);
                lock_time_add(&pcs->write_waittime, &stats.write_waittime);
 
@@ -210,6 +213,7 @@ void clear_lock_stats(struct lock_class *class)
                memset(cpu_stats, 0, sizeof(struct lock_class_stats));
        }
        memset(class->contention_point, 0, sizeof(class->contention_point));
+       memset(class->contending_point, 0, sizeof(class->contending_point));
 }
 
 static struct lock_class_stats *get_lock_stats(struct lock_class *class)
@@ -288,14 +292,12 @@ void lockdep_off(void)
 {
        current->lockdep_recursion++;
 }
-
 EXPORT_SYMBOL(lockdep_off);
 
 void lockdep_on(void)
 {
        current->lockdep_recursion--;
 }
-
 EXPORT_SYMBOL(lockdep_on);
 
 /*
@@ -577,7 +579,8 @@ static void print_lock_class_header(struct lock_class *class, int depth)
 /*
  * printk all lock dependencies starting at <entry>:
  */
-static void print_lock_dependencies(struct lock_class *class, int depth)
+static void __used
+print_lock_dependencies(struct lock_class *class, int depth)
 {
        struct lock_list *entry;
 
@@ -2509,7 +2512,6 @@ void lockdep_init_map(struct lockdep_map *lock, const char *name,
        if (subclass)
                register_lock_class(lock, subclass, 1);
 }
-
 EXPORT_SYMBOL_GPL(lockdep_init_map);
 
 /*
@@ -2690,8 +2692,9 @@ static int check_unlock(struct task_struct *curr, struct lockdep_map *lock,
 }
 
 static int
-__lock_set_subclass(struct lockdep_map *lock,
-                   unsigned int subclass, unsigned long ip)
+__lock_set_class(struct lockdep_map *lock, const char *name,
+                struct lock_class_key *key, unsigned int subclass,
+                unsigned long ip)
 {
        struct task_struct *curr = current;
        struct held_lock *hlock, *prev_hlock;
@@ -2718,6 +2721,7 @@ __lock_set_subclass(struct lockdep_map *lock,
        return print_unlock_inbalance_bug(curr, lock, ip);
 
 found_it:
+       lockdep_init_map(lock, name, key, 0);
        class = register_lock_class(lock, subclass, 0);
        hlock->class_idx = class - lock_classes + 1;
 
@@ -2902,9 +2906,9 @@ static void check_flags(unsigned long flags)
 #endif
 }
 
-void
-lock_set_subclass(struct lockdep_map *lock,
-                 unsigned int subclass, unsigned long ip)
+void lock_set_class(struct lockdep_map *lock, const char *name,
+                   struct lock_class_key *key, unsigned int subclass,
+                   unsigned long ip)
 {
        unsigned long flags;
 
@@ -2914,13 +2918,12 @@ lock_set_subclass(struct lockdep_map *lock,
        raw_local_irq_save(flags);
        current->lockdep_recursion = 1;
        check_flags(flags);
-       if (__lock_set_subclass(lock, subclass, ip))
+       if (__lock_set_class(lock, name, key, subclass, ip))
                check_chain_key(current);
        current->lockdep_recursion = 0;
        raw_local_irq_restore(flags);
 }
-
-EXPORT_SYMBOL_GPL(lock_set_subclass);
+EXPORT_SYMBOL_GPL(lock_set_class);
 
 /*
  * We are not always called with irqs disabled - do that here,
@@ -2944,7 +2947,6 @@ void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
        current->lockdep_recursion = 0;
        raw_local_irq_restore(flags);
 }
-
 EXPORT_SYMBOL_GPL(lock_acquire);
 
 void lock_release(struct lockdep_map *lock, int nested,
@@ -2962,7 +2964,6 @@ void lock_release(struct lockdep_map *lock, int nested,
        current->lockdep_recursion = 0;
        raw_local_irq_restore(flags);
 }
-
 EXPORT_SYMBOL_GPL(lock_release);
 
 #ifdef CONFIG_LOCK_STAT
@@ -3000,7 +3001,7 @@ __lock_contended(struct lockdep_map *lock, unsigned long ip)
        struct held_lock *hlock, *prev_hlock;
        struct lock_class_stats *stats;
        unsigned int depth;
-       int i, point;
+       int i, contention_point, contending_point;
 
        depth = curr->lockdep_depth;
        if (DEBUG_LOCKS_WARN_ON(!depth))
@@ -3024,18 +3025,22 @@ __lock_contended(struct lockdep_map *lock, unsigned long ip)
 found_it:
        hlock->waittime_stamp = sched_clock();
 
-       point = lock_contention_point(hlock_class(hlock), ip);
+       contention_point = lock_point(hlock_class(hlock)->contention_point, ip);
+       contending_point = lock_point(hlock_class(hlock)->contending_point,
+                                     lock->ip);
 
        stats = get_lock_stats(hlock_class(hlock));
-       if (point < ARRAY_SIZE(stats->contention_point))
-               stats->contention_point[point]++;
+       if (contention_point < LOCKSTAT_POINTS)
+               stats->contention_point[contention_point]++;
+       if (contending_point < LOCKSTAT_POINTS)
+               stats->contending_point[contending_point]++;
        if (lock->cpu != smp_processor_id())
                stats->bounces[bounce_contended + !!hlock->read]++;
        put_lock_stats(stats);
 }
 
 static void
-__lock_acquired(struct lockdep_map *lock)
+__lock_acquired(struct lockdep_map *lock, unsigned long ip)
 {
        struct task_struct *curr = current;
        struct held_lock *hlock, *prev_hlock;
@@ -3084,6 +3089,7 @@ found_it:
        put_lock_stats(stats);
 
        lock->cpu = cpu;
+       lock->ip = ip;
 }
 
 void lock_contended(struct lockdep_map *lock, unsigned long ip)
@@ -3105,7 +3111,7 @@ void lock_contended(struct lockdep_map *lock, unsigned long ip)
 }
 EXPORT_SYMBOL_GPL(lock_contended);
 
-void lock_acquired(struct lockdep_map *lock)
+void lock_acquired(struct lockdep_map *lock, unsigned long ip)
 {
        unsigned long flags;
 
@@ -3118,7 +3124,7 @@ void lock_acquired(struct lockdep_map *lock)
        raw_local_irq_save(flags);
        check_flags(flags);
        current->lockdep_recursion = 1;
-       __lock_acquired(lock);
+       __lock_acquired(lock, ip);
        current->lockdep_recursion = 0;
        raw_local_irq_restore(flags);
 }
@@ -3442,7 +3448,6 @@ retry:
        if (unlock)
                read_unlock(&tasklist_lock);
 }
-
 EXPORT_SYMBOL_GPL(debug_show_all_locks);
 
 /*
@@ -3463,7 +3468,6 @@ void debug_show_held_locks(struct task_struct *task)
 {
                __debug_show_held_locks(task);
 }
-
 EXPORT_SYMBOL_GPL(debug_show_held_locks);
 
 void lockdep_sys_exit(void)
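
lock_point(), introduced in the hunk above, generalises the old lock_contention_point(): it records an instruction pointer in the first free slot of a fixed-size array, or returns the slot that already holds it, and now serves both contention_point[] and the new contending_point[]. Copied here as a stand-alone program (LOCKSTAT_POINTS assumed to be 4, as in this kernel series):

#include <stdio.h>

#define LOCKSTAT_POINTS 4

static int lock_point(unsigned long points[], unsigned long ip)
{
	int i;

	for (i = 0; i < LOCKSTAT_POINTS; i++) {
		if (points[i] == 0) {
			points[i] = ip;
			break;
		}
		if (points[i] == ip)
			break;
	}
	return i;	/* == LOCKSTAT_POINTS when the array is full */
}

int main(void)
{
	unsigned long pts[LOCKSTAT_POINTS] = { 0 };

	printf("%d %d %d\n",
	       lock_point(pts, 0x1000),		/* new ip    -> slot 0 */
	       lock_point(pts, 0x2000),		/* new ip    -> slot 1 */
	       lock_point(pts, 0x1000));	/* repeat ip -> slot 0 again */
	return 0;
}

The callers guard the increment with point < LOCKSTAT_POINTS, so a full array simply stops collecting new call sites rather than overflowing.
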
index 20dbcbf9c7dd2cf34486f3e9f307f1e3cf8a97bc..13716b8138961ee9f5feffe23ad9115fc27766e3 100644 (file)
@@ -470,11 +470,12 @@ static void seq_line(struct seq_file *m, char c, int offset, int length)
 
 static void snprint_time(char *buf, size_t bufsiz, s64 nr)
 {
-       unsigned long rem;
+       s64 div;
+       s32 rem;
 
        nr += 5; /* for display rounding */
-       rem = do_div(nr, 1000); /* XXX: do_div_signed */
-       snprintf(buf, bufsiz, "%lld.%02d", (long long)nr, (int)rem/10);
+       div = div_s64_rem(nr, 1000, &rem);
+       snprintf(buf, bufsiz, "%lld.%02d", (long long)div, (int)rem/10);
 }
 
 static void seq_time(struct seq_file *m, s64 time)
@@ -556,7 +557,7 @@ static void seq_stats(struct seq_file *m, struct lock_stat_data *data)
        if (stats->read_holdtime.nr)
                namelen += 2;
 
-       for (i = 0; i < ARRAY_SIZE(class->contention_point); i++) {
+       for (i = 0; i < LOCKSTAT_POINTS; i++) {
                char sym[KSYM_SYMBOL_LEN];
                char ip[32];
 
@@ -573,6 +574,23 @@ static void seq_stats(struct seq_file *m, struct lock_stat_data *data)
                                stats->contention_point[i],
                                ip, sym);
        }
+       for (i = 0; i < LOCKSTAT_POINTS; i++) {
+               char sym[KSYM_SYMBOL_LEN];
+               char ip[32];
+
+               if (class->contending_point[i] == 0)
+                       break;
+
+               if (!i)
+                       seq_line(m, '-', 40-namelen, namelen);
+
+               sprint_symbol(sym, class->contending_point[i]);
+               snprintf(ip, sizeof(ip), "[<%p>]",
+                               (void *)class->contending_point[i]);
+               seq_printf(m, "%40s %14lu %29s %s\n", name,
+                               stats->contending_point[i],
+                               ip, sym);
+       }
        if (i) {
                seq_puts(m, "\n");
                seq_line(m, '.', 0, 40 + 1 + 10 * (14 + 1));
@@ -582,7 +600,7 @@ static void seq_stats(struct seq_file *m, struct lock_stat_data *data)
 
 static void seq_header(struct seq_file *m)
 {
-       seq_printf(m, "lock_stat version 0.2\n");
+       seq_printf(m, "lock_stat version 0.3\n");
        seq_line(m, '-', 0, 40 + 1 + 10 * (14 + 1));
        seq_printf(m, "%40s %14s %14s %14s %14s %14s %14s %14s %14s "
                        "%14s %14s\n",
index 12c779dc65d48a56f1941cc41d73f9593828cf0a..4f45d4b658ef6ab1fda9357725b01361a6768a76 100644 (file)
@@ -59,7 +59,7 @@ EXPORT_SYMBOL(__mutex_init);
  * We also put the fastpath first in the kernel image, to make sure the
  * branch is predicted by the CPU as default-untaken.
  */
-static void noinline __sched
+static __used noinline void __sched
 __mutex_lock_slowpath(atomic_t *lock_count);
 
 /***
@@ -96,7 +96,7 @@ void inline __sched mutex_lock(struct mutex *lock)
 EXPORT_SYMBOL(mutex_lock);
 #endif
 
-static noinline void __sched __mutex_unlock_slowpath(atomic_t *lock_count);
+static __used noinline void __sched __mutex_unlock_slowpath(atomic_t *lock_count);
 
 /***
  * mutex_unlock - release the mutex
@@ -184,7 +184,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
        }
 
 done:
-       lock_acquired(&lock->dep_map);
+       lock_acquired(&lock->dep_map, ip);
        /* got the lock - rejoice! */
        mutex_remove_waiter(lock, &waiter, task_thread_info(task));
        debug_mutex_set_owner(lock, task_thread_info(task));
@@ -268,7 +268,7 @@ __mutex_unlock_common_slowpath(atomic_t *lock_count, int nested)
 /*
  * Release the lock, slowpath:
  */
-static noinline void
+static __used noinline void
 __mutex_unlock_slowpath(atomic_t *lock_count)
 {
        __mutex_unlock_common_slowpath(lock_count, 1);
@@ -313,7 +313,7 @@ int __sched mutex_lock_killable(struct mutex *lock)
 }
 EXPORT_SYMBOL(mutex_lock_killable);
 
-static noinline void __sched
+static __used noinline void __sched
 __mutex_lock_slowpath(atomic_t *lock_count)
 {
        struct mutex *lock = container_of(lock_count, struct mutex, count);
index 4282c0a40a57ada651b86c7dcce2389abf489448..61d5aa5eced3466393582e4f566b63c468ea7cc3 100644 (file)
@@ -82,6 +82,14 @@ static int __kprobes notifier_call_chain(struct notifier_block **nl,
 
        while (nb && nr_to_call) {
                next_nb = rcu_dereference(nb->next);
+
+#ifdef CONFIG_DEBUG_NOTIFIERS
+               if (unlikely(!func_ptr_is_kernel_text(nb->notifier_call))) {
+                       WARN(1, "Invalid notifier called!");
+                       nb = next_nb;
+                       continue;
+               }
+#endif
                ret = nb->notifier_call(nb, val, v);
 
                if (nr_calls)
index 4d5088355bfefba8956ff5c91cbc4b932d23eff6..13f06349a7868aa2ad79635266b94050eb932545 100644 (file)
@@ -21,6 +21,7 @@
 #include <linux/debug_locks.h>
 #include <linux/random.h>
 #include <linux/kallsyms.h>
+#include <linux/dmi.h>
 
 int panic_on_oops;
 static unsigned long tainted_mask;
@@ -321,36 +322,27 @@ void oops_exit(void)
 }
 
 #ifdef WANT_WARN_ON_SLOWPATH
-void warn_on_slowpath(const char *file, int line)
-{
-       char function[KSYM_SYMBOL_LEN];
-       unsigned long caller = (unsigned long) __builtin_return_address(0);
-       sprint_symbol(function, caller);
-
-       printk(KERN_WARNING "------------[ cut here ]------------\n");
-       printk(KERN_WARNING "WARNING: at %s:%d %s()\n", file,
-               line, function);
-       print_modules();
-       dump_stack();
-       print_oops_end_marker();
-       add_taint(TAINT_WARN);
-}
-EXPORT_SYMBOL(warn_on_slowpath);
-
-
 void warn_slowpath(const char *file, int line, const char *fmt, ...)
 {
        va_list args;
        char function[KSYM_SYMBOL_LEN];
        unsigned long caller = (unsigned long)__builtin_return_address(0);
+       const char *board;
+
        sprint_symbol(function, caller);
 
        printk(KERN_WARNING "------------[ cut here ]------------\n");
        printk(KERN_WARNING "WARNING: at %s:%d %s()\n", file,
                line, function);
-       va_start(args, fmt);
-       vprintk(fmt, args);
-       va_end(args);
+       board = dmi_get_system_info(DMI_PRODUCT_NAME);
+       if (board)
+               printk(KERN_WARNING "Hardware name: %s\n", board);
+
+       if (fmt) {
+               va_start(args, fmt);
+               vprintk(fmt, args);
+               va_end(args);
+       }
 
        print_modules();
        dump_stack();
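
With warn_on_slowpath() removed, warn_slowpath() above serves both WARN() and WARN_ON() callers by accepting a NULL format string and printing the extra message only when one is present. A user-space sketch of that shape (warn_at() and its output are invented; the DMI board name, module list and backtrace are only hinted at in a comment):

#include <stdarg.h>
#include <stdio.h>

static void warn_at(const char *file, int line, const char *fmt, ...)
{
	va_list args;

	fprintf(stderr, "------------[ cut here ]------------\n");
	fprintf(stderr, "WARNING: at %s:%d\n", file, line);

	if (fmt) {			/* WARN() path: extra message supplied */
		va_start(args, fmt);
		vfprintf(stderr, fmt, args);
		va_end(args);
		fputc('\n', stderr);
	}
	/* the kernel also prints the hardware name, modules and a backtrace here */
}

int main(void)
{
	warn_at(__FILE__, __LINE__, NULL);			/* WARN_ON() style */
	warn_at(__FILE__, __LINE__, "bad state %d", 42);	/* WARN() style */
	return 0;
}
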
index 4e5288a831de2e696260c12f97d60fdbcfc551a3..157de3a478321ab80c8c05cee5ffd07da68bab39 100644 (file)
@@ -58,21 +58,21 @@ void thread_group_cputime(
        struct task_struct *tsk,
        struct task_cputime *times)
 {
-       struct signal_struct *sig;
+       struct task_cputime *totals, *tot;
        int i;
-       struct task_cputime *tot;
 
-       sig = tsk->signal;
-       if (unlikely(!sig) || !sig->cputime.totals) {
+       totals = tsk->signal->cputime.totals;
+       if (!totals) {
                times->utime = tsk->utime;
                times->stime = tsk->stime;
                times->sum_exec_runtime = tsk->se.sum_exec_runtime;
                return;
        }
+
        times->stime = times->utime = cputime_zero;
        times->sum_exec_runtime = 0;
        for_each_possible_cpu(i) {
-               tot = per_cpu_ptr(tsk->signal->cputime.totals, i);
+               tot = per_cpu_ptr(totals, i);
                times->utime = cputime_add(times->utime, tot->utime);
                times->stime = cputime_add(times->stime, tot->stime);
                times->sum_exec_runtime += tot->sum_exec_runtime;
index a140e44eebbacbe88b989e98b1bbdc744a99c94f..887c63787de6634b9d46ddcdbb386d5d3ffe5250 100644 (file)
@@ -116,7 +116,7 @@ static DEFINE_SPINLOCK(idr_lock);
  *         must supply functions here, even if the function just returns
  *         ENOSYS.  The standard POSIX timer management code assumes the
  *         following: 1.) The k_itimer struct (sched.h) is used for the
- *         timer.  2.) The list, it_lock, it_clock, it_id and it_process
+ *         timer.  2.) The list, it_lock, it_clock, it_id and it_pid
  *         fields are not modified by timer code.
  *
  *          At this time all functions EXCEPT clock_nanosleep can be
@@ -319,7 +319,8 @@ void do_schedule_next_timer(struct siginfo *info)
 
 int posix_timer_event(struct k_itimer *timr, int si_private)
 {
-       int shared, ret;
+       struct task_struct *task;
+       int shared, ret = -1;
        /*
         * FIXME: if ->sigq is queued we can race with
         * dequeue_signal()->do_schedule_next_timer().
@@ -333,8 +334,13 @@ int posix_timer_event(struct k_itimer *timr, int si_private)
         */
        timr->sigq->info.si_sys_private = si_private;
 
-       shared = !(timr->it_sigev_notify & SIGEV_THREAD_ID);
-       ret = send_sigqueue(timr->sigq, timr->it_process, shared);
+       rcu_read_lock();
+       task = pid_task(timr->it_pid, PIDTYPE_PID);
+       if (task) {
+               shared = !(timr->it_sigev_notify & SIGEV_THREAD_ID);
+               ret = send_sigqueue(timr->sigq, task, shared);
+       }
+       rcu_read_unlock();
        /* If we failed to send the signal the timer stops. */
        return ret > 0;
 }
@@ -411,7 +417,7 @@ static enum hrtimer_restart posix_timer_fn(struct hrtimer *timer)
        return ret;
 }
 
-static struct task_struct * good_sigevent(sigevent_t * event)
+static struct pid *good_sigevent(sigevent_t * event)
 {
        struct task_struct *rtn = current->group_leader;
 
@@ -425,7 +431,7 @@ static struct task_struct * good_sigevent(sigevent_t * event)
            ((event->sigev_signo <= 0) || (event->sigev_signo > SIGRTMAX)))
                return NULL;
 
-       return rtn;
+       return task_pid(rtn);
 }
 
 void register_posix_clock(const clockid_t clock_id, struct k_clock *new_clock)
@@ -464,6 +470,7 @@ static void release_posix_timer(struct k_itimer *tmr, int it_id_set)
                idr_remove(&posix_timers_id, tmr->it_id);
                spin_unlock_irqrestore(&idr_lock, flags);
        }
+       put_pid(tmr->it_pid);
        sigqueue_free(tmr->sigq);
        kmem_cache_free(posix_timers_cache, tmr);
 }
@@ -477,7 +484,6 @@ sys_timer_create(const clockid_t which_clock,
 {
        struct k_itimer *new_timer;
        int error, new_timer_id;
-       struct task_struct *process;
        sigevent_t event;
        int it_id_set = IT_ID_NOT_SET;
 
@@ -531,11 +537,9 @@ sys_timer_create(const clockid_t which_clock,
                        goto out;
                }
                rcu_read_lock();
-               process = good_sigevent(&event);
-               if (process)
-                       get_task_struct(process);
+               new_timer->it_pid = get_pid(good_sigevent(&event));
                rcu_read_unlock();
-               if (!process) {
+               if (!new_timer->it_pid) {
                        error = -EINVAL;
                        goto out;
                }
@@ -543,8 +547,7 @@ sys_timer_create(const clockid_t which_clock,
                event.sigev_notify = SIGEV_SIGNAL;
                event.sigev_signo = SIGALRM;
                event.sigev_value.sival_int = new_timer->it_id;
-               process = current->group_leader;
-               get_task_struct(process);
+               new_timer->it_pid = get_pid(task_tgid(current));
        }
 
        new_timer->it_sigev_notify     = event.sigev_notify;
@@ -554,7 +557,7 @@ sys_timer_create(const clockid_t which_clock,
        new_timer->sigq->info.si_code  = SI_TIMER;
 
        spin_lock_irq(&current->sighand->siglock);
-       new_timer->it_process = process;
+       new_timer->it_signal = current->signal;
        list_add(&new_timer->list, &current->signal->posix_timers);
        spin_unlock_irq(&current->sighand->siglock);
 
@@ -589,8 +592,7 @@ static struct k_itimer *lock_timer(timer_t timer_id, unsigned long *flags)
        timr = idr_find(&posix_timers_id, (int)timer_id);
        if (timr) {
                spin_lock(&timr->it_lock);
-               if (timr->it_process &&
-                   same_thread_group(timr->it_process, current)) {
+               if (timr->it_signal == current->signal) {
                        spin_unlock(&idr_lock);
                        return timr;
                }
@@ -837,8 +839,7 @@ retry_delete:
         * This keeps any tasks waiting on the spin lock from thinking
         * they got something (see the lock code above).
         */
-       put_task_struct(timer->it_process);
-       timer->it_process = NULL;
+       timer->it_signal = NULL;
 
        unlock_timer(timer, flags);
        release_posix_timer(timer, IT_ID_SET);
@@ -864,8 +865,7 @@ retry_delete:
         * This keeps any tasks waiting on the spin lock from thinking
         * they got something (see the lock code above).
         */
-       put_task_struct(timer->it_process);
-       timer->it_process = NULL;
+       timer->it_signal = NULL;
 
        unlock_timer(timer, flags);
        release_posix_timer(timer, IT_ID_SET);
index f492f1583d77f7bcb5cedad94352a18a9d5f9919..e651ab05655f9f92b0bc9e9230ff59990eabb17a 100644 (file)
@@ -662,7 +662,7 @@ asmlinkage int vprintk(const char *fmt, va_list args)
        if (recursion_bug) {
                recursion_bug = 0;
                strcpy(printk_buf, recursion_bug_msg);
-               printed_len = sizeof(recursion_bug_msg);
+               printed_len = strlen(recursion_bug_msg);
        }
        /* Emit the output into the temporary buffer */
        printed_len += vscnprintf(printk_buf + printed_len,
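
The one-line printk fix above replaces sizeof(recursion_bug_msg) with strlen(): sizeof on the message array counts the terminating NUL (and applied to a pointer it would measure the pointer, not the string). A tiny demonstration with an arbitrary message:

#include <stdio.h>
#include <string.h>

static const char msg_array[] = "recursion detected";
static const char *msg_ptr    = "recursion detected";

int main(void)
{
	printf("strlen       = %zu\n", strlen(msg_array));	/* 18 */
	printf("sizeof array = %zu\n", sizeof(msg_array));	/* 19: counts the NUL */
	printf("sizeof ptr   = %zu\n", sizeof(msg_ptr));	/* 8 on LP64 */
	return 0;
}
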
index 37f72e551542234d2d7d905741e8a5f6fb7532ec..e503a002f330fb1f7ed74ef996384e74ae294d44 100644 (file)
@@ -191,7 +191,7 @@ static void print_other_cpu_stall(struct rcu_ctrlblk *rcp)
 
        /* OK, time to rat on our buddy... */
 
-       printk(KERN_ERR "RCU detected CPU stalls:");
+       printk(KERN_ERR "INFO: RCU detected CPU stalls:");
        for_each_possible_cpu(cpu) {
                if (cpu_isset(cpu, rcp->cpumask))
                        printk(" %d", cpu);
@@ -204,7 +204,7 @@ static void print_cpu_stall(struct rcu_ctrlblk *rcp)
 {
        unsigned long flags;
 
-       printk(KERN_ERR "RCU detected CPU %d stall (t=%lu/%lu jiffies)\n",
+       printk(KERN_ERR "INFO: RCU detected CPU %d stall (t=%lu/%lu jiffies)\n",
                        smp_processor_id(), jiffies,
                        jiffies - rcp->gp_start);
        dump_stack();
index 59236e8b9daa38e1e92a709e769fa75d857bb41e..04982659875a0fe67b4483874674ad862e3a6640 100644 (file)
@@ -551,6 +551,16 @@ void rcu_irq_exit(void)
        }
 }
 
+void rcu_nmi_enter(void)
+{
+       rcu_irq_enter();
+}
+
+void rcu_nmi_exit(void)
+{
+       rcu_irq_exit();
+}
+
 static void dyntick_save_progress_counter(int cpu)
 {
        struct rcu_dyntick_sched *rdssp = &per_cpu(rcu_dyntick_sched, cpu);
index 35c2d3360ecf750be63922a89101f6cfac249107..7c2665cac17220698caa5009b56ec863653083d3 100644 (file)
@@ -149,12 +149,12 @@ static void rcupreempt_trace_sum(struct rcupreempt_trace *sp)
                sp->done_length += cp->done_length;
                sp->done_add += cp->done_add;
                sp->done_remove += cp->done_remove;
-               atomic_set(&sp->done_invoked, atomic_read(&cp->done_invoked));
+               atomic_add(atomic_read(&cp->done_invoked), &sp->done_invoked);
                sp->rcu_check_callbacks += cp->rcu_check_callbacks;
-               atomic_set(&sp->rcu_try_flip_1,
-                          atomic_read(&cp->rcu_try_flip_1));
-               atomic_set(&sp->rcu_try_flip_e1,
-                          atomic_read(&cp->rcu_try_flip_e1));
+               atomic_add(atomic_read(&cp->rcu_try_flip_1),
+                          &sp->rcu_try_flip_1);
+               atomic_add(atomic_read(&cp->rcu_try_flip_e1),
+                          &sp->rcu_try_flip_e1);
                sp->rcu_try_flip_i1 += cp->rcu_try_flip_i1;
                sp->rcu_try_flip_ie1 += cp->rcu_try_flip_ie1;
                sp->rcu_try_flip_g1 += cp->rcu_try_flip_g1;
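
The rcupreempt_trace_sum() hunk above switches the done_invoked and rcu_try_flip counters from atomic_set() to atomic_add(): when folding per-CPU counters into one total, overwriting the accumulator keeps only the last CPU's contribution. The arithmetic, shown with plain ints:

#include <stdio.h>

int main(void)
{
	int per_cpu[4] = { 3, 5, 2, 7 };	/* pretend per-CPU counters */
	int sum_set = 0, sum_add = 0;
	int i;

	for (i = 0; i < 4; i++) {
		sum_set = per_cpu[i];		/* the old atomic_set() pattern */
		sum_add += per_cpu[i];		/* the new atomic_add() pattern */
	}
	printf("set: %d  add: %d\n", sum_set, sum_add);	/* set: 7  add: 17 */
	return 0;
}
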
index 85cb90588a55ca54348f00a46cdd9199c6bba2bf..b31065522104f1a324404ea721489909d9a845f7 100644 (file)
@@ -39,6 +39,7 @@
 #include <linux/moduleparam.h>
 #include <linux/percpu.h>
 #include <linux/notifier.h>
+#include <linux/reboot.h>
 #include <linux/freezer.h>
 #include <linux/cpu.h>
 #include <linux/delay.h>
@@ -108,7 +109,6 @@ struct rcu_torture {
        int rtort_mbtest;
 };
 
-static int fullstop = 0;       /* stop generating callbacks at test end. */
 static LIST_HEAD(rcu_torture_freelist);
 static struct rcu_torture *rcu_torture_current = NULL;
 static long rcu_torture_current_version = 0;
@@ -136,6 +136,30 @@ static int stutter_pause_test = 0;
 #endif
 int rcutorture_runnable = RCUTORTURE_RUNNABLE_INIT;
 
+#define FULLSTOP_SIGNALED 1    /* Bail due to signal. */
+#define FULLSTOP_CLEANUP  2    /* Orderly shutdown. */
+static int fullstop;           /* stop generating callbacks at test end. */
+DEFINE_MUTEX(fullstop_mutex);  /* protect fullstop transitions and */
+                               /*  spawning of kthreads. */
+
+/*
+ * Detect and respond to a signal-based shutdown.
+ */
+static int
+rcutorture_shutdown_notify(struct notifier_block *unused1,
+                          unsigned long unused2, void *unused3)
+{
+       if (fullstop)
+               return NOTIFY_DONE;
+       if (signal_pending(current)) {
+               mutex_lock(&fullstop_mutex);
+               if (!ACCESS_ONCE(fullstop))
+                       fullstop = FULLSTOP_SIGNALED;
+               mutex_unlock(&fullstop_mutex);
+       }
+       return NOTIFY_DONE;
+}
+
 /*
  * Allocate an element from the rcu_tortures pool.
  */
@@ -199,11 +223,12 @@ rcu_random(struct rcu_random_state *rrsp)
 static void
 rcu_stutter_wait(void)
 {
-       while (stutter_pause_test || !rcutorture_runnable)
+       while ((stutter_pause_test || !rcutorture_runnable) && !fullstop) {
                if (rcutorture_runnable)
                        schedule_timeout_interruptible(1);
                else
                        schedule_timeout_interruptible(round_jiffies_relative(HZ));
+       }
 }
 
 /*
@@ -599,7 +624,7 @@ rcu_torture_writer(void *arg)
                rcu_stutter_wait();
        } while (!kthread_should_stop() && !fullstop);
        VERBOSE_PRINTK_STRING("rcu_torture_writer task stopping");
-       while (!kthread_should_stop())
+       while (!kthread_should_stop() && fullstop != FULLSTOP_SIGNALED)
                schedule_timeout_uninterruptible(1);
        return 0;
 }
@@ -624,7 +649,7 @@ rcu_torture_fakewriter(void *arg)
        } while (!kthread_should_stop() && !fullstop);
 
        VERBOSE_PRINTK_STRING("rcu_torture_fakewriter task stopping");
-       while (!kthread_should_stop())
+       while (!kthread_should_stop() && fullstop != FULLSTOP_SIGNALED)
                schedule_timeout_uninterruptible(1);
        return 0;
 }
@@ -734,7 +759,7 @@ rcu_torture_reader(void *arg)
        VERBOSE_PRINTK_STRING("rcu_torture_reader task stopping");
        if (irqreader && cur_ops->irqcapable)
                del_timer_sync(&t);
-       while (!kthread_should_stop())
+       while (!kthread_should_stop() && fullstop != FULLSTOP_SIGNALED)
                schedule_timeout_uninterruptible(1);
        return 0;
 }
@@ -831,7 +856,7 @@ rcu_torture_stats(void *arg)
        do {
                schedule_timeout_interruptible(stat_interval * HZ);
                rcu_torture_stats_print();
-       } while (!kthread_should_stop());
+       } while (!kthread_should_stop() && !fullstop);
        VERBOSE_PRINTK_STRING("rcu_torture_stats task stopping");
        return 0;
 }
@@ -899,7 +924,7 @@ rcu_torture_shuffle(void *arg)
        do {
                schedule_timeout_interruptible(shuffle_interval * HZ);
                rcu_torture_shuffle_tasks();
-       } while (!kthread_should_stop());
+       } while (!kthread_should_stop() && !fullstop);
        VERBOSE_PRINTK_STRING("rcu_torture_shuffle task stopping");
        return 0;
 }
@@ -914,10 +939,10 @@ rcu_torture_stutter(void *arg)
        do {
                schedule_timeout_interruptible(stutter * HZ);
                stutter_pause_test = 1;
-               if (!kthread_should_stop())
+               if (!kthread_should_stop() && !fullstop)
                        schedule_timeout_interruptible(stutter * HZ);
                stutter_pause_test = 0;
-       } while (!kthread_should_stop());
+       } while (!kthread_should_stop() && !fullstop);
        VERBOSE_PRINTK_STRING("rcu_torture_stutter task stopping");
        return 0;
 }
@@ -934,12 +959,27 @@ rcu_torture_print_module_parms(char *tag)
                stutter, irqreader);
 }
 
+static struct notifier_block rcutorture_nb = {
+       .notifier_call = rcutorture_shutdown_notify,
+};
+
 static void
 rcu_torture_cleanup(void)
 {
        int i;
 
-       fullstop = 1;
+       mutex_lock(&fullstop_mutex);
+       if (!fullstop) {
+               /* If being signaled, let it happen, then exit. */
+               mutex_unlock(&fullstop_mutex);
+               schedule_timeout_interruptible(10 * HZ);
+               if (cur_ops->cb_barrier != NULL)
+                       cur_ops->cb_barrier();
+               return;
+       }
+       fullstop = FULLSTOP_CLEANUP;
+       mutex_unlock(&fullstop_mutex);
+       unregister_reboot_notifier(&rcutorture_nb);
        if (stutter_task) {
                VERBOSE_PRINTK_STRING("Stopping rcu_torture_stutter task");
                kthread_stop(stutter_task);
@@ -1015,6 +1055,8 @@ rcu_torture_init(void)
                { &rcu_ops, &rcu_sync_ops, &rcu_bh_ops, &rcu_bh_sync_ops,
                  &srcu_ops, &sched_ops, &sched_ops_sync, };
 
+       mutex_lock(&fullstop_mutex);
+
        /* Process args and tell the world that the torturer is on the job. */
        for (i = 0; i < ARRAY_SIZE(torture_ops); i++) {
                cur_ops = torture_ops[i];
@@ -1024,6 +1066,7 @@ rcu_torture_init(void)
        if (i == ARRAY_SIZE(torture_ops)) {
                printk(KERN_ALERT "rcutorture: invalid torture type: \"%s\"\n",
                       torture_type);
+               mutex_unlock(&fullstop_mutex);
                return (-EINVAL);
        }
        if (cur_ops->init)
@@ -1146,9 +1189,12 @@ rcu_torture_init(void)
                        goto unwind;
                }
        }
+       register_reboot_notifier(&rcutorture_nb);
+       mutex_unlock(&fullstop_mutex);
        return 0;
 
 unwind:
+       mutex_unlock(&fullstop_mutex);
        rcu_torture_cleanup();
        return firsterr;
 }
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
new file mode 100644 (file)
index 0000000..a342b03
--- /dev/null
@@ -0,0 +1,1535 @@
+/*
+ * Read-Copy Update mechanism for mutual exclusion
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright IBM Corporation, 2008
+ *
+ * Authors: Dipankar Sarma <dipankar@in.ibm.com>
+ *         Manfred Spraul <manfred@colorfullife.com>
+ *         Paul E. McKenney <paulmck@linux.vnet.ibm.com> Hierarchical version
+ *
+ * Based on the original work by Paul McKenney <paulmck@us.ibm.com>
+ * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
+ *
+ * For detailed explanation of Read-Copy Update mechanism see -
+ *     Documentation/RCU
+ */
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/smp.h>
+#include <linux/rcupdate.h>
+#include <linux/interrupt.h>
+#include <linux/sched.h>
+#include <asm/atomic.h>
+#include <linux/bitops.h>
+#include <linux/module.h>
+#include <linux/completion.h>
+#include <linux/moduleparam.h>
+#include <linux/percpu.h>
+#include <linux/notifier.h>
+#include <linux/cpu.h>
+#include <linux/mutex.h>
+#include <linux/time.h>
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+static struct lock_class_key rcu_lock_key;
+struct lockdep_map rcu_lock_map =
+       STATIC_LOCKDEP_MAP_INIT("rcu_read_lock", &rcu_lock_key);
+EXPORT_SYMBOL_GPL(rcu_lock_map);
+#endif
+
+/* Data structures. */
+
+#define RCU_STATE_INITIALIZER(name) { \
+       .level = { &name.node[0] }, \
+       .levelcnt = { \
+               NUM_RCU_LVL_0,  /* root of hierarchy. */ \
+               NUM_RCU_LVL_1, \
+               NUM_RCU_LVL_2, \
+               NUM_RCU_LVL_3, /* == MAX_RCU_LVLS */ \
+       }, \
+       .signaled = RCU_SIGNAL_INIT, \
+       .gpnum = -300, \
+       .completed = -300, \
+       .onofflock = __SPIN_LOCK_UNLOCKED(&name.onofflock), \
+       .fqslock = __SPIN_LOCK_UNLOCKED(&name.fqslock), \
+       .n_force_qs = 0, \
+       .n_force_qs_ngp = 0, \
+}
+
+struct rcu_state rcu_state = RCU_STATE_INITIALIZER(rcu_state);
+DEFINE_PER_CPU(struct rcu_data, rcu_data);
+
+struct rcu_state rcu_bh_state = RCU_STATE_INITIALIZER(rcu_bh_state);
+DEFINE_PER_CPU(struct rcu_data, rcu_bh_data);
+
+#ifdef CONFIG_NO_HZ
+DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks);
+#endif /* #ifdef CONFIG_NO_HZ */
+
+static int blimit = 10;                /* Maximum callbacks per softirq. */
+static int qhimark = 10000;    /* If this many pending, ignore blimit. */
+static int qlowmark = 100;     /* Once only this many pending, use blimit. */
+
+static void force_quiescent_state(struct rcu_state *rsp, int relaxed);
+
+/*
+ * Return the number of RCU batches processed thus far for debug & stats.
+ */
+long rcu_batches_completed(void)
+{
+       return rcu_state.completed;
+}
+EXPORT_SYMBOL_GPL(rcu_batches_completed);
+
+/*
+ * Return the number of RCU BH batches processed thus far for debug & stats.
+ */
+long rcu_batches_completed_bh(void)
+{
+       return rcu_bh_state.completed;
+}
+EXPORT_SYMBOL_GPL(rcu_batches_completed_bh);
+
+/*
+ * Does the CPU have callbacks ready to be invoked?
+ */
+static int
+cpu_has_callbacks_ready_to_invoke(struct rcu_data *rdp)
+{
+       return &rdp->nxtlist != rdp->nxttail[RCU_DONE_TAIL];
+}
+
+/*
+ * Does the current CPU require a yet-as-unscheduled grace period?
+ */
+static int
+cpu_needs_another_gp(struct rcu_state *rsp, struct rcu_data *rdp)
+{
+       /* ACCESS_ONCE() because we are accessing outside of lock. */
+       return *rdp->nxttail[RCU_DONE_TAIL] &&
+              ACCESS_ONCE(rsp->completed) == ACCESS_ONCE(rsp->gpnum);
+}
+
+/*
+ * Return the root node of the specified rcu_state structure.
+ */
+static struct rcu_node *rcu_get_root(struct rcu_state *rsp)
+{
+       return &rsp->node[0];
+}
+
+#ifdef CONFIG_SMP
+
+/*
+ * If the specified CPU is offline, tell the caller that it is in
+ * a quiescent state.  Otherwise, whack it with a reschedule IPI.
+ * Grace periods can end up waiting on an offline CPU when that
+ * CPU is in the process of coming online -- it will be added to the
+ * rcu_node bitmasks before it actually makes it online.  The same thing
+ * can happen while a CPU is in the process of going offline.  Because this
+ * race is quite rare, we check for it after detecting that the grace
+ * period has been delayed rather than checking each and every CPU
+ * each and every time we start a new grace period.
+ */
+static int rcu_implicit_offline_qs(struct rcu_data *rdp)
+{
+       /*
+        * If the CPU is offline, it is in a quiescent state.  We can
+        * trust its state not to change because interrupts are disabled.
+        */
+       if (cpu_is_offline(rdp->cpu)) {
+               rdp->offline_fqs++;
+               return 1;
+       }
+
+       /* The CPU is online, so send it a reschedule IPI. */
+       if (rdp->cpu != smp_processor_id())
+               smp_send_reschedule(rdp->cpu);
+       else
+               set_need_resched();
+       rdp->resched_ipi++;
+       return 0;
+}
+
+#endif /* #ifdef CONFIG_SMP */
+
+#ifdef CONFIG_NO_HZ
+static DEFINE_RATELIMIT_STATE(rcu_rs, 10 * HZ, 5);
+
+/**
+ * rcu_enter_nohz - inform RCU that current CPU is entering nohz
+ *
+ * Enter nohz mode, in other words, -leave- the mode in which RCU
+ * read-side critical sections can occur.  (Though RCU read-side
+ * critical sections can occur in irq handlers in nohz mode, a possibility
+ * handled by rcu_irq_enter() and rcu_irq_exit()).
+ */
+void rcu_enter_nohz(void)
+{
+       unsigned long flags;
+       struct rcu_dynticks *rdtp;
+
+       smp_mb(); /* CPUs seeing ++ must see prior RCU read-side crit sects */
+       local_irq_save(flags);
+       rdtp = &__get_cpu_var(rcu_dynticks);
+       rdtp->dynticks++;
+       rdtp->dynticks_nesting--;
+       WARN_ON_RATELIMIT(rdtp->dynticks & 0x1, &rcu_rs);
+       local_irq_restore(flags);
+}
+
+/*
+ * rcu_exit_nohz - inform RCU that current CPU is leaving nohz
+ *
+ * Exit nohz mode, in other words, -enter- the mode in which RCU
+ * read-side critical sections normally occur.
+ */
+void rcu_exit_nohz(void)
+{
+       unsigned long flags;
+       struct rcu_dynticks *rdtp;
+
+       local_irq_save(flags);
+       rdtp = &__get_cpu_var(rcu_dynticks);
+       rdtp->dynticks++;
+       rdtp->dynticks_nesting++;
+       WARN_ON_RATELIMIT(!(rdtp->dynticks & 0x1), &rcu_rs);
+       local_irq_restore(flags);
+       smp_mb(); /* CPUs seeing ++ must see later RCU read-side crit sects */
+}
+
+/**
+ * rcu_nmi_enter - inform RCU of entry to NMI context
+ *
+ * If the CPU was idle with dynamic ticks active, and there is no
+ * irq handler running, this updates rdtp->dynticks_nmi to let the
+ * RCU grace-period handling know that the CPU is active.
+ */
+void rcu_nmi_enter(void)
+{
+       struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks);
+
+       if (rdtp->dynticks & 0x1)
+               return;
+       rdtp->dynticks_nmi++;
+       WARN_ON_RATELIMIT(!(rdtp->dynticks_nmi & 0x1), &rcu_rs);
+       smp_mb(); /* CPUs seeing ++ must see later RCU read-side crit sects */
+}
+
+/**
+ * rcu_nmi_exit - inform RCU of exit from NMI context
+ *
+ * If the CPU was idle with dynamic ticks active, and there is no
+ * irq handler running, this updates rdtp->dynticks_nmi to let the
+ * RCU grace-period handling know that the CPU is no longer active.
+ */
+void rcu_nmi_exit(void)
+{
+       struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks);
+
+       if (rdtp->dynticks & 0x1)
+               return;
+       smp_mb(); /* CPUs seeing ++ must see prior RCU read-side crit sects */
+       rdtp->dynticks_nmi++;
+       WARN_ON_RATELIMIT(rdtp->dynticks_nmi & 0x1, &rcu_rs);
+}
+
+/**
+ * rcu_irq_enter - inform RCU of entry to hard irq context
+ *
+ * If the CPU was idle with dynamic ticks active, this updates the
+ * rdtp->dynticks to let the RCU handling know that the CPU is active.
+ */
+void rcu_irq_enter(void)
+{
+       struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks);
+
+       if (rdtp->dynticks_nesting++)
+               return;
+       rdtp->dynticks++;
+       WARN_ON_RATELIMIT(!(rdtp->dynticks & 0x1), &rcu_rs);
+       smp_mb(); /* CPUs seeing ++ must see later RCU read-side crit sects */
+}
+
+/**
+ * rcu_irq_exit - inform RCU of exit from hard irq context
+ *
+ * If the CPU was idle with dynamic ticks active, update rdtp->dynticks
+ * to let the RCU handling know that the CPU is going back to idle
+ * with no ticks.
+ */
+void rcu_irq_exit(void)
+{
+       struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks);
+
+       if (--rdtp->dynticks_nesting)
+               return;
+       smp_mb(); /* CPUs seeing ++ must see prior RCU read-side crit sects */
+       rdtp->dynticks++;
+       WARN_ON_RATELIMIT(rdtp->dynticks & 0x1, &rcu_rs);
+
+       /* If the interrupt queued a callback, get out of dyntick mode. */
+       if (__get_cpu_var(rcu_data).nxtlist ||
+           __get_cpu_var(rcu_bh_data).nxtlist)
+               set_need_resched();
+}
+
+/*
+ * Record the specified "completed" value, which is later used to validate
+ * dynticks counter manipulations.  Specify "rsp->completed - 1" to
+ * unconditionally invalidate any future dynticks manipulations (which is
+ * useful at the beginning of a grace period).
+ */
+static void dyntick_record_completed(struct rcu_state *rsp, long comp)
+{
+       rsp->dynticks_completed = comp;
+}
+
+#ifdef CONFIG_SMP
+
+/*
+ * Recall the previously recorded value of the completion for dynticks.
+ */
+static long dyntick_recall_completed(struct rcu_state *rsp)
+{
+       return rsp->dynticks_completed;
+}
+
+/*
+ * Snapshot the specified CPU's dynticks counter so that we can later
+ * credit them with an implicit quiescent state.  Return 1 if this CPU
+ * is already in a quiescent state courtesy of dynticks idle mode.
+ */
+static int dyntick_save_progress_counter(struct rcu_data *rdp)
+{
+       int ret;
+       int snap;
+       int snap_nmi;
+
+       snap = rdp->dynticks->dynticks;
+       snap_nmi = rdp->dynticks->dynticks_nmi;
+       smp_mb();       /* Order sampling of snap with end of grace period. */
+       rdp->dynticks_snap = snap;
+       rdp->dynticks_nmi_snap = snap_nmi;
+       ret = ((snap & 0x1) == 0) && ((snap_nmi & 0x1) == 0);
+       if (ret)
+               rdp->dynticks_fqs++;
+       return ret;
+}
+
+/*
+ * Return true if the specified CPU has passed through a quiescent
+ * state by virtue of being in or having passed through a dynticks
+ * idle state since the last call to dyntick_save_progress_counter()
+ * for this same CPU.
+ */
+static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
+{
+       long curr;
+       long curr_nmi;
+       long snap;
+       long snap_nmi;
+
+       curr = rdp->dynticks->dynticks;
+       snap = rdp->dynticks_snap;
+       curr_nmi = rdp->dynticks->dynticks_nmi;
+       snap_nmi = rdp->dynticks_nmi_snap;
+       smp_mb(); /* force ordering with cpu entering/leaving dynticks. */
+
+       /*
+        * If the CPU passed through or entered a dynticks idle phase with
+        * no active irq/NMI handlers, then we can safely pretend that the CPU
+        * already acknowledged the request to pass through a quiescent
+        * state.  Either way, that CPU cannot possibly be in an RCU
+        * read-side critical section that started before the beginning
+        * of the current RCU grace period.
+        */
+       if ((curr != snap || (curr & 0x1) == 0) &&
+           (curr_nmi != snap_nmi || (curr_nmi & 0x1) == 0)) {
+               rdp->dynticks_fqs++;
+               return 1;
+       }
+
+       /* Go check for the CPU being offline. */
+       return rcu_implicit_offline_qs(rdp);
+}
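+
+/*
+ * Illustration of the comparison above, using made-up counter values
+ * ("snap" taken earlier by dyntick_save_progress_counter(), "curr"
+ * read here); the ->dynticks_nmi pair is checked the same way:
+ *
+ *     curr == 8 (even)            CPU is in dynticks idle now     -> QS
+ *     snap == 7, curr == 9        CPU passed through idle since   -> QS
+ *     snap == 7, curr == 7 (odd)  no evidence of idle; fall back
+ *                                 to rcu_implicit_offline_qs()
+ */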
+
+#endif /* #ifdef CONFIG_SMP */
+
+#else /* #ifdef CONFIG_NO_HZ */
+
+static void dyntick_record_completed(struct rcu_state *rsp, long comp)
+{
+}
+
+#ifdef CONFIG_SMP
+
+/*
+ * If there are no dynticks, then the only way that a CPU can passively
+ * be in a quiescent state is to be offline.  Unlike dynticks idle, which
+ * is a point in time during the prior (already finished) grace period,
+ * an offline CPU is always in a quiescent state, so its quiescent state
+ * can be applied unconditionally.  So just return the current value
+ * of completed.
+ */
+static long dyntick_recall_completed(struct rcu_state *rsp)
+{
+       return rsp->completed;
+}
+
+static int dyntick_save_progress_counter(struct rcu_data *rdp)
+{
+       return 0;
+}
+
+static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
+{
+       return rcu_implicit_offline_qs(rdp);
+}
+
+#endif /* #ifdef CONFIG_SMP */
+
+#endif /* #else #ifdef CONFIG_NO_HZ */
+
+#ifdef CONFIG_RCU_CPU_STALL_DETECTOR
+
+static void record_gp_stall_check_time(struct rcu_state *rsp)
+{
+       rsp->gp_start = jiffies;
+       rsp->jiffies_stall = jiffies + RCU_SECONDS_TILL_STALL_CHECK;
+}
+
+static void print_other_cpu_stall(struct rcu_state *rsp)
+{
+       int cpu;
+       long delta;
+       unsigned long flags;
+       struct rcu_node *rnp = rcu_get_root(rsp);
+       struct rcu_node *rnp_cur = rsp->level[NUM_RCU_LVLS - 1];
+       struct rcu_node *rnp_end = &rsp->node[NUM_RCU_NODES];
+
+       /* Only let one CPU complain about others per time interval. */
+
+       spin_lock_irqsave(&rnp->lock, flags);
+       delta = jiffies - rsp->jiffies_stall;
+       if (delta < RCU_STALL_RAT_DELAY || rsp->gpnum == rsp->completed) {
+               spin_unlock_irqrestore(&rnp->lock, flags);
+               return;
+       }
+       rsp->jiffies_stall = jiffies + RCU_SECONDS_TILL_STALL_RECHECK;
+       spin_unlock_irqrestore(&rnp->lock, flags);
+
+       /* OK, time to rat on our buddy... */
+
+       printk(KERN_ERR "INFO: RCU detected CPU stalls:");
+       for (; rnp_cur < rnp_end; rnp_cur++) {
+               if (rnp_cur->qsmask == 0)
+                       continue;
+               for (cpu = 0; cpu <= rnp_cur->grphi - rnp_cur->grplo; cpu++)
+                       if (rnp_cur->qsmask & (1UL << cpu))
+                               printk(" %d", rnp_cur->grplo + cpu);
+       }
+       printk(" (detected by %d, t=%ld jiffies)\n",
+              smp_processor_id(), (long)(jiffies - rsp->gp_start));
+       force_quiescent_state(rsp, 0);  /* Kick them all. */
+}
+
+static void print_cpu_stall(struct rcu_state *rsp)
+{
+       unsigned long flags;
+       struct rcu_node *rnp = rcu_get_root(rsp);
+
+       printk(KERN_ERR "INFO: RCU detected CPU %d stall (t=%lu jiffies)\n",
+                       smp_processor_id(), jiffies - rsp->gp_start);
+       dump_stack();
+       spin_lock_irqsave(&rnp->lock, flags);
+       if ((long)(jiffies - rsp->jiffies_stall) >= 0)
+               rsp->jiffies_stall =
+                       jiffies + RCU_SECONDS_TILL_STALL_RECHECK;
+       spin_unlock_irqrestore(&rnp->lock, flags);
+       set_need_resched();  /* kick ourselves to get things going. */
+}
+
+static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp)
+{
+       long delta;
+       struct rcu_node *rnp;
+
+       delta = jiffies - rsp->jiffies_stall;
+       rnp = rdp->mynode;
+       if ((rnp->qsmask & rdp->grpmask) && delta >= 0) {
+
+               /* We haven't checked in, so go dump stack. */
+               print_cpu_stall(rsp);
+
+       } else if (rsp->gpnum != rsp->completed &&
+                  delta >= RCU_STALL_RAT_DELAY) {
+
+               /* They had two time units to dump stack, so complain. */
+               print_other_cpu_stall(rsp);
+       }
+}
+
+#else /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
+
+static void record_gp_stall_check_time(struct rcu_state *rsp)
+{
+}
+
+static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp)
+{
+}
+
+#endif /* #else #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
+
+/*
+ * Update CPU-local rcu_data state to record the newly noticed grace period.
+ * This is used both when this CPU starts a grace period and when it
+ * notices that some other CPU has started one.
+ */
+static void note_new_gpnum(struct rcu_state *rsp, struct rcu_data *rdp)
+{
+       rdp->qs_pending = 1;
+       rdp->passed_quiesc = 0;
+       rdp->gpnum = rsp->gpnum;
+       rdp->n_rcu_pending_force_qs = rdp->n_rcu_pending +
+                                     RCU_JIFFIES_TILL_FORCE_QS;
+}
+
+/*
+ * Did someone else start a new RCU grace period since we last
+ * checked?  Update local state appropriately if so.  Must be called
+ * on the CPU corresponding to rdp.
+ */
+static int
+check_for_new_grace_period(struct rcu_state *rsp, struct rcu_data *rdp)
+{
+       unsigned long flags;
+       int ret = 0;
+
+       local_irq_save(flags);
+       if (rdp->gpnum != rsp->gpnum) {
+               note_new_gpnum(rsp, rdp);
+               ret = 1;
+       }
+       local_irq_restore(flags);
+       return ret;
+}
+
+/*
+ * Start a new RCU grace period if warranted, re-initializing the hierarchy
+ * in preparation for detecting the next grace period.  The caller must hold
+ * the root node's ->lock, which is released before return.  Hard irqs must
+ * be disabled.
+ */
+static void
+rcu_start_gp(struct rcu_state *rsp, unsigned long flags)
+       __releases(rcu_get_root(rsp)->lock)
+{
+       struct rcu_data *rdp = rsp->rda[smp_processor_id()];
+       struct rcu_node *rnp = rcu_get_root(rsp);
+       struct rcu_node *rnp_cur;
+       struct rcu_node *rnp_end;
+
+       if (!cpu_needs_another_gp(rsp, rdp)) {
+               spin_unlock_irqrestore(&rnp->lock, flags);
+               return;
+       }
+
+       /* Advance to a new grace period and initialize state. */
+       rsp->gpnum++;
+       rsp->signaled = RCU_GP_INIT; /* Hold off force_quiescent_state. */
+       rsp->jiffies_force_qs = jiffies + RCU_JIFFIES_TILL_FORCE_QS;
+       rdp->n_rcu_pending_force_qs = rdp->n_rcu_pending +
+                                     RCU_JIFFIES_TILL_FORCE_QS;
+       record_gp_stall_check_time(rsp);
+       dyntick_record_completed(rsp, rsp->completed - 1);
+       note_new_gpnum(rsp, rdp);
+
+       /*
+        * Because we are first, we know that all our callbacks will
+        * be covered by this upcoming grace period, even the ones
+        * that were registered arbitrarily recently.
+        */
+       rdp->nxttail[RCU_NEXT_READY_TAIL] = rdp->nxttail[RCU_NEXT_TAIL];
+       rdp->nxttail[RCU_WAIT_TAIL] = rdp->nxttail[RCU_NEXT_TAIL];
+
+       /* Special-case the common single-level case. */
+       if (NUM_RCU_NODES == 1) {
+               rnp->qsmask = rnp->qsmaskinit;
+               spin_unlock_irqrestore(&rnp->lock, flags);
+               return;
+       }
+
+       spin_unlock(&rnp->lock);  /* leave irqs disabled. */
+
+
+       /* Exclude any concurrent CPU-hotplug operations. */
+       spin_lock(&rsp->onofflock);  /* irqs already disabled. */
+
+       /*
+        * Set the quiescent-state-needed bits in all the non-leaf RCU
+        * nodes for all currently online CPUs.  This operation relies
+        * on the layout of the hierarchy within the rsp->node[] array.
+        * Note that other CPUs will access only the leaves of the
+        * hierarchy, which still indicate that no grace period is in
+        * progress.  In addition, we have excluded CPU-hotplug operations.
+        *
+        * We therefore do not need to hold any locks.  Any required
+        * memory barriers will be supplied by the locks guarding the
+        * leaf rcu_nodes in the hierarchy.
+        */
+
+       rnp_end = rsp->level[NUM_RCU_LVLS - 1];
+       for (rnp_cur = &rsp->node[0]; rnp_cur < rnp_end; rnp_cur++)
+               rnp_cur->qsmask = rnp_cur->qsmaskinit;
+
+       /*
+        * Now set up the leaf nodes.  Here we must be careful.  First,
+        * we need to hold the lock in order to exclude other CPUs, which
+        * might be contending for the leaf nodes' locks.  Second, as
+        * soon as we initialize a given leaf node, its CPUs might run
+        * up the rest of the hierarchy.  We must therefore acquire locks
+        * for each node that we touch during this stage.  (But we still
+        * are excluding CPU-hotplug operations.)
+        *
+        * Note that the grace period cannot complete until we finish
+        * the initialization process, as there will be at least one
+        * qsmask bit set in the root node until that time, namely the
+        * one corresponding to this CPU.
+        */
+       rnp_end = &rsp->node[NUM_RCU_NODES];
+       rnp_cur = rsp->level[NUM_RCU_LVLS - 1];
+       for (; rnp_cur < rnp_end; rnp_cur++) {
+               spin_lock(&rnp_cur->lock);      /* irqs already disabled. */
+               rnp_cur->qsmask = rnp_cur->qsmaskinit;
+               spin_unlock(&rnp_cur->lock);    /* irqs already disabled. */
+       }
+
+       rsp->signaled = RCU_SIGNAL_INIT; /* force_quiescent_state now OK. */
+       spin_unlock_irqrestore(&rsp->onofflock, flags);
+}
+
+/*
+ * Advance this CPU's callbacks, but only if the current grace period
+ * has ended.  This may be called only from the CPU to which the rdp
+ * belongs.
+ */
+static void
+rcu_process_gp_end(struct rcu_state *rsp, struct rcu_data *rdp)
+{
+       long completed_snap;
+       unsigned long flags;
+
+       local_irq_save(flags);
+       completed_snap = ACCESS_ONCE(rsp->completed);  /* outside of lock. */
+
+       /* Did another grace period end? */
+       if (rdp->completed != completed_snap) {
+
+               /* Advance callbacks.  No harm if list empty. */
+               rdp->nxttail[RCU_DONE_TAIL] = rdp->nxttail[RCU_WAIT_TAIL];
+               rdp->nxttail[RCU_WAIT_TAIL] = rdp->nxttail[RCU_NEXT_READY_TAIL];
+               rdp->nxttail[RCU_NEXT_READY_TAIL] = rdp->nxttail[RCU_NEXT_TAIL];
+
+               /* Remember that we saw this grace-period completion. */
+               rdp->completed = completed_snap;
+       }
+       local_irq_restore(flags);
+}
+
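+/*
+ * Sketch of the segmented callback list manipulated above (and by
+ * rcu_start_gp() and rcu_do_batch()).  The callbacks cb1..cb4 are
+ * hypothetical; each ->nxttail[] entry addresses the ->next field of
+ * the last callback in its segment (or ->nxtlist itself when that
+ * segment, and all earlier ones, are empty):
+ *
+ *     ->nxtlist --> cb1 --> cb2 --> cb3 --> cb4 --> NULL
+ *     nxttail[RCU_DONE_TAIL]       == &cb1->next  (ready to invoke)
+ *     nxttail[RCU_WAIT_TAIL]       == &cb3->next  (waiting on current GP)
+ *     nxttail[RCU_NEXT_READY_TAIL] == &cb4->next  (covered by next GP)
+ *     nxttail[RCU_NEXT_TAIL]       == &cb4->next  (end of list)
+ */
+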
+/*
+ * A helper for cpu_quiet() that allows a group of CPUs to be quieted
+ * in one go, though all the CPUs in the group must be represented by
+ * the same leaf rcu_node structure.
+ * That structure's lock must be held upon entry, and it is released
+ * before return.
+ */
+static void
+cpu_quiet_msk(unsigned long mask, struct rcu_state *rsp, struct rcu_node *rnp,
+             unsigned long flags)
+       __releases(rnp->lock)
+{
+       /* Walk up the rcu_node hierarchy. */
+       for (;;) {
+               if (!(rnp->qsmask & mask)) {
+
+                       /* Our bit has already been cleared, so done. */
+                       spin_unlock_irqrestore(&rnp->lock, flags);
+                       return;
+               }
+               rnp->qsmask &= ~mask;
+               if (rnp->qsmask != 0) {
+
+                       /* Other bits still set at this level, so done. */
+                       spin_unlock_irqrestore(&rnp->lock, flags);
+                       return;
+               }
+               mask = rnp->grpmask;
+               if (rnp->parent == NULL) {
+
+                       /* No more levels.  Exit loop holding root lock. */
+
+                       break;
+               }
+               spin_unlock_irqrestore(&rnp->lock, flags);
+               rnp = rnp->parent;
+               spin_lock_irqsave(&rnp->lock, flags);
+       }
+
+       /*
+        * Get here if we are the last CPU to pass through a quiescent
+        * state for this grace period.  Clean up and let rcu_start_gp()
+        * start up the next grace period if one is needed.  Note that
+        * we still hold rnp->lock, as required by rcu_start_gp(), which
+        * will release it.
+        */
+       rsp->completed = rsp->gpnum;
+       rcu_process_gp_end(rsp, rsp->rda[smp_processor_id()]);
+       rcu_start_gp(rsp, flags);  /* releases rnp->lock. */
+}
+
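+/*
+ * Small worked example of the walk above, with hypothetical masks.
+ * Suppose a leaf rcu_node has ->qsmask == 0x5 and the reporting CPU's
+ * mask is 0x4: clearing that bit leaves 0x1, so another CPU in the
+ * group still owes a quiescent state and the walk stops at the leaf.
+ * Had ->qsmask been 0x4, clearing it would leave 0, so the walk would
+ * continue upward, clearing this leaf's ->grpmask bit in its parent,
+ * until either some bits remain at a level or the root is reached and
+ * the grace period is marked complete.
+ */
+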
+/*
+ * Record a quiescent state for the specified CPU, which must either be
+ * the current CPU or an offline CPU.  The lastcomp argument is used to
+ * make sure we are still in the grace period of interest.  We don't want
+ * to end the current grace period based on quiescent states detected in
+ * an earlier grace period!
+ */
+static void
+cpu_quiet(int cpu, struct rcu_state *rsp, struct rcu_data *rdp, long lastcomp)
+{
+       unsigned long flags;
+       unsigned long mask;
+       struct rcu_node *rnp;
+
+       rnp = rdp->mynode;
+       spin_lock_irqsave(&rnp->lock, flags);
+       if (lastcomp != ACCESS_ONCE(rsp->completed)) {
+
+               /*
+                * Someone beat us to it for this grace period, so leave.
+                * The race with GP start is resolved by the fact that we
+                * hold the leaf rcu_node lock, so that the per-CPU bits
+                * cannot yet be initialized -- so we would simply find our
+                * CPU's bit already cleared in cpu_quiet_msk() if this race
+                * occurred.
+                */
+               rdp->passed_quiesc = 0; /* try again later! */
+               spin_unlock_irqrestore(&rnp->lock, flags);
+               return;
+       }
+       mask = rdp->grpmask;
+       if ((rnp->qsmask & mask) == 0) {
+               spin_unlock_irqrestore(&rnp->lock, flags);
+       } else {
+               rdp->qs_pending = 0;
+
+               /*
+                * This GP can't end until this CPU checks in, so all of our
+                * callbacks can be processed during the next GP.
+                */
+               rdp = rsp->rda[smp_processor_id()];
+               rdp->nxttail[RCU_NEXT_READY_TAIL] = rdp->nxttail[RCU_NEXT_TAIL];
+
+               cpu_quiet_msk(mask, rsp, rnp, flags); /* releases rnp->lock */
+       }
+}
+
+/*
+ * Check to see if there is a new grace period of which this CPU
+ * is not yet aware, and if so, set up local rcu_data state for it.
+ * Otherwise, see if this CPU has just passed through its first
+ * quiescent state for this grace period, and record that fact if so.
+ */
+static void
+rcu_check_quiescent_state(struct rcu_state *rsp, struct rcu_data *rdp)
+{
+       /* If there is now a new grace period, record and return. */
+       if (check_for_new_grace_period(rsp, rdp))
+               return;
+
+       /*
+        * Does this CPU still need to do its part for the current grace
+        * period?  If not, return and let the other CPUs do theirs.
+        */
+       if (!rdp->qs_pending)
+               return;
+
+       /*
+        * Was there a quiescent state since the beginning of the grace
+        * period?  If not, then exit and wait for the next call.
+        */
+       if (!rdp->passed_quiesc)
+               return;
+
+       /* Tell RCU we are done (but cpu_quiet() will be the judge of that). */
+       cpu_quiet(rdp->cpu, rsp, rdp, rdp->passed_quiesc_completed);
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+
+/*
+ * Remove the outgoing CPU from the bitmasks in the rcu_node hierarchy
+ * and move all callbacks from the outgoing CPU to the current one.
+ */
+static void __rcu_offline_cpu(int cpu, struct rcu_state *rsp)
+{
+       int i;
+       unsigned long flags;
+       long lastcomp;
+       unsigned long mask;
+       struct rcu_data *rdp = rsp->rda[cpu];
+       struct rcu_data *rdp_me;
+       struct rcu_node *rnp;
+
+       /* Exclude any attempts to start a new grace period. */
+       spin_lock_irqsave(&rsp->onofflock, flags);
+
+       /* Remove the outgoing CPU from the masks in the rcu_node hierarchy. */
+       rnp = rdp->mynode;
+       mask = rdp->grpmask;    /* rnp->grplo is constant. */
+       do {
+               spin_lock(&rnp->lock);          /* irqs already disabled. */
+               rnp->qsmaskinit &= ~mask;
+               if (rnp->qsmaskinit != 0) {
+                       spin_unlock(&rnp->lock); /* irqs already disabled. */
+                       break;
+               }
+               mask = rnp->grpmask;
+               spin_unlock(&rnp->lock);        /* irqs already disabled. */
+               rnp = rnp->parent;
+       } while (rnp != NULL);
+       lastcomp = rsp->completed;
+
+       spin_unlock(&rsp->onofflock);           /* irqs remain disabled. */
+
+       /* Being offline is a quiescent state, so go record it. */
+       cpu_quiet(cpu, rsp, rdp, lastcomp);
+
+       /*
+        * Move callbacks from the outgoing CPU to the running CPU.
+        * Note that the outgoing CPU is now quiescent, so it is now
+        * (uncharacteristically) safe to access its rcu_data structure.
+        * Note also that we must carefully retain the order of the
+        * outgoing CPU's callbacks in order for rcu_barrier() to work
+        * correctly.  Finally, note that we start all the callbacks
+        * afresh, even those that have passed through a grace period
+        * and are therefore ready to invoke.  The theory is that hotplug
+        * events are rare, and that if they are frequent enough to
+        * indefinitely delay callbacks, you have far worse things to
+        * be worrying about.
+        */
+       rdp_me = rsp->rda[smp_processor_id()];
+       if (rdp->nxtlist != NULL) {
+               *rdp_me->nxttail[RCU_NEXT_TAIL] = rdp->nxtlist;
+               rdp_me->nxttail[RCU_NEXT_TAIL] = rdp->nxttail[RCU_NEXT_TAIL];
+               rdp->nxtlist = NULL;
+               for (i = 0; i < RCU_NEXT_SIZE; i++)
+                       rdp->nxttail[i] = &rdp->nxtlist;
+               rdp_me->qlen += rdp->qlen;
+               rdp->qlen = 0;
+       }
+       local_irq_restore(flags);
+}
+
+/*
+ * Remove the specified CPU from the RCU hierarchy and move any pending
+ * callbacks that it might have to the current CPU.  This code assumes
+ * that at least one CPU in the system will remain running at all times.
+ * Any attempt to offline -all- CPUs is likely to strand RCU callbacks.
+ */
+static void rcu_offline_cpu(int cpu)
+{
+       __rcu_offline_cpu(cpu, &rcu_state);
+       __rcu_offline_cpu(cpu, &rcu_bh_state);
+}
+
+#else /* #ifdef CONFIG_HOTPLUG_CPU */
+
+static void rcu_offline_cpu(int cpu)
+{
+}
+
+#endif /* #else #ifdef CONFIG_HOTPLUG_CPU */
+
+/*
+ * Invoke any RCU callbacks that have made it to the end of their grace
+ * period.  Throttle as specified by rdp->blimit.
+ */
+static void rcu_do_batch(struct rcu_data *rdp)
+{
+       unsigned long flags;
+       struct rcu_head *next, *list, **tail;
+       int count;
+
+       /* If no callbacks are ready, just return. */
+       if (!cpu_has_callbacks_ready_to_invoke(rdp))
+               return;
+
+       /*
+        * Extract the list of ready callbacks, disabling to prevent
+        * races with call_rcu() from interrupt handlers.
+        */
+       local_irq_save(flags);
+       list = rdp->nxtlist;
+       rdp->nxtlist = *rdp->nxttail[RCU_DONE_TAIL];
+       *rdp->nxttail[RCU_DONE_TAIL] = NULL;
+       tail = rdp->nxttail[RCU_DONE_TAIL];
+       for (count = RCU_NEXT_SIZE - 1; count >= 0; count--)
+               if (rdp->nxttail[count] == rdp->nxttail[RCU_DONE_TAIL])
+                       rdp->nxttail[count] = &rdp->nxtlist;
+       local_irq_restore(flags);
+
+       /* Invoke callbacks. */
+       count = 0;
+       while (list) {
+               next = list->next;
+               prefetch(next);
+               list->func(list);
+               list = next;
+               if (++count >= rdp->blimit)
+                       break;
+       }
+
+       local_irq_save(flags);
+
+       /* Update count, and requeue any remaining callbacks. */
+       rdp->qlen -= count;
+       if (list != NULL) {
+               *tail = rdp->nxtlist;
+               rdp->nxtlist = list;
+               for (count = 0; count < RCU_NEXT_SIZE; count++)
+                       if (&rdp->nxtlist == rdp->nxttail[count])
+                               rdp->nxttail[count] = tail;
+                       else
+                               break;
+       }
+
+       /* Reinstate batch limit if we have worked down the excess. */
+       if (rdp->blimit == LONG_MAX && rdp->qlen <= qlowmark)
+               rdp->blimit = blimit;
+
+       local_irq_restore(flags);
+
+       /* Re-raise the RCU softirq if there are callbacks remaining. */
+       if (cpu_has_callbacks_ready_to_invoke(rdp))
+               raise_softirq(RCU_SOFTIRQ);
+}
+
+/*
+ * Check to see if this CPU is in a non-context-switch quiescent state
+ * (user mode or idle loop for rcu, non-softirq execution for rcu_bh).
+ * Also schedule the RCU softirq handler.
+ *
+ * This function must be called with hardirqs disabled.  It is normally
+ * invoked from the scheduling-clock interrupt.  If rcu_pending returns
+ * false, there is no point in invoking rcu_check_callbacks().
+ */
+void rcu_check_callbacks(int cpu, int user)
+{
+       if (user ||
+           (idle_cpu(cpu) && !in_softirq() &&
+                               hardirq_count() <= (1 << HARDIRQ_SHIFT))) {
+
+               /*
+                * Get here if this CPU took its interrupt from user
+                * mode or from the idle loop, and if this is not a
+                * nested interrupt.  In this case, the CPU is in
+                * a quiescent state, so count it.
+                *
+                * No memory barrier is required here because both
+                * rcu_qsctr_inc() and rcu_bh_qsctr_inc() reference
+                * only CPU-local variables that other CPUs neither
+                * access nor modify, at least not while the corresponding
+                * CPU is online.
+                */
+
+               rcu_qsctr_inc(cpu);
+               rcu_bh_qsctr_inc(cpu);
+
+       } else if (!in_softirq()) {
+
+               /*
+                * Get here if this CPU did not take its interrupt from
+                * softirq, in other words, if it is not interrupting
+                * an rcu_bh read-side critical section.  This is
+                * therefore an rcu_bh quiescent state, so count it.
+                */
+
+               rcu_bh_qsctr_inc(cpu);
+       }
+       raise_softirq(RCU_SOFTIRQ);
+}
+
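+/*
+ * For reference, the scheduling-clock path that normally reaches the
+ * function above looks roughly like the following (simplified sketch;
+ * the exact call chain is architecture- and config-dependent):
+ *
+ *     update_process_times(user_tick)
+ *             if (rcu_pending(cpu))
+ *                     rcu_check_callbacks(cpu, user_tick);
+ */
+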
+#ifdef CONFIG_SMP
+
+/*
+ * Scan the leaf rcu_node structures, processing dyntick state for any that
+ * have not yet encountered a quiescent state, using the function specified.
+ * Returns 1 if the current grace period ends while scanning (possibly
+ * because we made it end).
+ */
+static int rcu_process_dyntick(struct rcu_state *rsp, long lastcomp,
+                              int (*f)(struct rcu_data *))
+{
+       unsigned long bit;
+       int cpu;
+       unsigned long flags;
+       unsigned long mask;
+       struct rcu_node *rnp_cur = rsp->level[NUM_RCU_LVLS - 1];
+       struct rcu_node *rnp_end = &rsp->node[NUM_RCU_NODES];
+
+       for (; rnp_cur < rnp_end; rnp_cur++) {
+               mask = 0;
+               spin_lock_irqsave(&rnp_cur->lock, flags);
+               if (rsp->completed != lastcomp) {
+                       spin_unlock_irqrestore(&rnp_cur->lock, flags);
+                       return 1;
+               }
+               if (rnp_cur->qsmask == 0) {
+                       spin_unlock_irqrestore(&rnp_cur->lock, flags);
+                       continue;
+               }
+               cpu = rnp_cur->grplo;
+               bit = 1;
+               for (; cpu <= rnp_cur->grphi; cpu++, bit <<= 1) {
+                       if ((rnp_cur->qsmask & bit) != 0 && f(rsp->rda[cpu]))
+                               mask |= bit;
+               }
+               if (mask != 0 && rsp->completed == lastcomp) {
+
+                       /* cpu_quiet_msk() releases rnp_cur->lock. */
+                       cpu_quiet_msk(mask, rsp, rnp_cur, flags);
+                       continue;
+               }
+               spin_unlock_irqrestore(&rnp_cur->lock, flags);
+       }
+       return 0;
+}
+
+/*
+ * Force quiescent states on reluctant CPUs, and also detect which
+ * CPUs are in dyntick-idle mode.
+ */
+static void force_quiescent_state(struct rcu_state *rsp, int relaxed)
+{
+       unsigned long flags;
+       long lastcomp;
+       struct rcu_data *rdp = rsp->rda[smp_processor_id()];
+       struct rcu_node *rnp = rcu_get_root(rsp);
+       u8 signaled;
+
+       if (ACCESS_ONCE(rsp->completed) == ACCESS_ONCE(rsp->gpnum))
+               return;  /* No grace period in progress, nothing to force. */
+       if (!spin_trylock_irqsave(&rsp->fqslock, flags)) {
+               rsp->n_force_qs_lh++; /* Inexact, can lose counts.  Tough! */
+               return; /* Someone else is already on the job. */
+       }
+       if (relaxed &&
+           (long)(rsp->jiffies_force_qs - jiffies) >= 0 &&
+           (rdp->n_rcu_pending_force_qs - rdp->n_rcu_pending) >= 0)
+               goto unlock_ret; /* no emergency and done recently. */
+       rsp->n_force_qs++;
+       spin_lock(&rnp->lock);
+       lastcomp = rsp->completed;
+       signaled = rsp->signaled;
+       rsp->jiffies_force_qs = jiffies + RCU_JIFFIES_TILL_FORCE_QS;
+       rdp->n_rcu_pending_force_qs = rdp->n_rcu_pending +
+                                     RCU_JIFFIES_TILL_FORCE_QS;
+       if (lastcomp == rsp->gpnum) {
+               rsp->n_force_qs_ngp++;
+               spin_unlock(&rnp->lock);
+               goto unlock_ret;  /* no GP in progress, time updated. */
+       }
+       spin_unlock(&rnp->lock);
+       switch (signaled) {
+       case RCU_GP_INIT:
+
+               break; /* grace period still initializing, ignore. */
+
+       case RCU_SAVE_DYNTICK:
+
+               if (RCU_SIGNAL_INIT != RCU_SAVE_DYNTICK)
+                       break; /* So gcc recognizes the dead code. */
+
+               /* Record dyntick-idle state. */
+               if (rcu_process_dyntick(rsp, lastcomp,
+                                       dyntick_save_progress_counter))
+                       goto unlock_ret;
+
+               /* Update state, record completion counter. */
+               spin_lock(&rnp->lock);
+               if (lastcomp == rsp->completed) {
+                       rsp->signaled = RCU_FORCE_QS;
+                       dyntick_record_completed(rsp, lastcomp);
+               }
+               spin_unlock(&rnp->lock);
+               break;
+
+       case RCU_FORCE_QS:
+
+               /* Check dyntick-idle state, send IPI to laggards. */
+               if (rcu_process_dyntick(rsp, dyntick_recall_completed(rsp),
+                                       rcu_implicit_dynticks_qs))
+                       goto unlock_ret;
+
+               /* Leave state in case more forcing is required. */
+
+               break;
+       }
+unlock_ret:
+       spin_unlock_irqrestore(&rsp->fqslock, flags);
+}
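+
+/*
+ * Rough summary of the ->signaled states driven above (the dead-code
+ * check in the RCU_SAVE_DYNTICK case suggests that RCU_SIGNAL_INIT is
+ * defined as RCU_SAVE_DYNTICK when CONFIG_NO_HZ is set):
+ *
+ *     RCU_GP_INIT       grace period still initializing, do nothing
+ *     RCU_SAVE_DYNTICK  snapshot each holdout CPU's dynticks counters
+ *     RCU_FORCE_QS      re-check dynticks and send resched IPIs to
+ *                       CPUs that have still not reported a QS
+ */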
+
+#else /* #ifdef CONFIG_SMP */
+
+static void force_quiescent_state(struct rcu_state *rsp, int relaxed)
+{
+       set_need_resched();
+}
+
+#endif /* #else #ifdef CONFIG_SMP */
+
+/*
+ * This does the RCU processing work from softirq context for the
+ * specified rcu_state and rcu_data structures.  This may be called
+ * only from the CPU to which the rdp belongs.
+ */
+static void
+__rcu_process_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
+{
+       unsigned long flags;
+
+       /*
+        * If an RCU GP has gone long enough, go check for dyntick
+        * idle CPUs and, if needed, send resched IPIs.
+        */
+       if ((long)(ACCESS_ONCE(rsp->jiffies_force_qs) - jiffies) < 0 ||
+           (rdp->n_rcu_pending_force_qs - rdp->n_rcu_pending) < 0)
+               force_quiescent_state(rsp, 1);
+
+       /*
+        * Advance callbacks in response to end of earlier grace
+        * period that some other CPU ended.
+        */
+       rcu_process_gp_end(rsp, rdp);
+
+       /* Update RCU state based on any recent quiescent states. */
+       rcu_check_quiescent_state(rsp, rdp);
+
+       /* Does this CPU require a not-yet-started grace period? */
+       if (cpu_needs_another_gp(rsp, rdp)) {
+               spin_lock_irqsave(&rcu_get_root(rsp)->lock, flags);
+               rcu_start_gp(rsp, flags);  /* releases above lock */
+       }
+
+       /* If there are callbacks ready, invoke them. */
+       rcu_do_batch(rdp);
+}
+
+/*
+ * Do softirq processing for the current CPU.
+ */
+static void rcu_process_callbacks(struct softirq_action *unused)
+{
+       /*
+        * Memory references from any prior RCU read-side critical sections
+        * executed by the interrupted code must be seen before any RCU
+        * grace-period manipulations below.
+        */
+       smp_mb(); /* See above block comment. */
+
+       __rcu_process_callbacks(&rcu_state, &__get_cpu_var(rcu_data));
+       __rcu_process_callbacks(&rcu_bh_state, &__get_cpu_var(rcu_bh_data));
+
+       /*
+        * Memory references from any later RCU read-side critical sections
+        * executed by the interrupted code must be seen after any RCU
+        * grace-period manipulations above.
+        */
+       smp_mb(); /* See above block comment. */
+}
+
+static void
+__call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
+          struct rcu_state *rsp)
+{
+       unsigned long flags;
+       struct rcu_data *rdp;
+
+       head->func = func;
+       head->next = NULL;
+
+       smp_mb(); /* Ensure RCU update seen before callback registry. */
+
+       /*
+        * Opportunistically note grace-period endings and beginnings.
+        * Note that we might see a beginning right after we see an
+        * end, but never vice versa, since this CPU has to pass through
+        * a quiescent state betweentimes.
+        */
+       local_irq_save(flags);
+       rdp = rsp->rda[smp_processor_id()];
+       rcu_process_gp_end(rsp, rdp);
+       check_for_new_grace_period(rsp, rdp);
+
+       /* Add the callback to our list. */
+       *rdp->nxttail[RCU_NEXT_TAIL] = head;
+       rdp->nxttail[RCU_NEXT_TAIL] = &head->next;
+
+       /* Start a new grace period if one not already started. */
+       if (ACCESS_ONCE(rsp->completed) == ACCESS_ONCE(rsp->gpnum)) {
+               unsigned long nestflag;
+               struct rcu_node *rnp_root = rcu_get_root(rsp);
+
+               spin_lock_irqsave(&rnp_root->lock, nestflag);
+               rcu_start_gp(rsp, nestflag);  /* releases rnp_root->lock. */
+       }
+
+       /* Force the grace period if too many callbacks or too long waiting. */
+       if (unlikely(++rdp->qlen > qhimark)) {
+               rdp->blimit = LONG_MAX;
+               force_quiescent_state(rsp, 0);
+       } else if ((long)(ACCESS_ONCE(rsp->jiffies_force_qs) - jiffies) < 0 ||
+                  (rdp->n_rcu_pending_force_qs - rdp->n_rcu_pending) < 0)
+               force_quiescent_state(rsp, 1);
+       local_irq_restore(flags);
+}
+
+/*
+ * Queue an RCU callback for invocation after a grace period.
+ */
+void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
+{
+       __call_rcu(head, func, &rcu_state);
+}
+EXPORT_SYMBOL_GPL(call_rcu);
+
+/*
+ * Queue an RCU callback for invocation after a quicker grace period.
+ */
+void call_rcu_bh(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
+{
+       __call_rcu(head, func, &rcu_bh_state);
+}
+EXPORT_SYMBOL_GPL(call_rcu_bh);
+
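+/*
+ * Typical usage sketch for the interfaces above; "struct foo",
+ * foo_reclaim() and the RCU-protected list membership are hypothetical
+ * and shown only for illustration:
+ *
+ *     static void foo_reclaim(struct rcu_head *rh)
+ *     {
+ *             struct foo *fp = container_of(rh, struct foo, rcu);
+ *             kfree(fp);
+ *     }
+ *
+ *     list_del_rcu(&fp->list);
+ *     call_rcu(&fp->rcu, foo_reclaim);
+ *
+ * The callback is invoked only after all pre-existing RCU read-side
+ * critical sections have completed, so no reader that might still be
+ * referencing *fp can be running when kfree() is called.
+ */
+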
+/*
+ * Check to see if there is any immediate RCU-related work to be done
+ * by the current CPU, for the specified type of RCU, returning 1 if so.
+ * The checks are in order of increasing expense: checks that can be
+ * carried out against CPU-local state are performed first.  However,
+ * we must check for CPU stalls first, else we might not get a chance.
+ */
+static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp)
+{
+       rdp->n_rcu_pending++;
+
+       /* Check for CPU stalls, if enabled. */
+       check_cpu_stall(rsp, rdp);
+
+       /* Is the RCU core waiting for a quiescent state from this CPU? */
+       if (rdp->qs_pending)
+               return 1;
+
+       /* Does this CPU have callbacks ready to invoke? */
+       if (cpu_has_callbacks_ready_to_invoke(rdp))
+               return 1;
+
+       /* Has RCU gone idle with this CPU needing another grace period? */
+       if (cpu_needs_another_gp(rsp, rdp))
+               return 1;
+
+       /* Has another RCU grace period completed?  */
+       if (ACCESS_ONCE(rsp->completed) != rdp->completed) /* outside of lock */
+               return 1;
+
+       /* Has a new RCU grace period started? */
+       if (ACCESS_ONCE(rsp->gpnum) != rdp->gpnum) /* outside of lock */
+               return 1;
+
+       /* Has an RCU GP gone long enough to send resched IPIs &c? */
+       if (ACCESS_ONCE(rsp->completed) != ACCESS_ONCE(rsp->gpnum) &&
+           ((long)(ACCESS_ONCE(rsp->jiffies_force_qs) - jiffies) < 0 ||
+            (rdp->n_rcu_pending_force_qs - rdp->n_rcu_pending) < 0))
+               return 1;
+
+       /* nothing to do */
+       return 0;
+}
+
+/*
+ * Check to see if there is any immediate RCU-related work to be done
+ * by the current CPU, returning 1 if so.  This function is part of the
+ * RCU implementation; it is -not- an exported member of the RCU API.
+ */
+int rcu_pending(int cpu)
+{
+       return __rcu_pending(&rcu_state, &per_cpu(rcu_data, cpu)) ||
+              __rcu_pending(&rcu_bh_state, &per_cpu(rcu_bh_data, cpu));
+}
+
+/*
+ * Check to see if any future RCU-related work will need to be done
+ * by the current CPU, even if none need be done immediately, returning
+ * 1 if so.  This function is part of the RCU implementation; it is -not-
+ * an exported member of the RCU API.
+ */
+int rcu_needs_cpu(int cpu)
+{
+       /* RCU callbacks either ready or pending? */
+       return per_cpu(rcu_data, cpu).nxtlist ||
+              per_cpu(rcu_bh_data, cpu).nxtlist;
+}
+
+/*
+ * Initialize a CPU's per-CPU RCU data.  We take this "scorched earth"
+ * approach so that we don't have to worry about how long the CPU has
+ * been gone, or whether it ever was online previously.  We do trust the
+ * ->mynode field, as it is constant for a given struct rcu_data and
+ * initialized during early boot.
+ *
+ * Note that only one online or offline event can be happening at a given
+ * time.  Note also that we can accept some slop in the rsp->completed
+ * access due to the fact that this CPU cannot possibly have any RCU
+ * callbacks in flight yet.
+ */
+static void
+rcu_init_percpu_data(int cpu, struct rcu_state *rsp)
+{
+       unsigned long flags;
+       int i;
+       long lastcomp;
+       unsigned long mask;
+       struct rcu_data *rdp = rsp->rda[cpu];
+       struct rcu_node *rnp = rcu_get_root(rsp);
+
+       /* Set up local state, ensuring consistent view of global state. */
+       spin_lock_irqsave(&rnp->lock, flags);
+       lastcomp = rsp->completed;
+       rdp->completed = lastcomp;
+       rdp->gpnum = lastcomp;
+       rdp->passed_quiesc = 0;  /* We could be racing with new GP, */
+       rdp->qs_pending = 1;     /*  so set up to respond to current GP. */
+       rdp->beenonline = 1;     /* We have now been online. */
+       rdp->passed_quiesc_completed = lastcomp - 1;
+       rdp->grpmask = 1UL << (cpu - rdp->mynode->grplo);
+       rdp->nxtlist = NULL;
+       for (i = 0; i < RCU_NEXT_SIZE; i++)
+               rdp->nxttail[i] = &rdp->nxtlist;
+       rdp->qlen = 0;
+       rdp->blimit = blimit;
+#ifdef CONFIG_NO_HZ
+       rdp->dynticks = &per_cpu(rcu_dynticks, cpu);
+#endif /* #ifdef CONFIG_NO_HZ */
+       rdp->cpu = cpu;
+       spin_unlock(&rnp->lock);                /* irqs remain disabled. */
+
+       /*
+        * A new grace period might start here.  If so, we won't be part
+        * of it, but that is OK, as we are currently in a quiescent state.
+        */
+
+       /* Exclude any attempts to start a new GP on large systems. */
+       spin_lock(&rsp->onofflock);             /* irqs already disabled. */
+
+       /* Add CPU to rcu_node bitmasks. */
+       rnp = rdp->mynode;
+       mask = rdp->grpmask;
+       do {
+               /* Exclude any attempts to start a new GP on small systems. */
+               spin_lock(&rnp->lock);  /* irqs already disabled. */
+               rnp->qsmaskinit |= mask;
+               mask = rnp->grpmask;
+               spin_unlock(&rnp->lock); /* irqs already disabled. */
+               rnp = rnp->parent;
+       } while (rnp != NULL && !(rnp->qsmaskinit & mask));
+
+       spin_unlock(&rsp->onofflock);           /* irqs remain disabled. */
+
+       /*
+        * A new grace period might start here.  If so, we will be part of
+        * it, and its gpnum will be greater than ours, so we will
+        * participate.  It is also possible for the gpnum to have been
+        * incremented before this function was called, and the bitmasks
+        * to not be filled out until now, in which case we will also
+        * participate due to our gpnum being behind.
+        */
+
+       /* Since it is coming online, the CPU is in a quiescent state. */
+       cpu_quiet(cpu, rsp, rdp, lastcomp);
+       local_irq_restore(flags);
+}
+
+static void __cpuinit rcu_online_cpu(int cpu)
+{
+#ifdef CONFIG_NO_HZ
+       struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
+
+       rdtp->dynticks_nesting = 1;
+       rdtp->dynticks |= 1;    /* need consecutive #s even for hotplug. */
+       rdtp->dynticks_nmi = (rdtp->dynticks_nmi + 1) & ~0x1;
+#endif /* #ifdef CONFIG_NO_HZ */
+       rcu_init_percpu_data(cpu, &rcu_state);
+       rcu_init_percpu_data(cpu, &rcu_bh_state);
+       open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
+}
+
+/*
+ * Handle CPU online/offline notification events.
+ */
+static int __cpuinit rcu_cpu_notify(struct notifier_block *self,
+                               unsigned long action, void *hcpu)
+{
+       long cpu = (long)hcpu;
+
+       switch (action) {
+       case CPU_UP_PREPARE:
+       case CPU_UP_PREPARE_FROZEN:
+               rcu_online_cpu(cpu);
+               break;
+       case CPU_DEAD:
+       case CPU_DEAD_FROZEN:
+       case CPU_UP_CANCELED:
+       case CPU_UP_CANCELED_FROZEN:
+               rcu_offline_cpu(cpu);
+               break;
+       default:
+               break;
+       }
+       return NOTIFY_OK;
+}
+
+/*
+ * Compute the per-level fanout, either using the exact fanout specified
+ * or balancing the tree, depending on CONFIG_RCU_FANOUT_EXACT.
+ */
+#ifdef CONFIG_RCU_FANOUT_EXACT
+static void __init rcu_init_levelspread(struct rcu_state *rsp)
+{
+       int i;
+
+       for (i = NUM_RCU_LVLS - 1; i >= 0; i--)
+               rsp->levelspread[i] = CONFIG_RCU_FANOUT;
+}
+#else /* #ifdef CONFIG_RCU_FANOUT_EXACT */
+static void __init rcu_init_levelspread(struct rcu_state *rsp)
+{
+       int ccur;
+       int cprv;
+       int i;
+
+       cprv = NR_CPUS;
+       for (i = NUM_RCU_LVLS - 1; i >= 0; i--) {
+               ccur = rsp->levelcnt[i];
+               rsp->levelspread[i] = (cprv + ccur - 1) / ccur;
+               cprv = ccur;
+       }
+}
+#endif /* #else #ifdef CONFIG_RCU_FANOUT_EXACT */
+
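+/*
+ * Numerical illustration of the balanced computation above, using
+ * hypothetical values NR_CPUS == 100, NUM_RCU_LVLS == 2 and
+ * ->levelcnt == { 1, 4 }:
+ *
+ *     i == 1:  levelspread[1] = (100 + 4 - 1) / 4 = 25  (CPUs per leaf)
+ *     i == 0:  levelspread[0] = (4 + 1 - 1) / 1 = 4     (leaves under root)
+ */
+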
+/*
+ * Helper function for rcu_init() that initializes one rcu_state structure.
+ */
+static void __init rcu_init_one(struct rcu_state *rsp)
+{
+       int cpustride = 1;
+       int i;
+       int j;
+       struct rcu_node *rnp;
+
+       /* Initialize the level-tracking arrays. */
+
+       for (i = 1; i < NUM_RCU_LVLS; i++)
+               rsp->level[i] = rsp->level[i - 1] + rsp->levelcnt[i - 1];
+       rcu_init_levelspread(rsp);
+
+       /* Initialize the elements themselves, starting from the leaves. */
+
+       for (i = NUM_RCU_LVLS - 1; i >= 0; i--) {
+               cpustride *= rsp->levelspread[i];
+               rnp = rsp->level[i];
+               for (j = 0; j < rsp->levelcnt[i]; j++, rnp++) {
+                       spin_lock_init(&rnp->lock);
+                       rnp->qsmask = 0;
+                       rnp->qsmaskinit = 0;
+                       rnp->grplo = j * cpustride;
+                       rnp->grphi = (j + 1) * cpustride - 1;
+                       if (rnp->grphi >= NR_CPUS)
+                               rnp->grphi = NR_CPUS - 1;
+                       if (i == 0) {
+                               rnp->grpnum = 0;
+                               rnp->grpmask = 0;
+                               rnp->parent = NULL;
+                       } else {
+                               rnp->grpnum = j % rsp->levelspread[i - 1];
+                               rnp->grpmask = 1UL << rnp->grpnum;
+                               rnp->parent = rsp->level[i - 1] +
+                                             j / rsp->levelspread[i - 1];
+                       }
+                       rnp->level = i;
+               }
+       }
+}
+
+/*
+ * Helper macro for __rcu_init().  To be used nowhere else!
+ * Assigns leaf node pointers into each CPU's rcu_data structure.
+ */
+#define RCU_DATA_PTR_INIT(rsp, rcu_data) \
+do { \
+       rnp = (rsp)->level[NUM_RCU_LVLS - 1]; \
+       j = 0; \
+       for_each_possible_cpu(i) { \
+               if (i > rnp[j].grphi) \
+                       j++; \
+               per_cpu(rcu_data, i).mynode = &rnp[j]; \
+               (rsp)->rda[i] = &per_cpu(rcu_data, i); \
+       } \
+} while (0)
+
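+/*
+ * Example of the mapping performed by RCU_DATA_PTR_INIT() above, for a
+ * hypothetical configuration with two CPUs per leaf: leaf rnp[0] has
+ * grplo == 0 and grphi == 1, so CPUs 0 and 1 get ->mynode = &rnp[0];
+ * CPU 2 exceeds rnp[0].grphi, so j advances and CPUs 2 and 3 get
+ * ->mynode = &rnp[1]; and so on across the possible CPUs.
+ */
+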
+static struct notifier_block __cpuinitdata rcu_nb = {
+       .notifier_call  = rcu_cpu_notify,
+};
+
+void __init __rcu_init(void)
+{
+       int i;                  /* All used by RCU_DATA_PTR_INIT(). */
+       int j;
+       struct rcu_node *rnp;
+
+       printk(KERN_WARNING "Experimental hierarchical RCU implementation.\n");
+#ifdef CONFIG_RCU_CPU_STALL_DETECTOR
+       printk(KERN_INFO "RCU-based detection of stalled CPUs is enabled.\n");
+#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
+       rcu_init_one(&rcu_state);
+       RCU_DATA_PTR_INIT(&rcu_state, rcu_data);
+       rcu_init_one(&rcu_bh_state);
+       RCU_DATA_PTR_INIT(&rcu_bh_state, rcu_bh_data);
+
+       for_each_online_cpu(i)
+               rcu_cpu_notify(&rcu_nb, CPU_UP_PREPARE, (void *)(long)i);
+       /* Register notifier for non-boot CPUs */
+       register_cpu_notifier(&rcu_nb);
+       printk(KERN_WARNING "Experimental hierarchical RCU init done.\n");
+}
+
+module_param(blimit, int, 0);
+module_param(qhimark, int, 0);
+module_param(qlowmark, int, 0);
diff --git a/kernel/rcutree_trace.c b/kernel/rcutree_trace.c
new file mode 100644 (file)
index 0000000..d6db3e8
--- /dev/null
@@ -0,0 +1,271 @@
+/*
+ * Read-Copy Update tracing for hierarchical implementation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright IBM Corporation, 2008
+ *
+ * Papers:  http://www.rdrop.com/users/paulmck/RCU
+ *
+ * For detailed explanation of Read-Copy Update mechanism see -
+ *             Documentation/RCU
+ *
+ */
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/smp.h>
+#include <linux/rcupdate.h>
+#include <linux/interrupt.h>
+#include <linux/sched.h>
+#include <asm/atomic.h>
+#include <linux/bitops.h>
+#include <linux/module.h>
+#include <linux/completion.h>
+#include <linux/moduleparam.h>
+#include <linux/percpu.h>
+#include <linux/notifier.h>
+#include <linux/cpu.h>
+#include <linux/mutex.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+
+static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp)
+{
+       if (!rdp->beenonline)
+               return;
+       seq_printf(m, "%3d%cc=%ld g=%ld pq=%d pqc=%ld qp=%d rpfq=%ld rp=%x",
+                  rdp->cpu,
+                  cpu_is_offline(rdp->cpu) ? '!' : ' ',
+                  rdp->completed, rdp->gpnum,
+                  rdp->passed_quiesc, rdp->passed_quiesc_completed,
+                  rdp->qs_pending,
+                  rdp->n_rcu_pending_force_qs - rdp->n_rcu_pending,
+                  (int)(rdp->n_rcu_pending & 0xffff));
+#ifdef CONFIG_NO_HZ
+       seq_printf(m, " dt=%d/%d dn=%d df=%lu",
+                  rdp->dynticks->dynticks,
+                  rdp->dynticks->dynticks_nesting,
+                  rdp->dynticks->dynticks_nmi,
+                  rdp->dynticks_fqs);
+#endif /* #ifdef CONFIG_NO_HZ */
+       seq_printf(m, " of=%lu ri=%lu", rdp->offline_fqs, rdp->resched_ipi);
+       seq_printf(m, " ql=%ld b=%ld\n", rdp->qlen, rdp->blimit);
+}
+
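+/*
+ * A line produced by the function above looks roughly as follows (all
+ * numbers are made up for illustration, the line is wrapped here for
+ * readability, and the dt/dn/df fields appear only when CONFIG_NO_HZ
+ * is set):
+ *
+ *     0 c=17829 g=17829 pq=1 pqc=17828 qp=0 rpfq=3 rp=1
+ *       dt=0/1 dn=0 df=18 of=0 ri=5 ql=2 b=10
+ */
+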
+#define PRINT_RCU_DATA(name, func, m) \
+       do { \
+               int _p_r_d_i; \
+               \
+               for_each_possible_cpu(_p_r_d_i) \
+                       func(m, &per_cpu(name, _p_r_d_i)); \
+       } while (0)
+
+static int show_rcudata(struct seq_file *m, void *unused)
+{
+       seq_puts(m, "rcu:\n");
+       PRINT_RCU_DATA(rcu_data, print_one_rcu_data, m);
+       seq_puts(m, "rcu_bh:\n");
+       PRINT_RCU_DATA(rcu_bh_data, print_one_rcu_data, m);
+       return 0;
+}
+
+static int rcudata_open(struct inode *inode, struct file *file)
+{
+       return single_open(file, show_rcudata, NULL);
+}
+
+static struct file_operations rcudata_fops = {
+       .owner = THIS_MODULE,
+       .open = rcudata_open,
+       .read = seq_read,
+       .llseek = seq_lseek,
+       .release = single_release,
+};
+
+static void print_one_rcu_data_csv(struct seq_file *m, struct rcu_data *rdp)
+{
+       if (!rdp->beenonline)
+               return;
+       seq_printf(m, "%d,%s,%ld,%ld,%d,%ld,%d,%ld,%ld",
+                  rdp->cpu,
+                  cpu_is_offline(rdp->cpu) ? "\"Y\"" : "\"N\"",
+                  rdp->completed, rdp->gpnum,
+                  rdp->passed_quiesc, rdp->passed_quiesc_completed,
+                  rdp->qs_pending,
+                  rdp->n_rcu_pending_force_qs - rdp->n_rcu_pending,
+                  rdp->n_rcu_pending);
+#ifdef CONFIG_NO_HZ
+       seq_printf(m, ",%d,%d,%d,%lu",
+                  rdp->dynticks->dynticks,
+                  rdp->dynticks->dynticks_nesting,
+                  rdp->dynticks->dynticks_nmi,
+                  rdp->dynticks_fqs);
+#endif /* #ifdef CONFIG_NO_HZ */
+       seq_printf(m, ",%lu,%lu", rdp->offline_fqs, rdp->resched_ipi);
+       seq_printf(m, ",%ld,%ld\n", rdp->qlen, rdp->blimit);
+}
+
+static int show_rcudata_csv(struct seq_file *m, void *unused)
+{
+       seq_puts(m, "\"CPU\",\"Online?\",\"c\",\"g\",\"pq\",\"pqc\",\"qp\",\"rpfq\",\"rp\",");
+#ifdef CONFIG_NO_HZ
+       seq_puts(m, "\"dt\",\"dt nesting\",\"dn\",\"df\",");
+#endif /* #ifdef CONFIG_NO_HZ */
+       seq_puts(m, "\"of\",\"ri\",\"ql\",\"b\"\n");
+       seq_puts(m, "\"rcu:\"\n");
+       PRINT_RCU_DATA(rcu_data, print_one_rcu_data_csv, m);
+       seq_puts(m, "\"rcu_bh:\"\n");
+       PRINT_RCU_DATA(rcu_bh_data, print_one_rcu_data_csv, m);
+       return 0;
+}
+
+static int rcudata_csv_open(struct inode *inode, struct file *file)
+{
+       return single_open(file, show_rcudata_csv, NULL);
+}
+
+static struct file_operations rcudata_csv_fops = {
+       .owner = THIS_MODULE,
+       .open = rcudata_csv_open,
+       .read = seq_read,
+       .llseek = seq_lseek,
+       .release = single_release,
+};
+
+static void print_one_rcu_state(struct seq_file *m, struct rcu_state *rsp)
+{
+       int level = 0;
+       struct rcu_node *rnp;
+
+       seq_printf(m, "c=%ld g=%ld s=%d jfq=%ld j=%x "
+                     "nfqs=%lu/nfqsng=%lu(%lu) fqlh=%lu\n",
+                  rsp->completed, rsp->gpnum, rsp->signaled,
+                  (long)(rsp->jiffies_force_qs - jiffies),
+                  (int)(jiffies & 0xffff),
+                  rsp->n_force_qs, rsp->n_force_qs_ngp,
+                  rsp->n_force_qs - rsp->n_force_qs_ngp,
+                  rsp->n_force_qs_lh);
+       for (rnp = &rsp->node[0]; rnp - &rsp->node[0] < NUM_RCU_NODES; rnp++) {
+               if (rnp->level != level) {
+                       seq_puts(m, "\n");
+                       level = rnp->level;
+               }
+               seq_printf(m, "%lx/%lx %d:%d ^%d    ",
+                          rnp->qsmask, rnp->qsmaskinit,
+                          rnp->grplo, rnp->grphi, rnp->grpnum);
+       }
+       seq_puts(m, "\n");
+}
+
+static int show_rcuhier(struct seq_file *m, void *unused)
+{
+       seq_puts(m, "rcu:\n");
+       print_one_rcu_state(m, &rcu_state);
+       seq_puts(m, "rcu_bh:\n");
+       print_one_rcu_state(m, &rcu_bh_state);
+       return 0;
+}
+
+static int rcuhier_open(struct inode *inode, struct file *file)
+{
+       return single_open(file, show_rcuhier, NULL);
+}
+
+static struct file_operations rcuhier_fops = {
+       .owner = THIS_MODULE,
+       .open = rcuhier_open,
+       .read = seq_read,
+       .llseek = seq_lseek,
+       .release = single_release,
+};
+
+static int show_rcugp(struct seq_file *m, void *unused)
+{
+       seq_printf(m, "rcu: completed=%ld  gpnum=%ld\n",
+                  rcu_state.completed, rcu_state.gpnum);
+       seq_printf(m, "rcu_bh: completed=%ld  gpnum=%ld\n",
+                  rcu_bh_state.completed, rcu_bh_state.gpnum);
+       return 0;
+}
+
+static int rcugp_open(struct inode *inode, struct file *file)
+{
+       return single_open(file, show_rcugp, NULL);
+}
+
+static struct file_operations rcugp_fops = {
+       .owner = THIS_MODULE,
+       .open = rcugp_open,
+       .read = seq_read,
+       .llseek = seq_lseek,
+       .release = single_release,
+};
+
+static struct dentry *rcudir, *datadir, *datadir_csv, *hierdir, *gpdir;
+static int __init rcuclassic_trace_init(void)
+{
+       rcudir = debugfs_create_dir("rcu", NULL);
+       if (!rcudir)
+               goto out;
+
+       datadir = debugfs_create_file("rcudata", 0444, rcudir,
+                                               NULL, &rcudata_fops);
+       if (!datadir)
+               goto free_out;
+
+       datadir_csv = debugfs_create_file("rcudata.csv", 0444, rcudir,
+                                               NULL, &rcudata_csv_fops);
+       if (!datadir_csv)
+               goto free_out;
+
+       gpdir = debugfs_create_file("rcugp", 0444, rcudir, NULL, &rcugp_fops);
+       if (!gpdir)
+               goto free_out;
+
+       hierdir = debugfs_create_file("rcuhier", 0444, rcudir,
+                                               NULL, &rcuhier_fops);
+       if (!hierdir)
+               goto free_out;
+       return 0;
+free_out:
+       if (datadir)
+               debugfs_remove(datadir);
+       if (datadir_csv)
+               debugfs_remove(datadir_csv);
+       if (gpdir)
+               debugfs_remove(gpdir);
+       debugfs_remove(rcudir);
+out:
+       return 1;
+}
+
+static void __exit rcuclassic_trace_cleanup(void)
+{
+       debugfs_remove(datadir);
+       debugfs_remove(datadir_csv);
+       debugfs_remove(gpdir);
+       debugfs_remove(hierdir);
+       debugfs_remove(rcudir);
+}
+
+
+module_init(rcuclassic_trace_init);
+module_exit(rcuclassic_trace_cleanup);
+
+MODULE_AUTHOR("Paul E. McKenney");
+MODULE_DESCRIPTION("Read-Copy Update tracing for hierarchical implementation");
+MODULE_LICENSE("GPL");
index 4337063663efe39f8de666d16c591c84f66db0f7..e633106b12f6ffe7f8da4fa48c6105582052e411 100644 (file)
@@ -853,6 +853,15 @@ int iomem_map_sanity_check(resource_size_t addr, unsigned long size)
                if (PFN_DOWN(p->start) <= PFN_DOWN(addr) &&
                    PFN_DOWN(p->end) >= PFN_DOWN(addr + size - 1))
                        continue;
+               /*
+                * if a resource is "BUSY", it's not a hardware resource
+                * but a driver mapping of such a resource; we don't want
+                * to warn for those; some drivers legitimately map only
+                * partial hardware resources. (example: vesafb)
+                */
+               if (p->flags & IORESOURCE_BUSY)
+                       continue;
+
                printk(KERN_WARNING "resource map sanity check conflict: "
                       "0x%llx 0x%llx 0x%llx 0x%llx %s\n",
                       (unsigned long long)addr,
index 748ff924a29056e57f5c30058ce39ea7e470b23e..fff1c4a20b6538966a0cf2b97a012c045d52b84d 100644 (file)
@@ -209,7 +209,6 @@ void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime)
        hrtimer_init(&rt_b->rt_period_timer,
                        CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        rt_b->rt_period_timer.function = sched_rt_period_timer;
-       rt_b->rt_period_timer.cb_mode = HRTIMER_CB_IRQSAFE_UNLOCKED;
 }
 
 static inline int rt_bandwidth_enabled(void)
@@ -1139,7 +1138,6 @@ static void init_rq_hrtick(struct rq *rq)
 
        hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        rq->hrtick_timer.function = hrtick;
-       rq->hrtick_timer.cb_mode = HRTIMER_CB_IRQSAFE_PERCPU;
 }
 #else  /* CONFIG_SCHED_HRTICK */
 static inline void hrtick_clear(struct rq *rq)
@@ -4192,7 +4190,6 @@ void account_steal_time(struct task_struct *p, cputime_t steal)
 
        if (p == rq->idle) {
                p->stime = cputime_add(p->stime, steal);
-               account_group_system_time(p, steal);
                if (atomic_read(&rq->nr_iowait) > 0)
                        cpustat->iowait = cputime64_add(cpustat->iowait, tmp);
                else
@@ -4328,7 +4325,7 @@ void __kprobes sub_preempt_count(int val)
        /*
         * Underflow?
         */
-       if (DEBUG_LOCKS_WARN_ON(val > preempt_count()))
+       if (DEBUG_LOCKS_WARN_ON(val > preempt_count() - (!!kernel_locked())))
                return;
        /*
         * Is the spinlock portion underflowing?
index e7c69a720d69a0a7d9166c21883e05d2521cb26a..466e75ce271aa0795ac50266d9d3bc2acdde7261 100644 (file)
@@ -102,20 +102,6 @@ void local_bh_disable(void)
 
 EXPORT_SYMBOL(local_bh_disable);
 
-void __local_bh_enable(void)
-{
-       WARN_ON_ONCE(in_irq());
-
-       /*
-        * softirqs should never be enabled by __local_bh_enable(),
-        * it always nests inside local_bh_enable() sections:
-        */
-       WARN_ON_ONCE(softirq_count() == SOFTIRQ_OFFSET);
-
-       sub_preempt_count(SOFTIRQ_OFFSET);
-}
-EXPORT_SYMBOL_GPL(__local_bh_enable);
-
 /*
  * Special-case - softirqs can safely be enabled in
  * cond_resched_softirq(), or by __do_softirq(),
@@ -269,6 +255,7 @@ void irq_enter(void)
 {
        int cpu = smp_processor_id();
 
+       rcu_irq_enter();
        if (idle_cpu(cpu) && !in_interrupt()) {
                __irq_enter();
                tick_check_idle(cpu);
@@ -295,9 +282,9 @@ void irq_exit(void)
 
 #ifdef CONFIG_NO_HZ
        /* Make sure that timer wheel updates are propagated */
-       if (!in_interrupt() && idle_cpu(smp_processor_id()) && !need_resched())
-               tick_nohz_stop_sched_tick(0);
        rcu_irq_exit();
+       if (idle_cpu(smp_processor_id()) && !in_interrupt() && !need_resched())
+               tick_nohz_stop_sched_tick(0);
 #endif
        preempt_enable_no_resched();
 }
index dc0b3be6b7d52cb98bd84d599bfbb7e7d85b1e85..1ab790c67b174592401712b093bdc56a8aa3d537 100644 (file)
@@ -164,7 +164,7 @@ unsigned long __read_mostly sysctl_hung_task_check_count = 1024;
 /*
  * Zero means infinite timeout - no checking done:
  */
-unsigned long __read_mostly sysctl_hung_task_timeout_secs = 120;
+unsigned long __read_mostly sysctl_hung_task_timeout_secs = 480;
 
 unsigned long __read_mostly sysctl_hung_task_warnings = 10;
 
index 94b527ef1d1e37fe060ab812f13ef7276910549f..eb212f8f8bc801dab1ee7622936ba8caaec4369f 100644 (file)
@@ -6,6 +6,7 @@
  *  Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
  */
 #include <linux/sched.h>
+#include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/kallsyms.h>
 #include <linux/stacktrace.h>
@@ -24,3 +25,13 @@ void print_stack_trace(struct stack_trace *trace, int spaces)
 }
 EXPORT_SYMBOL_GPL(print_stack_trace);
 
+/*
+ * Architectures that do not implement save_stack_trace_tsk get this
+ * weak alias and a once-per-bootup warning (whenever this facility
+ * is utilized - for example by procfs):
+ */
+__weak void
+save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
+{
+       WARN_ONCE(1, KERN_INFO "save_stack_trace_tsk() not implemented yet.\n");
+}
index ebe65c2c9873382a6017c35fb2954ad5bb54029b..d356d79e84ac5682553e76d1bb7f449e5f808e69 100644 (file)
@@ -907,8 +907,8 @@ void do_sys_times(struct tms *tms)
        struct task_cputime cputime;
        cputime_t cutime, cstime;
 
-       spin_lock_irq(&current->sighand->siglock);
        thread_group_cputime(current, &cputime);
+       spin_lock_irq(&current->sighand->siglock);
        cutime = current->signal->cutime;
        cstime = current->signal->cstime;
        spin_unlock_irq(&current->sighand->siglock);
index 0b627d9c93d89c801c5bca63c4e0cdeb7c463fad..ff6d45c7626f09e183913072f0649d075f028c6a 100644 (file)
@@ -121,6 +121,10 @@ extern int sg_big_buff;
 #include <asm/system.h>
 #endif
 
+#ifdef CONFIG_SPARC64
+extern int sysctl_tsb_ratio;
+#endif
+
 #ifdef __hppa__
 extern int pwrsw_enabled;
 extern int unaligned_enabled;
@@ -451,6 +455,16 @@ static struct ctl_table kern_table[] = {
                .proc_handler   = &proc_dointvec,
        },
 #endif
+#ifdef CONFIG_SPARC64
+       {
+               .ctl_name       = CTL_UNNUMBERED,
+               .procname       = "tsb-ratio",
+               .data           = &sysctl_tsb_ratio,
+               .maxlen         = sizeof (int),
+               .mode           = 0644,
+               .proc_handler   = &proc_dointvec,
+       },
+#endif
 #ifdef __hppa__
        {
                .ctl_name       = KERN_HPPA_PWRSW,
index 8ff15e5d486b137e65f96c64b5a206315bab7ee4..f5f793d924151736d92034ea0accd37fb354eef3 100644 (file)
@@ -131,7 +131,7 @@ static enum hrtimer_restart ntp_leap_second(struct hrtimer *timer)
 {
        enum hrtimer_restart res = HRTIMER_NORESTART;
 
-       write_seqlock_irq(&xtime_lock);
+       write_seqlock(&xtime_lock);
 
        switch (time_state) {
        case TIME_OK:
@@ -164,7 +164,7 @@ static enum hrtimer_restart ntp_leap_second(struct hrtimer *timer)
        }
        update_vsyscall(&xtime, clock);
 
-       write_sequnlock_irq(&xtime_lock);
+       write_sequnlock(&xtime_lock);
 
        return res;
 }
index 342fc9ccab46d5132aae1c58b1fbce0d02a2feaf..8f3fc2582d38b7073927e9dc004ef38e16909ab0 100644 (file)
@@ -247,7 +247,7 @@ void tick_nohz_stop_sched_tick(int inidle)
        if (need_resched())
                goto end;
 
-       if (unlikely(local_softirq_pending())) {
+       if (unlikely(local_softirq_pending() && cpu_online(cpu))) {
                static int ratelimit;
 
                if (ratelimit < 10) {
@@ -282,8 +282,31 @@ void tick_nohz_stop_sched_tick(int inidle)
        /* Schedule the tick, if we are at least one jiffie off */
        if ((long)delta_jiffies >= 1) {
 
+               /*
+                * calculate the expiry time for the next timer wheel
+                * timer
+                */
+               expires = ktime_add_ns(last_update, tick_period.tv64 *
+                                  delta_jiffies);
+
+               /*
+                * If this cpu is the one which updates jiffies, then
+                * give up the assignment and let it be taken by the
+                * cpu which runs the tick timer next, which might be
+                * this cpu as well. If we don't drop this here the
+                * jiffies might be stale and do_timer() never
+                * invoked.
+                */
+               if (cpu == tick_do_timer_cpu)
+                       tick_do_timer_cpu = TICK_DO_TIMER_NONE;
+
                if (delta_jiffies > 1)
                        cpu_set(cpu, nohz_cpu_mask);
+
+               /* Skip reprogramming of the event if it hasn't changed */
+               if (ts->tick_stopped && ktime_equal(expires, dev->next_event))
+                       goto out;
+
                /*
                 * nohz_stop_sched_tick can be called several times before
                 * the nohz_restart_sched_tick is called. This happens when
@@ -306,17 +329,6 @@ void tick_nohz_stop_sched_tick(int inidle)
                        rcu_enter_nohz();
                }
 
-               /*
-                * If this cpu is the one which updates jiffies, then
-                * give up the assignment and let it be taken by the
-                * cpu which runs the tick timer next, which might be
-                * this cpu as well. If we don't drop this here the
-                * jiffies might be stale and do_timer() never
-                * invoked.
-                */
-               if (cpu == tick_do_timer_cpu)
-                       tick_do_timer_cpu = TICK_DO_TIMER_NONE;
-
                ts->idle_sleeps++;
 
                /*
@@ -332,12 +344,7 @@ void tick_nohz_stop_sched_tick(int inidle)
                        goto out;
                }
 
-               /*
-                * calculate the expiry time for the next timer wheel
-                * timer
-                */
-               expires = ktime_add_ns(last_update, tick_period.tv64 *
-                                      delta_jiffies);
+               /* Mark expiries */
                ts->idle_expires = expires;
 
                if (ts->nohz_mode == NOHZ_MODE_HIGHRES) {
@@ -681,7 +688,6 @@ void tick_setup_sched_timer(void)
         */
        hrtimer_init(&ts->sched_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
        ts->sched_timer.function = tick_sched_timer;
-       ts->sched_timer.cb_mode = HRTIMER_CB_IRQSAFE_PERCPU;
 
        /* Get the next period (per cpu) */
        hrtimer_set_expires(&ts->sched_timer, tick_init_jiffy_update());
index 01becf1f19ff78fad129aa6474ec550c428519ae..a5779bd975db0a386cd67d67629a2bf53e72764e 100644 (file)
@@ -202,7 +202,6 @@ static void start_stack_timer(int cpu)
 
        hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        hrtimer->function = stack_trace_timer_fn;
-       hrtimer->cb_mode = HRTIMER_CB_IRQSAFE_PERCPU;
 
        hrtimer_start(hrtimer, ns_to_ktime(sample_period), HRTIMER_MODE_REL);
 }
index af65ae7f05494ba9298550ee60c0668df6e97aca..2e75478e9c696bc6933ca8952c0a013dd30eb78a 100644 (file)
@@ -252,6 +252,14 @@ config DEBUG_OBJECTS_TIMERS
          timer routines to track the life time of timer objects and
          validate the timer operations.
 
+config DEBUG_OBJECTS_ENABLE_DEFAULT
+       int "debug_objects bootup default value (0-1)"
+       range 0 1
+       default "1"
+       depends on DEBUG_OBJECTS
+       help
+         Debug objects boot parameter default value
+
 config DEBUG_SLAB
        bool "Debug slab memory allocations"
        depends on DEBUG_KERNEL && SLAB
@@ -545,6 +553,16 @@ config DEBUG_SG
 
          If unsure, say N.
 
+config DEBUG_NOTIFIERS
+       bool "Debug notifier call chains"
+       depends on DEBUG_KERNEL
+       help
+         Enable this to turn on sanity checking for notifier call chains.
+         This is most useful for kernel developers to make sure that
+         modules properly unregister themselves from notifier chains.
+         This is a relatively cheap check but if you care about maximum
+         performance, say N.
+
 config FRAME_POINTER
        bool "Compile the kernel with frame pointers"
        depends on DEBUG_KERNEL && \
@@ -619,6 +637,19 @@ config RCU_CPU_STALL_DETECTOR
 
          Say N if you are unsure.
 
+config RCU_CPU_STALL_DETECTOR
+       bool "Check for stalled CPUs delaying RCU grace periods"
+       depends on CLASSIC_RCU || TREE_RCU
+       default n
+       help
+         This option causes RCU to printk information on which
+         CPUs are delaying the current grace period, but only when
+         the grace period extends for excessive time periods.
+
+         Say Y if you want RCU to perform such checks.
+
+         Say N if you are unsure.
+
 config KPROBES_SANITY_TEST
        bool "Kprobes sanity tests"
        depends on DEBUG_KERNEL
index e3ab374e1334ab80fc172c3d256d2eb4fddd665e..5d99be1fd988bbd3df0f73f07fbee5e079d4cb53 100644 (file)
@@ -45,7 +45,9 @@ static struct kmem_cache      *obj_cache;
 static int                     debug_objects_maxchain __read_mostly;
 static int                     debug_objects_fixups __read_mostly;
 static int                     debug_objects_warnings __read_mostly;
-static int                     debug_objects_enabled __read_mostly;
+static int                     debug_objects_enabled __read_mostly
+                               = CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT;
+
 static struct debug_obj_descr  *descr_test  __read_mostly;
 
 static int __init enable_object_debug(char *str)
index 5f6c629a924d8da3a5e126b6fa76e12e562fe768..fa2dc4e5f9baca6a9ae5c71534c8b4caa16557d6 100644 (file)
 #include <linux/mm.h>
 #include <linux/module.h>
 #include <linux/spinlock.h>
 #include <linux/string.h>
+#include <linux/swiotlb.h>
 #include <linux/types.h>
 #include <linux/ctype.h>
+#include <linux/highmem.h>
 
 #include <asm/io.h>
 #include <asm/dma.h>
 #define OFFSET(val,align) ((unsigned long)     \
                           ( (val) & ( (align) - 1)))
 
-#define SG_ENT_VIRT_ADDRESS(sg)        (sg_virt((sg)))
-#define SG_ENT_PHYS_ADDRESS(sg)        virt_to_bus(SG_ENT_VIRT_ADDRESS(sg))
-
-/*
- * Maximum allowable number of contiguous slabs to map,
- * must be a power of 2.  What is the appropriate value ?
- * The complexity of {map,unmap}_single is linearly dependent on this value.
- */
-#define IO_TLB_SEGSIZE 128
-
-/*
- * log of the size of each IO TLB slab.  The number of slabs is command line
- * controllable.
- */
-#define IO_TLB_SHIFT 11
-
 #define SLABS_PER_PAGE (1 << (PAGE_SHIFT - IO_TLB_SHIFT))
 
 /*
@@ -102,7 +89,10 @@ static unsigned int io_tlb_index;
  * We need to save away the original address corresponding to a mapped entry
  * for the sync operations.
  */
-static unsigned char **io_tlb_orig_addr;
+static struct swiotlb_phys_addr {
+       struct page *page;
+       unsigned int offset;
+} *io_tlb_orig_addr;
 
 /*
  * Protect the above data structures in the map and unmap calls
@@ -126,6 +116,72 @@ setup_io_tlb_npages(char *str)
 __setup("swiotlb=", setup_io_tlb_npages);
 /* make io_tlb_overflow tunable too? */
 
+void * __weak swiotlb_alloc_boot(size_t size, unsigned long nslabs)
+{
+       return alloc_bootmem_low_pages(size);
+}
+
+void * __weak swiotlb_alloc(unsigned order, unsigned long nslabs)
+{
+       return (void *)__get_free_pages(GFP_DMA | __GFP_NOWARN, order);
+}
+
+dma_addr_t __weak swiotlb_phys_to_bus(phys_addr_t paddr)
+{
+       return paddr;
+}
+
+phys_addr_t __weak swiotlb_bus_to_phys(dma_addr_t baddr)
+{
+       return baddr;
+}
+
+static dma_addr_t swiotlb_virt_to_bus(volatile void *address)
+{
+       return swiotlb_phys_to_bus(virt_to_phys(address));
+}
+
+static void *swiotlb_bus_to_virt(dma_addr_t address)
+{
+       return phys_to_virt(swiotlb_bus_to_phys(address));
+}
+
+int __weak swiotlb_arch_range_needs_mapping(void *ptr, size_t size)
+{
+       return 0;
+}
+
+static dma_addr_t swiotlb_sg_to_bus(struct scatterlist *sg)
+{
+       return swiotlb_phys_to_bus(page_to_phys(sg_page(sg)) + sg->offset);
+}
+
+static void swiotlb_print_info(unsigned long bytes)
+{
+       phys_addr_t pstart, pend;
+       dma_addr_t bstart, bend;
+
+       pstart = virt_to_phys(io_tlb_start);
+       pend = virt_to_phys(io_tlb_end);
+
+       bstart = swiotlb_phys_to_bus(pstart);
+       bend = swiotlb_phys_to_bus(pend);
+
+       printk(KERN_INFO "Placing %luMB software IO TLB between %p - %p\n",
+              bytes >> 20, io_tlb_start, io_tlb_end);
+       if (pstart != bstart || pend != bend)
+               printk(KERN_INFO "software IO TLB at phys %#llx - %#llx"
+                      " bus %#llx - %#llx\n",
+                      (unsigned long long)pstart,
+                      (unsigned long long)pend,
+                      (unsigned long long)bstart,
+                      (unsigned long long)bend);
+       else
+               printk(KERN_INFO "software IO TLB at phys %#llx - %#llx\n",
+                      (unsigned long long)pstart,
+                      (unsigned long long)pend);
+}
+
 /*
  * Statically reserve bounce buffer space and initialize bounce buffer data
  * structures for the software IO TLB used to implement the DMA API.
@@ -145,7 +201,7 @@ swiotlb_init_with_default_size(size_t default_size)
        /*
         * Get IO TLB memory from the low pages
         */
-       io_tlb_start = alloc_bootmem_low_pages(bytes);
+       io_tlb_start = swiotlb_alloc_boot(bytes, io_tlb_nslabs);
        if (!io_tlb_start)
                panic("Cannot allocate SWIOTLB buffer");
        io_tlb_end = io_tlb_start + bytes;
@@ -159,7 +215,7 @@ swiotlb_init_with_default_size(size_t default_size)
        for (i = 0; i < io_tlb_nslabs; i++)
                io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
        io_tlb_index = 0;
-       io_tlb_orig_addr = alloc_bootmem(io_tlb_nslabs * sizeof(char *));
+       io_tlb_orig_addr = alloc_bootmem(io_tlb_nslabs * sizeof(struct swiotlb_phys_addr));
 
        /*
         * Get the overflow emergency buffer
@@ -168,8 +224,7 @@ swiotlb_init_with_default_size(size_t default_size)
        if (!io_tlb_overflow_buffer)
                panic("Cannot allocate SWIOTLB overflow buffer!\n");
 
-       printk(KERN_INFO "Placing software IO TLB between 0x%lx - 0x%lx\n",
-              virt_to_bus(io_tlb_start), virt_to_bus(io_tlb_end));
+       swiotlb_print_info(bytes);
 }
 
 void __init
@@ -202,8 +257,7 @@ swiotlb_late_init_with_default_size(size_t default_size)
        bytes = io_tlb_nslabs << IO_TLB_SHIFT;
 
        while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
-               io_tlb_start = (char *)__get_free_pages(GFP_DMA | __GFP_NOWARN,
-                                                       order);
+               io_tlb_start = swiotlb_alloc(order, io_tlb_nslabs);
                if (io_tlb_start)
                        break;
                order--;
@@ -235,12 +289,12 @@ swiotlb_late_init_with_default_size(size_t default_size)
                io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
        io_tlb_index = 0;
 
-       io_tlb_orig_addr = (unsigned char **)__get_free_pages(GFP_KERNEL,
-                                  get_order(io_tlb_nslabs * sizeof(char *)));
+       io_tlb_orig_addr = (struct swiotlb_phys_addr *)__get_free_pages(GFP_KERNEL,
+                                  get_order(io_tlb_nslabs * sizeof(struct swiotlb_phys_addr)));
        if (!io_tlb_orig_addr)
                goto cleanup3;
 
-       memset(io_tlb_orig_addr, 0, io_tlb_nslabs * sizeof(char *));
+       memset(io_tlb_orig_addr, 0, io_tlb_nslabs * sizeof(struct swiotlb_phys_addr));
 
        /*
         * Get the overflow emergency buffer
@@ -250,9 +304,7 @@ swiotlb_late_init_with_default_size(size_t default_size)
        if (!io_tlb_overflow_buffer)
                goto cleanup4;
 
-       printk(KERN_INFO "Placing %luMB software IO TLB between 0x%lx - "
-              "0x%lx\n", bytes >> 20,
-              virt_to_bus(io_tlb_start), virt_to_bus(io_tlb_end));
+       swiotlb_print_info(bytes);
 
        return 0;
 
@@ -279,16 +331,69 @@ address_needs_mapping(struct device *hwdev, dma_addr_t addr, size_t size)
        return !is_buffer_dma_capable(dma_get_mask(hwdev), addr, size);
 }
 
+static inline int range_needs_mapping(void *ptr, size_t size)
+{
+       return swiotlb_force || swiotlb_arch_range_needs_mapping(ptr, size);
+}
+
 static int is_swiotlb_buffer(char *addr)
 {
        return addr >= io_tlb_start && addr < io_tlb_end;
 }
 
+static struct swiotlb_phys_addr swiotlb_bus_to_phys_addr(char *dma_addr)
+{
+       int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT;
+       struct swiotlb_phys_addr buffer = io_tlb_orig_addr[index];
+       buffer.offset += (long)dma_addr & ((1 << IO_TLB_SHIFT) - 1);
+       buffer.page += buffer.offset >> PAGE_SHIFT;
+       buffer.offset &= PAGE_SIZE - 1;
+       return buffer;
+}
+
+static void
+__sync_single(struct swiotlb_phys_addr buffer, char *dma_addr, size_t size, int dir)
+{
+       if (PageHighMem(buffer.page)) {
+               size_t len, bytes;
+               char *dev, *host, *kmp;
+
+               len = size;
+               while (len != 0) {
+                       unsigned long flags;
+
+                       bytes = len;
+                       if ((bytes + buffer.offset) > PAGE_SIZE)
+                               bytes = PAGE_SIZE - buffer.offset;
+                       local_irq_save(flags); /* protects KM_BOUNCE_READ */
+                       kmp  = kmap_atomic(buffer.page, KM_BOUNCE_READ);
+                       dev  = dma_addr + size - len;
+                       host = kmp + buffer.offset;
+                       if (dir == DMA_FROM_DEVICE)
+                               memcpy(host, dev, bytes);
+                       else
+                               memcpy(dev, host, bytes);
+                       kunmap_atomic(kmp, KM_BOUNCE_READ);
+                       local_irq_restore(flags);
+                       len -= bytes;
+                       buffer.page++;
+                       buffer.offset = 0;
+               }
+       } else {
+               void *v = page_address(buffer.page) + buffer.offset;
+
+               if (dir == DMA_TO_DEVICE)
+                       memcpy(dma_addr, v, size);
+               else
+                       memcpy(v, dma_addr, size);
+       }
+}
+
 /*
  * Allocates bounce buffer and returns its kernel virtual address.
  */
 static void *
-map_single(struct device *hwdev, char *buffer, size_t size, int dir)
+map_single(struct device *hwdev, struct swiotlb_phys_addr buffer, size_t size, int dir)
 {
        unsigned long flags;
        char *dma_addr;
@@ -298,11 +403,16 @@ map_single(struct device *hwdev, char *buffer, size_t size, int dir)
        unsigned long mask;
        unsigned long offset_slots;
        unsigned long max_slots;
+       struct swiotlb_phys_addr slot_buf;
 
        mask = dma_get_seg_boundary(hwdev);
-       start_dma_addr = virt_to_bus(io_tlb_start) & mask;
+       start_dma_addr = swiotlb_virt_to_bus(io_tlb_start) & mask;
 
        offset_slots = ALIGN(start_dma_addr, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
+
+       /*
+        * Carefully handle integer overflow which can occur when mask == ~0UL.
+        */
        max_slots = mask + 1
                    ? ALIGN(mask + 1, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT
                    : 1UL << (BITS_PER_LONG - IO_TLB_SHIFT);
@@ -378,10 +488,15 @@ found:
         * This is needed when we sync the memory.  Then we sync the buffer if
         * needed.
         */
-       for (i = 0; i < nslots; i++)
-               io_tlb_orig_addr[index+i] = buffer + (i << IO_TLB_SHIFT);
+       slot_buf = buffer;
+       for (i = 0; i < nslots; i++) {
+               slot_buf.page += slot_buf.offset >> PAGE_SHIFT;
+               slot_buf.offset &= PAGE_SIZE - 1;
+               io_tlb_orig_addr[index+i] = slot_buf;
+               slot_buf.offset += 1 << IO_TLB_SHIFT;
+       }
        if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)
-               memcpy(dma_addr, buffer, size);
+               __sync_single(buffer, dma_addr, size, DMA_TO_DEVICE);
 
        return dma_addr;
 }
@@ -395,17 +510,17 @@ unmap_single(struct device *hwdev, char *dma_addr, size_t size, int dir)
        unsigned long flags;
        int i, count, nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
        int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT;
-       char *buffer = io_tlb_orig_addr[index];
+       struct swiotlb_phys_addr buffer = swiotlb_bus_to_phys_addr(dma_addr);
 
        /*
         * First, sync the memory before unmapping the entry
         */
-       if (buffer && ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL)))
+       if ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL))
                /*
                 * bounce... copy the data back into the original buffer * and
                 * delete the bounce buffer.
                 */
-               memcpy(buffer, dma_addr, size);
+               __sync_single(buffer, dma_addr, size, DMA_FROM_DEVICE);
 
        /*
         * Return the buffer to the free list by setting the corresponding
@@ -437,21 +552,18 @@ static void
 sync_single(struct device *hwdev, char *dma_addr, size_t size,
            int dir, int target)
 {
-       int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT;
-       char *buffer = io_tlb_orig_addr[index];
-
-       buffer += ((unsigned long)dma_addr & ((1 << IO_TLB_SHIFT) - 1));
+       struct swiotlb_phys_addr buffer = swiotlb_bus_to_phys_addr(dma_addr);
 
        switch (target) {
        case SYNC_FOR_CPU:
                if (likely(dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL))
-                       memcpy(buffer, dma_addr, size);
+                       __sync_single(buffer, dma_addr, size, DMA_FROM_DEVICE);
                else
                        BUG_ON(dir != DMA_TO_DEVICE);
                break;
        case SYNC_FOR_DEVICE:
                if (likely(dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL))
-                       memcpy(dma_addr, buffer, size);
+                       __sync_single(buffer, dma_addr, size, DMA_TO_DEVICE);
                else
                        BUG_ON(dir != DMA_FROM_DEVICE);
                break;
@@ -473,7 +585,7 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
                dma_mask = hwdev->coherent_dma_mask;
 
        ret = (void *)__get_free_pages(flags, order);
-       if (ret && !is_buffer_dma_capable(dma_mask, virt_to_bus(ret), size)) {
+       if (ret && !is_buffer_dma_capable(dma_mask, swiotlb_virt_to_bus(ret), size)) {
                /*
                 * The allocated memory isn't reachable by the device.
                 * Fall back on swiotlb_map_single().
@@ -488,13 +600,16 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
                 * swiotlb_map_single(), which will grab memory from
                 * the lowest available address range.
                 */
-               ret = map_single(hwdev, NULL, size, DMA_FROM_DEVICE);
+               struct swiotlb_phys_addr buffer;
+               buffer.page = virt_to_page(NULL);
+               buffer.offset = 0;
+               ret = map_single(hwdev, buffer, size, DMA_FROM_DEVICE);
                if (!ret)
                        return NULL;
        }
 
        memset(ret, 0, size);
-       dev_addr = virt_to_bus(ret);
+       dev_addr = swiotlb_virt_to_bus(ret);
 
        /* Confirm address can be DMA'd by device */
        if (!is_buffer_dma_capable(dma_mask, dev_addr, size)) {
@@ -554,8 +669,9 @@ dma_addr_t
 swiotlb_map_single_attrs(struct device *hwdev, void *ptr, size_t size,
                         int dir, struct dma_attrs *attrs)
 {
-       dma_addr_t dev_addr = virt_to_bus(ptr);
+       dma_addr_t dev_addr = swiotlb_virt_to_bus(ptr);
        void *map;
+       struct swiotlb_phys_addr buffer;
 
        BUG_ON(dir == DMA_NONE);
        /*
@@ -563,19 +679,22 @@ swiotlb_map_single_attrs(struct device *hwdev, void *ptr, size_t size,
         * we can safely return the device addr and not worry about bounce
         * buffering it.
         */
-       if (!address_needs_mapping(hwdev, dev_addr, size) && !swiotlb_force)
+       if (!address_needs_mapping(hwdev, dev_addr, size) &&
+           !range_needs_mapping(ptr, size))
                return dev_addr;
 
        /*
         * Oh well, have to allocate and map a bounce buffer.
         */
-       map = map_single(hwdev, ptr, size, dir);
+       buffer.page   = virt_to_page(ptr);
+       buffer.offset = (unsigned long)ptr & ~PAGE_MASK;
+       map = map_single(hwdev, buffer, size, dir);
        if (!map) {
                swiotlb_full(hwdev, size, dir, 1);
                map = io_tlb_overflow_buffer;
        }
 
-       dev_addr = virt_to_bus(map);
+       dev_addr = swiotlb_virt_to_bus(map);
 
        /*
         * Ensure that the address returned is DMA'ble
@@ -605,7 +724,7 @@ void
 swiotlb_unmap_single_attrs(struct device *hwdev, dma_addr_t dev_addr,
                           size_t size, int dir, struct dma_attrs *attrs)
 {
-       char *dma_addr = bus_to_virt(dev_addr);
+       char *dma_addr = swiotlb_bus_to_virt(dev_addr);
 
        BUG_ON(dir == DMA_NONE);
        if (is_swiotlb_buffer(dma_addr))
@@ -635,7 +754,7 @@ static void
 swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
                    size_t size, int dir, int target)
 {
-       char *dma_addr = bus_to_virt(dev_addr);
+       char *dma_addr = swiotlb_bus_to_virt(dev_addr);
 
        BUG_ON(dir == DMA_NONE);
        if (is_swiotlb_buffer(dma_addr))
@@ -666,7 +785,7 @@ swiotlb_sync_single_range(struct device *hwdev, dma_addr_t dev_addr,
                          unsigned long offset, size_t size,
                          int dir, int target)
 {
-       char *dma_addr = bus_to_virt(dev_addr) + offset;
+       char *dma_addr = swiotlb_bus_to_virt(dev_addr) + offset;
 
        BUG_ON(dir == DMA_NONE);
        if (is_swiotlb_buffer(dma_addr))
@@ -714,18 +833,20 @@ swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems,
                     int dir, struct dma_attrs *attrs)
 {
        struct scatterlist *sg;
-       void *addr;
+       struct swiotlb_phys_addr buffer;
        dma_addr_t dev_addr;
        int i;
 
        BUG_ON(dir == DMA_NONE);
 
        for_each_sg(sgl, sg, nelems, i) {
-               addr = SG_ENT_VIRT_ADDRESS(sg);
-               dev_addr = virt_to_bus(addr);
-               if (swiotlb_force ||
+               dev_addr = swiotlb_sg_to_bus(sg);
+               if (range_needs_mapping(sg_virt(sg), sg->length) ||
                    address_needs_mapping(hwdev, dev_addr, sg->length)) {
-                       void *map = map_single(hwdev, addr, sg->length, dir);
+                       void *map;
+                       buffer.page   = sg_page(sg);
+                       buffer.offset = sg->offset;
+                       map = map_single(hwdev, buffer, sg->length, dir);
                        if (!map) {
                                /* Don't panic here, we expect map_sg users
                                   to do proper error handling. */
@@ -735,7 +856,7 @@ swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems,
                                sgl[0].dma_length = 0;
                                return 0;
                        }
-                       sg->dma_address = virt_to_bus(map);
+                       sg->dma_address = swiotlb_virt_to_bus(map);
                } else
                        sg->dma_address = dev_addr;
                sg->dma_length = sg->length;
@@ -765,11 +886,11 @@ swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
        BUG_ON(dir == DMA_NONE);
 
        for_each_sg(sgl, sg, nelems, i) {
-               if (sg->dma_address != SG_ENT_PHYS_ADDRESS(sg))
-                       unmap_single(hwdev, bus_to_virt(sg->dma_address),
+               if (sg->dma_address != swiotlb_sg_to_bus(sg))
+                       unmap_single(hwdev, swiotlb_bus_to_virt(sg->dma_address),
                                     sg->dma_length, dir);
                else if (dir == DMA_FROM_DEVICE)
-                       dma_mark_clean(SG_ENT_VIRT_ADDRESS(sg), sg->dma_length);
+                       dma_mark_clean(swiotlb_bus_to_virt(sg->dma_address), sg->dma_length);
        }
 }
 EXPORT_SYMBOL(swiotlb_unmap_sg_attrs);
@@ -798,11 +919,11 @@ swiotlb_sync_sg(struct device *hwdev, struct scatterlist *sgl,
        BUG_ON(dir == DMA_NONE);
 
        for_each_sg(sgl, sg, nelems, i) {
-               if (sg->dma_address != SG_ENT_PHYS_ADDRESS(sg))
-                       sync_single(hwdev, bus_to_virt(sg->dma_address),
+               if (sg->dma_address != swiotlb_sg_to_bus(sg))
+                       sync_single(hwdev, swiotlb_bus_to_virt(sg->dma_address),
                                    sg->dma_length, dir, target);
                else if (dir == DMA_FROM_DEVICE)
-                       dma_mark_clean(SG_ENT_VIRT_ADDRESS(sg), sg->dma_length);
+                       dma_mark_clean(swiotlb_bus_to_virt(sg->dma_address), sg->dma_length);
        }
 }
 
@@ -823,7 +944,7 @@ swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
 int
 swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr)
 {
-       return (dma_addr == virt_to_bus(io_tlb_overflow_buffer));
+       return (dma_addr == swiotlb_virt_to_bus(io_tlb_overflow_buffer));
 }
 
 /*
@@ -835,7 +956,7 @@ swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr)
 int
 swiotlb_dma_supported(struct device *hwdev, u64 mask)
 {
-       return virt_to_bus(io_tlb_end - 1) <= mask;
+       return swiotlb_virt_to_bus(io_tlb_end - 1) <= mask;
 }
 
 EXPORT_SYMBOL(swiotlb_map_single);
index bf0cf7c8387b8d92c93dfb18436a7f2cf424cb00..e590272fe7a8f3e40acb21059bb0082f74354f26 100644 (file)
@@ -198,8 +198,13 @@ static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig,
                /*
                 * irk, bounce it
                 */
-               if (!bio)
-                       bio = bio_alloc(GFP_NOIO, (*bio_orig)->bi_vcnt);
+               if (!bio) {
+                       unsigned int cnt = (*bio_orig)->bi_vcnt;
+
+                       bio = bio_alloc(GFP_NOIO, cnt);
+                       memset(bio->bi_io_vec, 0, cnt * sizeof(struct bio_vec));
+               }
+                       
 
                to = bio->bi_io_vec + i;
 
index f01b7eed6e16c4e3f32e039b7f2223ca598f40aa..0a2010a9518c499efe694c9ed0d42f5bd18e459b 100644 (file)
@@ -3075,3 +3075,18 @@ void print_vma_addr(char *prefix, unsigned long ip)
        }
        up_read(&current->mm->mmap_sem);
 }
+
+#ifdef CONFIG_PROVE_LOCKING
+void might_fault(void)
+{
+       might_sleep();
+       /*
+        * it would be nicer only to annotate paths which are not under
+        * pagefault_disable, however that requires a larger audit and
+        * providing helpers like get_user_atomic.
+        */
+       if (!in_atomic() && current->mm)
+               might_lock_read(&current->mm->mmap_sem);
+}
+EXPORT_SYMBOL(might_fault);
+#endif
index 7c72baa02f2e93ff09c76be260201da3315e32e5..6688765bd8b96fcf927db7590a7e1fb5ab3bed1a 100644 (file)
@@ -838,11 +838,11 @@ static long get_instantiation_keyring(key_serial_t ringid,
 {
        key_ref_t dkref;
 
+       *_dest_keyring = NULL;
+
        /* just return a NULL pointer if we weren't asked to make a link */
-       if (ringid == 0) {
-               *_dest_keyring = NULL;
+       if (ringid == 0)
                return 0;
-       }
 
        /* if a specific keyring is nominated by ID, then use that */
        if (ringid > 0) {
index c1d285921f807cc6ce1b626a87d2cf6684ecf89d..34c7d48f5061b9fd2025a23e78ac63c4bcd3fd55 100644 (file)
@@ -57,7 +57,6 @@ static int snd_hrtimer_open(struct snd_timer *t)
                return -ENOMEM;
        hrtimer_init(&stime->hrt, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        stime->timer = t;
-       stime->hrt.cb_mode = HRTIMER_CB_IRQSAFE_UNLOCKED;
        stime->hrt.function = snd_hrtimer_callback;
        t->private_data = stime;
        return 0;
index 2a02f704f366b31c58947e3d409cf5fd5212d06c..a4049eb94d35a28bafa41072e6e44af726d6f251 100644 (file)
@@ -96,7 +96,6 @@ static int __devinit snd_card_pcsp_probe(int devnum, struct device *dev)
                return -EINVAL;
 
        hrtimer_init(&pcsp_chip.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
-       pcsp_chip.timer.cb_mode = HRTIMER_CB_IRQSAFE_UNLOCKED;
        pcsp_chip.timer.function = pcsp_do_timer;
 
        card = snd_card_new(index, id, THIS_MODULE, 0);