Merge branch 'for-linus' of master.kernel.org:/pub/scm/linux/kernel/git/roland/infiniband
author    Linus Torvalds <torvalds@g5.osdl.org>
          Tue, 31 Jan 2006 23:22:29 +0000 (15:22 -0800)
committer Linus Torvalds <torvalds@g5.osdl.org>
          Tue, 31 Jan 2006 23:22:29 +0000 (15:22 -0800)
574 files changed:
Documentation/drivers/edac/edac.txt [new file with mode: 0644]
Documentation/scsi/ChangeLog.megaraid_sas [new file with mode: 0644]
Documentation/scsi/aic79xx.txt
Documentation/scsi/aic7xxx.txt
Documentation/sysctl/vm.txt
MAINTAINERS
arch/alpha/kernel/osf_sys.c
arch/arm/boot/compressed/head.S
arch/arm/configs/bast_defconfig
arch/arm/configs/collie_defconfig
arch/arm/configs/ep80219_defconfig
arch/arm/configs/iq31244_defconfig
arch/arm/configs/iq80321_defconfig
arch/arm/configs/iq80331_defconfig
arch/arm/configs/iq80332_defconfig
arch/arm/configs/s3c2410_defconfig
arch/arm/kernel/calls.S
arch/arm/kernel/entry-armv.S
arch/arm/kernel/entry-common.S
arch/arm/kernel/signal.c
arch/arm/kernel/signal.h
arch/arm/mach-integrator/integrator_cp.c
arch/arm/mach-ixp4xx/common.c
arch/arm/mach-omap1/clock.c
arch/arm/mach-omap1/clock.h
arch/arm/mach-omap1/serial.c
arch/arm/mach-omap2/clock.c
arch/arm/mach-omap2/clock.h
arch/arm/mach-omap2/serial.c
arch/arm/mach-omap2/timer-gp.c
arch/arm/mach-s3c2410/cpu.h
arch/arm/mach-s3c2410/devs.c
arch/arm/mach-s3c2410/dma.c
arch/arm/mach-s3c2410/sleep.S
arch/arm/mm/ioremap.c
arch/arm/mm/mm-armv.c
arch/arm/plat-omap/clock.c
arch/arm/plat-omap/gpio.c
arch/arm/plat-omap/mcbsp.c
arch/arm/plat-omap/ocpi.c
arch/frv/kernel/signal.c
arch/i386/defconfig
arch/i386/kernel/cpu/cpufreq/Kconfig
arch/i386/kernel/cpu/cpufreq/p4-clockmod.c
arch/i386/kernel/quirks.c
arch/i386/kernel/signal.c
arch/i386/kernel/syscall_table.S
arch/ia64/configs/gensparse_defconfig
arch/ia64/configs/tiger_defconfig
arch/ia64/configs/zx1_defconfig
arch/ia64/defconfig
arch/ia64/ia32/sys_ia32.c
arch/ia64/kernel/mca_asm.S
arch/ia64/kernel/perfmon.c
arch/ia64/kernel/unaligned.c
arch/ia64/kernel/uncached.c
arch/ia64/sn/include/xtalk/hubdev.h
arch/ia64/sn/kernel/io_init.c
arch/ia64/sn/kernel/mca.c
arch/ia64/sn/kernel/xp_main.c
arch/ia64/sn/kernel/xpc_channel.c
arch/ia64/sn/kernel/xpc_main.c
arch/ia64/sn/pci/pci_dma.c
arch/ia64/sn/pci/pcibr/pcibr_provider.c
arch/parisc/configs/a500_defconfig
arch/parisc/configs/c3000_defconfig
arch/powerpc/configs/cell_defconfig
arch/powerpc/configs/g5_defconfig
arch/powerpc/configs/iseries_defconfig
arch/powerpc/configs/maple_defconfig
arch/powerpc/configs/ppc64_defconfig
arch/powerpc/configs/pseries_defconfig
arch/powerpc/kernel/entry_32.S
arch/powerpc/kernel/entry_64.S
arch/powerpc/kernel/signal_32.c
arch/powerpc/kernel/signal_64.c
arch/powerpc/kernel/systbl.S
arch/powerpc/platforms/powermac/feature.c
arch/ppc/configs/bamboo_defconfig
arch/ppc/configs/katana_defconfig
arch/ppc/configs/mpc834x_sys_defconfig
arch/ppc/configs/power3_defconfig
arch/ppc/syslib/mv64x60.c
arch/sparc/kernel/entry.S
arch/sparc/kernel/rtrap.S
arch/sparc/kernel/signal.c
arch/sparc/kernel/sparc_ksyms.c
arch/sparc/kernel/systbls.S
arch/sparc/math-emu/math.c
arch/sparc/mm/iommu.c
arch/sparc64/defconfig
arch/sparc64/kernel/entry.S
arch/sparc64/kernel/process.c
arch/sparc64/kernel/rtrap.S
arch/sparc64/kernel/setup.c
arch/sparc64/kernel/signal.c
arch/sparc64/kernel/signal32.c
arch/sparc64/kernel/sparc64_ksyms.c
arch/sparc64/kernel/sys32.S
arch/sparc64/kernel/sys_sparc32.c
arch/sparc64/kernel/systbls.S
arch/sparc64/kernel/time.c
arch/sparc64/prom/console.c
arch/sparc64/solaris/entry64.S
arch/um/Kconfig
arch/um/Kconfig.i386
arch/um/Makefile
arch/um/drivers/daemon_kern.c
arch/um/drivers/daemon_user.c
arch/um/drivers/fd.c
arch/um/drivers/net_kern.c
arch/um/drivers/ubd_kern.c
arch/um/include/kern_util.h
arch/um/include/longjmp.h [new file with mode: 0644]
arch/um/include/mode_kern.h
arch/um/include/os.h
arch/um/include/skas/mm_id.h [moved from arch/um/kernel/skas/include/mm_id.h with 100% similarity]
arch/um/include/skas/mmu-skas.h [new file with mode: 0644]
arch/um/include/skas/mode-skas.h [new file with mode: 0644]
arch/um/include/skas/mode_kern_skas.h [moved from arch/um/kernel/skas/include/mode_kern-skas.h with 77% similarity]
arch/um/include/skas/proc_mm.h [moved from arch/um/kernel/skas/include/proc_mm.h with 60% similarity]
arch/um/include/skas/skas.h [new file with mode: 0644]
arch/um/include/skas/stub-data.h [moved from arch/um/kernel/skas/include/stub-data.h with 100% similarity]
arch/um/include/skas/uaccess-skas.h [moved from arch/um/kernel/skas/include/uaccess-skas.h with 63% similarity]
arch/um/include/time_user.h [deleted file]
arch/um/include/tt/debug.h [moved from arch/um/kernel/tt/include/debug.h with 99% similarity]
arch/um/include/tt/mmu-tt.h [new file with mode: 0644]
arch/um/include/tt/mode-tt.h [new file with mode: 0644]
arch/um/include/tt/mode_kern_tt.h [new file with mode: 0644]
arch/um/include/tt/tt.h [moved from arch/um/kernel/tt/include/tt.h with 73% similarity]
arch/um/include/tt/uaccess-tt.h [moved from arch/um/kernel/tt/include/uaccess-tt.h with 80% similarity]
arch/um/include/user.h
arch/um/include/user_util.h
arch/um/kernel/Makefile
arch/um/kernel/exec_kern.c
arch/um/kernel/process_kern.c
arch/um/kernel/sigio_user.c
arch/um/kernel/signal_kern.c
arch/um/kernel/skas/Makefile
arch/um/kernel/skas/include/mmu-skas.h [deleted file]
arch/um/kernel/skas/include/mode-skas.h [deleted file]
arch/um/kernel/skas/include/skas.h [deleted file]
arch/um/kernel/skas/mmu.c
arch/um/kernel/skas/process.c
arch/um/kernel/skas/process_kern.c
arch/um/kernel/skas/uaccess.c
arch/um/kernel/syscall.c
arch/um/kernel/time_kern.c
arch/um/kernel/tt/exec_kern.c
arch/um/kernel/tt/gdb.c
arch/um/kernel/tt/include/mmu-tt.h [deleted file]
arch/um/kernel/tt/process_kern.c
arch/um/kernel/tt/ptproxy/ptrace.c
arch/um/kernel/tt/ptproxy/sysdep.c
arch/um/kernel/tt/trap_user.c
arch/um/os-Linux/Makefile
arch/um/os-Linux/helper.c
arch/um/os-Linux/main.c
arch/um/os-Linux/process.c
arch/um/os-Linux/signal.c
arch/um/os-Linux/skas/Makefile
arch/um/os-Linux/skas/mem.c [moved from arch/um/kernel/skas/mem_user.c with 52% similarity]
arch/um/os-Linux/skas/process.c [new file with mode: 0644]
arch/um/os-Linux/start_up.c
arch/um/os-Linux/time.c
arch/um/os-Linux/trap.c
arch/um/os-Linux/tt.c
arch/um/os-Linux/uaccess.c
arch/um/os-Linux/util.c [moved from arch/um/kernel/user_util.c with 56% similarity]
arch/um/sys-i386/ldt.c
arch/x86_64/defconfig
arch/x86_64/ia32/ia32entry.S
arch/x86_64/kernel/setup64.c
block/elevator.c
block/ll_rw_blk.c
drivers/Kconfig
drivers/Makefile
drivers/char/synclink_gt.c
drivers/char/tlclk.c
drivers/cpufreq/cpufreq.c
drivers/cpufreq/cpufreq_conservative.c
drivers/cpufreq/cpufreq_ondemand.c
drivers/cpufreq/cpufreq_userspace.c
drivers/edac/Kconfig [new file with mode: 0644]
drivers/edac/Makefile [new file with mode: 0644]
drivers/edac/amd76x_edac.c [new file with mode: 0644]
drivers/edac/e752x_edac.c [new file with mode: 0644]
drivers/edac/e7xxx_edac.c [new file with mode: 0644]
drivers/edac/edac_mc.c [new file with mode: 0644]
drivers/edac/edac_mc.h [new file with mode: 0644]
drivers/edac/i82860_edac.c [new file with mode: 0644]
drivers/edac/i82875p_edac.c [new file with mode: 0644]
drivers/edac/r82600_edac.c [new file with mode: 0644]
drivers/input/touchscreen/ads7846.c
drivers/md/kcopyd.c
drivers/message/fusion/Makefile
drivers/message/fusion/mptbase.c
drivers/message/fusion/mptbase.h
drivers/message/fusion/mptfc.c
drivers/message/fusion/mptsas.c
drivers/message/fusion/mptscsih.c
drivers/message/fusion/mptscsih.h
drivers/message/fusion/mptspi.c
drivers/misc/ibmasm/uart.c
drivers/net/Kconfig
drivers/net/acenic.c
drivers/net/b44.c
drivers/net/bonding/bond_main.c
drivers/net/cassini.c
drivers/net/e100.c
drivers/net/e1000/e1000.h
drivers/net/e1000/e1000_ethtool.c
drivers/net/e1000/e1000_hw.c
drivers/net/e1000/e1000_hw.h
drivers/net/e1000/e1000_main.c
drivers/net/e1000/e1000_osdep.h
drivers/net/e1000/e1000_param.c
drivers/net/mv643xx_eth.c
drivers/net/s2io.c
drivers/net/skge.c
drivers/net/sky2.c
drivers/net/spider_net.c
drivers/net/spider_net.h
drivers/net/spider_net_ethtool.c
drivers/net/sungem.c
drivers/net/tg3.c
drivers/net/tg3.h
drivers/net/wireless/airo.c
drivers/net/wireless/atmel.c
drivers/net/wireless/hostap/Kconfig
drivers/net/wireless/hostap/Makefile
drivers/net/wireless/hostap/hostap.h
drivers/net/wireless/hostap/hostap_80211.h
drivers/net/wireless/hostap/hostap_80211_rx.c
drivers/net/wireless/hostap/hostap_80211_tx.c
drivers/net/wireless/hostap/hostap_ap.c
drivers/net/wireless/hostap/hostap_ap.h
drivers/net/wireless/hostap/hostap_common.h
drivers/net/wireless/hostap/hostap_config.h
drivers/net/wireless/hostap/hostap_info.c
drivers/net/wireless/hostap/hostap_ioctl.c
drivers/net/wireless/hostap/hostap_main.c
drivers/net/wireless/hostap/hostap_proc.c
drivers/net/wireless/hostap/hostap_wlan.h
drivers/net/wireless/ipw2100.c
drivers/net/wireless/ipw2200.c
drivers/net/wireless/orinoco_cs.c
drivers/net/wireless/prism54/isl_ioctl.c
drivers/net/wireless/prism54/islpci_eth.c
drivers/net/wireless/ray_cs.c
drivers/net/wireless/wavelan_cs.c
drivers/pci/msi.c
drivers/pci/quirks.c
drivers/scsi/ahci.c
drivers/scsi/aic7xxx/Kconfig.aic79xx
drivers/scsi/aic7xxx/aic79xx.h
drivers/scsi/aic7xxx/aic79xx.reg
drivers/scsi/aic7xxx/aic79xx.seq
drivers/scsi/aic7xxx/aic79xx_core.c
drivers/scsi/aic7xxx/aic79xx_inline.h
drivers/scsi/aic7xxx/aic79xx_osm.c
drivers/scsi/aic7xxx/aic79xx_osm.h
drivers/scsi/aic7xxx/aic79xx_osm_pci.c
drivers/scsi/aic7xxx/aic79xx_pci.c
drivers/scsi/aic7xxx/aic79xx_reg.h_shipped
drivers/scsi/aic7xxx/aic79xx_reg_print.c_shipped
drivers/scsi/aic7xxx/aic79xx_seq.h_shipped
drivers/scsi/aic7xxx/aicasm/aicasm.c
drivers/scsi/aic7xxx/aicasm/aicasm_gram.y
drivers/scsi/aic7xxx/aicasm/aicasm_insformat.h
drivers/scsi/aic7xxx/aicasm/aicasm_scan.l
drivers/scsi/ata_piix.c
drivers/scsi/dc395x.c
drivers/scsi/ibmvscsi/ibmvscsi.c
drivers/scsi/ibmvscsi/ibmvscsi.h
drivers/scsi/ibmvscsi/iseries_vscsi.c
drivers/scsi/ibmvscsi/rpa_vscsi.c
drivers/scsi/ips.c
drivers/scsi/libata-core.c
drivers/scsi/libata-scsi.c
drivers/scsi/megaraid/megaraid_sas.c
drivers/scsi/megaraid/megaraid_sas.h
drivers/scsi/qla1280.c
drivers/scsi/qla2xxx/qla_def.h
drivers/scsi/qla2xxx/qla_gbl.h
drivers/scsi/qla2xxx/qla_init.c
drivers/scsi/qla2xxx/qla_isr.c
drivers/scsi/qla2xxx/qla_os.c
drivers/scsi/sata_promise.c
drivers/scsi/sata_svw.c
drivers/scsi/scsi_error.c
drivers/scsi/scsi_lib.c
drivers/scsi/scsi_transport_sas.c
drivers/scsi/sg.c
drivers/scsi/st.c
drivers/serial/21285.c
drivers/serial/8250.c
drivers/serial/8250_pci.c
drivers/serial/Kconfig
drivers/serial/amba-pl010.c
drivers/serial/at91_serial.c
drivers/serial/clps711x.c
drivers/serial/imx.c
drivers/serial/s3c2410.c
drivers/serial/sa1100.c
drivers/serial/serial_core.c
drivers/serial/serial_lh7a40x.c
drivers/serial/sh-sci.c
drivers/serial/sn_console.c
drivers/serial/suncore.c
drivers/serial/sunsab.c
drivers/serial/sunsu.c
drivers/video/amba-clcd.c
drivers/video/sbuslib.c
drivers/video/sbuslib.h
fs/9p/Makefile
fs/9p/v9fs_vfs.h
fs/9p/vfs_addr.c [new file with mode: 0644]
fs/9p/vfs_file.c
fs/9p/vfs_inode.c
fs/bio.c
fs/cifs/CHANGES
fs/cifs/README
fs/cifs/cifs_debug.c
fs/cifs/cifs_fs_sb.h
fs/cifs/cifsacl.h [new file with mode: 0644]
fs/cifs/cifsencrypt.c
fs/cifs/cifsfs.c
fs/cifs/cifsfs.h
fs/cifs/cifsglob.h
fs/cifs/cifspdu.h
fs/cifs/cifsproto.h
fs/cifs/cifssmb.c
fs/cifs/connect.c
fs/cifs/dir.c
fs/cifs/file.c
fs/cifs/inode.c
fs/cifs/misc.c
fs/cifs/readdir.c
fs/cifs/rfc1002pdu.h
fs/cifs/transport.c
fs/cifs/xattr.c
fs/compat.c
fs/exec.c
fs/exportfs/expfs.c
fs/hfs/bfind.c
fs/hfs/bnode.c
fs/hfs/brec.c
fs/hfs/btree.c
fs/hfs/catalog.c
fs/hfs/dir.c
fs/hfs/hfs_fs.h
fs/hfs/inode.c
fs/hfs/mdb.c
fs/hfs/super.c
fs/hfsplus/bfind.c
fs/hfsplus/bnode.c
fs/hfsplus/brec.c
fs/hfsplus/btree.c
fs/hfsplus/catalog.c
fs/hfsplus/dir.c
fs/hfsplus/extents.c
fs/hfsplus/hfsplus_fs.h
fs/hfsplus/hfsplus_raw.h
fs/hfsplus/inode.c
fs/hfsplus/options.c
fs/hfsplus/super.c
fs/hfsplus/unicode.c
fs/hfsplus/wrapper.c
fs/inotify.c
fs/jbd/checkpoint.c
fs/jbd/commit.c
fs/namei.c
fs/nfsctl.c
fs/nfsd/nfs4proc.c
fs/nfsd/nfs4recover.c
fs/nfsd/nfs4state.c
fs/nfsd/nfs4xdr.c
fs/nfsd/nfsproc.c
fs/nfsd/vfs.c
fs/open.c
fs/select.c
fs/stat.c
fs/xfs/linux-2.6/xfs_aops.c
include/asm-arm/arch-omap/clock.h
include/asm-arm/arch-pxa/pxa-regs.h
include/asm-arm/arch-s3c2410/debug-macro.S
include/asm-arm/arch-s3c2410/map.h
include/asm-arm/arch-s3c2410/regs-serial.h
include/asm-arm/arch-s3c2410/uncompress.h
include/asm-arm/mach/map.h
include/asm-arm/pgtable.h
include/asm-frv/thread_info.h
include/asm-frv/unistd.h
include/asm-i386/edac.h [new file with mode: 0644]
include/asm-i386/futex.h
include/asm-i386/signal.h
include/asm-i386/thread_info.h
include/asm-i386/unistd.h
include/asm-ia64/semaphore.h
include/asm-ia64/sn/sn_feature_sets.h
include/asm-ia64/sn/xp.h
include/asm-ia64/sn/xpc.h
include/asm-ia64/topology.h
include/asm-powerpc/lppaca.h
include/asm-powerpc/thread_info.h
include/asm-powerpc/unistd.h
include/asm-sparc/oplib.h
include/asm-sparc/thread_info.h
include/asm-sparc/unistd.h
include/asm-sparc64/oplib.h
include/asm-sparc64/spinlock.h
include/asm-sparc64/thread_info.h
include/asm-sparc64/unistd.h
include/asm-um/io.h
include/asm-um/thread_info.h
include/asm-um/unistd.h
include/asm-x86_64/edac.h [new file with mode: 0644]
include/asm-x86_64/ia32_unistd.h
include/asm-x86_64/unistd.h
include/linux/blkdev.h
include/linux/cpufreq.h
include/linux/fcntl.h
include/linux/fs.h
include/linux/if_ether.h
include/linux/kernel.h
include/linux/libata.h
include/linux/mempolicy.h
include/linux/mm_inline.h
include/linux/mmzone.h
include/linux/namei.h
include/linux/netfilter/x_tables.h
include/linux/netfilter_ipv6/ip6t_ah.h
include/linux/netfilter_ipv6/ip6t_esp.h
include/linux/netfilter_ipv6/ip6t_frag.h
include/linux/netfilter_ipv6/ip6t_opts.h
include/linux/netfilter_ipv6/ip6t_rt.h
include/linux/nfsd/nfsd.h
include/linux/nfsd/xdr4.h
include/linux/pci_ids.h
include/linux/poll.h
include/linux/sched.h
include/linux/serial_8250.h
include/linux/serial_core.h
include/linux/skbuff.h
include/linux/sunrpc/svc.h
include/linux/swap.h
include/linux/syscalls.h
include/linux/sysctl.h
include/linux/time.h
include/linux/tipc_config.h
include/linux/topology.h
include/net/ieee80211.h
include/net/ieee80211_crypt.h
include/net/iw_handler.h
include/net/sctp/sctp.h
include/net/sctp/structs.h
include/scsi/scsi_device.h
include/scsi/scsi_host.h
include/scsi/scsi_transport_spi.h
kernel/audit.c
kernel/auditsc.c
kernel/compat.c
kernel/rcutorture.c
kernel/sched.c
kernel/signal.c
kernel/sysctl.c
kernel/time.c
kernel/user.c
mm/filemap.c
mm/mempolicy.c
mm/page-writeback.c
mm/page_alloc.c
mm/rmap.c
mm/slab.c
mm/swap.c
mm/swapfile.c
mm/vmscan.c
net/Kconfig
net/bridge/netfilter/ebt_ip.c
net/bridge/netfilter/ebt_log.c
net/core/dev.c
net/core/filter.c
net/core/netpoll.c
net/core/pktgen.c
net/core/skbuff.c
net/dccp/ackvec.c
net/ieee80211/ieee80211_rx.c
net/ieee80211/ieee80211_wx.c
net/ipv4/igmp.c
net/ipv4/netfilter/Makefile
net/ipv4/netfilter/ip_conntrack_proto_gre.c
net/ipv4/netfilter/ipt_policy.c
net/ipv4/raw.c
net/ipv4/route.c
net/ipv4/tcp_htcp.c
net/ipv6/addrconf.c
net/ipv6/anycast.c
net/ipv6/ip6_flowlabel.c
net/ipv6/mcast.c
net/ipv6/netfilter/Makefile
net/ipv6/netfilter/ip6t_dst.c
net/ipv6/netfilter/ip6t_eui64.c
net/ipv6/netfilter/ip6t_frag.c
net/ipv6/netfilter/ip6t_hbh.c
net/ipv6/netfilter/ip6t_ipv6header.c
net/ipv6/netfilter/ip6t_owner.c
net/ipv6/netfilter/ip6t_policy.c
net/ipv6/netfilter/ip6t_rt.c
net/key/af_key.c
net/packet/af_packet.c
net/rxrpc/krxtimod.c
net/rxrpc/proc.c
net/sched/sch_prio.c
net/sched/sch_sfq.c
net/sctp/input.c
net/sctp/inqueue.c
net/sctp/proc.c
net/sctp/sm_make_chunk.c
net/sctp/sm_sideeffect.c
net/sctp/sm_statefuns.c
net/sctp/socket.c
net/sctp/sysctl.c
net/sctp/transport.c
net/sunrpc/auth_gss/svcauth_gss.c
net/sunrpc/svcsock.c
net/tipc/Kconfig
net/tipc/addr.c
net/tipc/addr.h
net/tipc/bcast.c
net/tipc/bcast.h
net/tipc/bearer.c
net/tipc/bearer.h
net/tipc/cluster.c
net/tipc/cluster.h
net/tipc/config.c
net/tipc/config.h
net/tipc/core.c
net/tipc/core.h
net/tipc/dbg.c
net/tipc/dbg.h
net/tipc/discover.c
net/tipc/discover.h
net/tipc/eth_media.c
net/tipc/handler.c
net/tipc/link.c
net/tipc/link.h
net/tipc/msg.c
net/tipc/msg.h
net/tipc/name_distr.c
net/tipc/name_distr.h
net/tipc/name_table.c
net/tipc/name_table.h
net/tipc/net.c
net/tipc/net.h
net/tipc/netlink.c
net/tipc/node.c
net/tipc/node.h
net/tipc/node_subscr.c
net/tipc/node_subscr.h
net/tipc/port.c
net/tipc/port.h
net/tipc/ref.c
net/tipc/ref.h
net/tipc/socket.c
net/tipc/subscr.c
net/tipc/subscr.h
net/tipc/user_reg.c
net/tipc/user_reg.h
net/tipc/zone.c
net/tipc/zone.h
security/seclvl.c
sound/oss/trident.c
sound/sparc/cs4231.c

diff --git a/Documentation/drivers/edac/edac.txt b/Documentation/drivers/edac/edac.txt
new file mode 100644 (file)
index 0000000..d37191f
--- /dev/null
@@ -0,0 +1,673 @@
+
+
+EDAC - Error Detection And Correction
+
+Written by Doug Thompson <norsk5@xmission.com>
+7 Dec 2005
+
+
+EDAC was written by:
+       Thayne Harbaugh,
+       modified by Dave Peterson, Doug Thompson, et al,
+       from the bluesmoke.sourceforge.net project.
+
+
+============================================================================
+EDAC PURPOSE
+
+The 'edac' kernel module's goal is to detect and report errors that occur
+within the computer system. In the initial release, memory Correctable Errors
+(CE) and Uncorrectable Errors (UE) are the primary errors being harvested.
+
+Detecting CE events, then harvesting those events and reporting them,
+CAN be a predictor of future UE events.  With CE events, the system can
+continue to operate, but with less safety. Preventive maintenance and
+proactive part replacement of memory DIMMs exhibiting CEs can reduce
+the likelihood of the dreaded UE events and system 'panics'.
+
+
+In addition, PCI Bus Parity and SERR Errors are scanned for on PCI devices
+in order to determine if errors are occurring on data transfers.
+The presence of PCI Parity errors must be taken with a grain of salt.
+There are several add-in adapters that do NOT follow the PCI specification
+with regard to Parity generation and reporting. The specification says
+the vendor should tie the parity status bits to 0 if they do not intend
+to generate parity.  Some vendors do not do this, and thus the parity bit
+can "float", giving false positives.
+
+The PCI Parity EDAC device has the ability to "skip" known flaky
+cards during the parity scan. These are set by the parity "blacklist"
+interface in the sysfs for PCI Parity. (See the PCI section in the sysfs
+section below.) There is also a parity "whitelist" which is used as
+an explicit list of devices to scan, while the blacklist is a list
+of devices to skip.
+
+Future error detectors are planned to be added to or integrated
+into EDAC, including:
+
+       MCE     Machine Check Exception
+       MCA     Machine Check Architecture
+       NMI     NMI notification of ECC errors
+       MSRs    Machine Specific Register error cases
+       and other mechanisms.
+
+These errors are usually bus errors, ECC errors, thermal throttling
+and the like.
+
+
+============================================================================
+EDAC VERSIONING
+
+EDAC is composed of a "core" module (edac_mc.ko) and several Memory
+Controller (MC) driver modules. On a given system, the CORE
+is loaded and one MC driver will be loaded. Both the CORE and
+the MC driver have individual versions that reflect the current release
+level of their respective modules.  Thus, to "report" on what version
+a system is running, one must report both the CORE's and the
+MC driver's versions.
+
+
+LOADING
+
+If 'edac' was statically linked with the kernel then no loading is
+necessary.  If 'edac' was built as modules then simply modprobe the
+'edac' pieces that you need.  You should be able to modprobe
+hardware-specific modules and have the dependencies load the necessary core
+modules.
+
+Example:
+
+$> modprobe amd76x_edac
+
+loads both the amd76x_edac.ko memory controller module and the edac_mc.ko
+core module.
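+
+To verify that both modules are present after loading, one simple
+check (a sketch, assuming modular operation as in the example above):
+
+$> lsmod | grep edac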
+
+
+============================================================================
+EDAC sysfs INTERFACE
+
+EDAC presents a 'sysfs' interface for control, reporting and attribute
+reporting purposes.
+
+EDAC lives in the /sys/devices/system/edac directory. Within this directory
+there currently reside 2 'edac' components:
+
+       mc      memory controller(s) system
+       pci     PCI status system
+
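+
+With the core loaded, these show up as subdirectories; for example
+(a sketch of the expected layout):
+
+$> ls /sys/devices/system/edac
+mc  pci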
+
+============================================================================
+Memory Controller (mc) Model
+
+First, some background on the memory controller model abstracted in EDAC.
+Each mc device controls a set of DIMM memory modules. These modules are
+laid out in a Chip-Select Row (csrowX) and Channel table (chX). There can
+be multiple csrows and two channels.
+
+Memory controllers allow for several csrows, with 8 csrows being a typical value.
+Yet, the actual number of csrows depends on the electrical "loading"
+of a given motherboard, memory controller and DIMM characteristics.
+
+Dual channels allow for 128-bit data transfers from memory to the CPU.
+
+
+               Channel 0       Channel 1
+       ===================================
+       csrow0  | DIMM_A0       | DIMM_B0 |
+       csrow1  | DIMM_A0       | DIMM_B0 |
+       ===================================
+
+       ===================================
+       csrow2  | DIMM_A1       | DIMM_B1 |
+       csrow3  | DIMM_A1       | DIMM_B1 |
+       ===================================
+
+In the above example table there are 4 physical slots on the motherboard
+for memory DIMMs:
+
+       DIMM_A0
+       DIMM_B0
+       DIMM_A1
+       DIMM_B1
+
+Labels for these slots are usually silk screened on the motherboard. Slots
+labeled 'A' are channel 0 in this example. Slots labeled 'B'
+are channel 1. Notice that there are two csrows possible on a
+physical DIMM. These csrows are assigned
+based on the slot into which the memory DIMM is placed. Thus, when 1 DIMM
+is placed in each channel, the csrows span both DIMMs.
+
+Memory DIMMs come single or dual "ranked". A rank is a populated csrow.
+Thus, 2 single-ranked DIMMs placed in slots DIMM_A0 and DIMM_B0 above
+will have 1 csrow, csrow0; csrow1 will be empty. On the other hand,
+when 2 dual-ranked DIMMs are similarly placed, then both csrow0 and
+csrow1 will be populated. The pattern repeats itself for csrow2 and
+csrow3.
+
+The representation of the above is reflected in the directory tree
+in EDAC's sysfs interface. Starting in directory
+/sys/devices/system/edac/mc, each memory controller will be represented
+by its own 'mcX' directory, where 'X' is the index of the MC.
+
+
+       ..../edac/mc/
+                  |
+                  |->mc0
+                  |->mc1
+                  |->mc2
+                  ....
+
+Under each 'mcX' directory, each csrow is in turn represented by a
+'csrowX' directory, where 'X' is the csrow index:
+
+
+       .../mc/mc0/
+               |
+               |->csrow0
+               |->csrow2
+               |->csrow3
+               ....
+
+Notice that there is no csrow1, which indicates that csrow0 is
+composed of single-ranked DIMMs. This must hold for both
+channels in order for dual-channel mode to be operational. Since
+both csrow2 and csrow3 are populated, this indicates a dual-ranked
+set of DIMMs for channels 0 and 1.
+
+
+Within each of the 'mc', 'mcX' and 'csrowX' directories are several
+EDAC control and attribute files.
+
+
+============================================================================
+DIRECTORY 'mc'
+
+In directory 'mc' are EDAC system overall control and attribute files:
+
+
+Panic on UE control file:
+
+       'panic_on_ue'
+
+       An uncorrectable error will cause a machine panic.  This is usually
+       desirable.  It is a bad idea to continue when an uncorrectable error
+       occurs - it is indeterminate what was uncorrected and the operating
+       system context might be so mangled that continuing will lead to further
+       corruption. If the kernel has MCE configured, then EDAC will never
+       notice the UE.
+
+       LOAD TIME: module/kernel parameter: panic_on_ue=[0|1]
+
+       RUN TIME:  echo "1" >/sys/devices/system/edac/mc/panic_on_ue
+
+
+Log UE control file:
+
+       'log_ue'
+
+       Generate kernel messages describing uncorrectable errors.  These errors
+       are reported through the system message log system.  UE statistics
+       will be accumulated even when UE logging is disabled.
+
+       LOAD TIME: module/kernel parameter: log_ue=[0|1]
+
+       RUN TIME: echo "1" >/sys/devices/system/edac/mc/log_ue
+
+
+Log CE control file:
+
+       'log_ce'
+
+       Generate kernel messages describing correctable errors.  These
+       errors are reported through the system message log system.
+       CE statistics will be accumulated even when CE logging is disabled.
+
+       LOAD TIME: module/kernel parameter: log_ce=[0|1]
+
+       RUN TIME: echo "1" >/sys/devices/system/edac/mc/log_ce
+
+
+Polling period control file:
+
+       'poll_msec'
+
+       The time period, in milliseconds, for polling for error information.
+       Too small a value wastes resources.  Too large a value might delay
+       necessary handling of errors and might lose valuable information for
+       locating the error.  1000 milliseconds (once each second) is about
+       right for most uses.
+
+       LOAD TIME: module/kernel parameter: poll_msec=[value in milliseconds]
+
+       RUN TIME: echo "1000" >/sys/devices/system/edac/mc/poll_msec
+
+
+Module Version read-only attribute file:
+
+       'mc_version'
+
+       The EDAC CORE module's version and compile date are shown here to
+       indicate what EDAC is running.
+
+
+
+============================================================================
+'mcX' DIRECTORIES
+
+
+In the 'mcX' directories are EDAC control and attribute files for
+this 'X' instance of the memory controller:
+
+
+Counter reset control file:
+
+       'reset_counters'
+
+       This write-only control file will zero all the statistical counters
+       for UE and CE errors.  Zeroing the counters will also reset the timer
+       indicating how long since the last counter zero.  This is useful
+       for computing errors/time.  Since the counters are always reset at
+       driver initialization time, no module/kernel parameter is available.
+
+       RUN TIME: echo "anything" >/sys/devices/system/edac/mc/mc0/reset_counters
+
+               This resets the counters on memory controller 0
+
+
+Seconds since last counter reset control file:
+
+       'seconds_since_reset'
+
+       This attribute file displays how many seconds have elapsed since the
+       last counter reset. This can be used with the error counters to
+       measure error rates.
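+
+       For example, a rough CE rate (errors per second) for memory
+       controller 0 could be computed from this file and the ce_count
+       file described below (a sketch; assumes the mc0 paths shown
+       elsewhere in this document and that 'bc' is installed):
+
+       ce=$(cat /sys/devices/system/edac/mc/mc0/ce_count)
+       secs=$(cat /sys/devices/system/edac/mc/mc0/seconds_since_reset)
+       echo "scale=4; $ce / $secs" | bc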
+
+
+
+DIMM capability attribute file:
+
+       'edac_capability'
+
+       The EDAC (Error Detection and Correction) capabilities/modes of
+       the memory controller hardware.
+
+
+DIMM Current Capability attribute file:
+
+       'edac_current_capability'
+
+       The EDAC capabilities available with the hardware
+       configuration.  This may not be the same as "EDAC capability"
+       if the correct memory is not used.  If a memory controller is
+       capable of EDAC, but DIMMs without check bits are in use, then
+       Parity, SECDED, S4ECD4ED capabilities will not be available
+       even though the memory controller might be capable of those
+       modes with the proper memory loaded.
+
+
+Memory Type supported on this controller attribute file:
+
+       'supported_mem_type'
+
+       This attribute file displays the memory type, usually
+       buffered or unbuffered DIMMs.
+
+
+Memory Controller name attribute file:
+
+       'mc_name'
+
+       This attribute file displays the type of memory controller
+       that is being utilized.
+
+
+Memory Controller Module name attribute file:
+
+       'module_name'
+
+       This attribute file displays the memory controller module name,
+       version and build date, along with the name of the memory controller
+       hardware - some drivers work with multiple controllers and
+       this field shows which hardware is present.
+
+
+Total memory managed by this memory controller attribute file:
+
+       'size_mb'
+
+       This attribute file displays the amount of memory, in megabytes,
+       that this instance of the memory controller manages.
+
+
+Total Uncorrectable Errors count attribute file:
+
+       'ue_count'
+
+       This attribute file displays the total count of uncorrectable
+       errors that have occurred on this memory controller. If panic_on_ue
+       is set this counter will not have a chance to increment,
+       since EDAC will panic the system.
+
+
+Total UE count that had no information attribute file:
+
+       'ue_noinfo_count'
+
+       This attribute file displays the number of UEs that
+       have occurred with no information as to which DIMM
+       slot is having errors.
+
+
+Total Correctable Errors count attribute file:
+
+       'ce_count'
+
+       This attribute file displays the total count of correctable
+       errors that have occurred on this memory controller. This
+       count is very important to examine. CEs provide early
+       indications that a DIMM is beginning to fail. This count
+       field should be monitored for non-zero values, and any such
+       values should be reported to the system administrator.
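+
+       One way to check the CE counts of all memory controllers at a
+       glance (a sketch, assuming the sysfs layout described above):
+
+       grep . /sys/devices/system/edac/mc/mc*/ce_count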
+
+
+Total CE count that had no information attribute file:
+
+       'ce_noinfo_count'
+
+       This attribute file displays the number of CEs that
+       have occurred with no information as to which DIMM slot
+       is having errors. Memory is handicapped, but operational,
+       yet no information is available to indicate which slot
+       the failing memory is in. This count field should also
+       be monitored for non-zero values.
+
+Device Symlink:
+
+       'device'
+
+       Symlink to the memory controller device
+
+
+
+============================================================================
+'csrowX' DIRECTORIES
+
+In the 'csrowX' directories are EDAC control and attribute files for
+this 'X' instance of a csrow:
+
+
+Total Uncorrectable Errors count attribute file:
+
+       'ue_count'
+
+       This attribute file displays the total count of uncorrectable
+       errors that have occurred on this csrow. If panic_on_ue is set
+       this counter will not have a chance to increment, since EDAC
+       will panic the system.
+
+
+Total Correctable Errors count attribute file:
+
+       'ce_count'
+
+       This attribute file displays the total count of correctable
+       errors that have occurred on this csrow. This
+       count is very important to examine. CEs provide early
+       indications that a DIMM is beginning to fail. This count
+       field should be monitored for non-zero values, and any such
+       values should be reported to the system administrator.
+
+
+Total memory managed by this csrow attribute file:
+
+       'size_mb'
+
+       This attribute file displays the amount of memory, in megabytes,
+       that this csrow contains.
+
+
+Memory Type attribute file:
+
+       'mem_type'
+
+       This attribute file will display what type of memory is currently
+       on this csrow. Normally, either buffered or unbuffered memory.
+
+
+EDAC Mode of operation attribute file:
+
+       'edac_mode'
+
+       This attribute file will display what type of Error detection
+       and correction is being utilized.
+
+
+Device type attribute file:
+
+       'dev_type'
+
+       This attribute file will display what type of DIMM device is
+       being utilized. Example:  x4
+
+
+Channel 0 CE Count attribute file:
+
+       'ch0_ce_count'
+
+       This attribute file will display the count of CEs on this
+       DIMM located in channel 0.
+
+
+Channel 0 UE Count attribute file:
+
+       'ch0_ue_count'
+
+       This attribute file will display the count of UEs on this
+       DIMM located in channel 0.
+
+
+Channel 0 DIMM Label control file:
+
+       'ch0_dimm_label'
+
+       This control file allows this DIMM to have a label assigned
+       to it. With this label in the module, when errors occur
+       the output can provide the DIMM label in the system log.
+       This becomes vital for panic events to isolate the
+       cause of the UE event.
+
+       DIMM Labels must be assigned after booting, with information
+       that correctly identifies the physical slot with its
+       silk screen label. This information is currently very
+       motherboard specific and determination of this information
+       must occur in userland at this time.
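+
+       For example, a label could be assigned by writing it to this
+       control file (a sketch; 'mc0', 'csrow0' and the label text are
+       hypothetical and must match your motherboard's silk screen):
+
+       echo "DIMM_A0" \
+               >/sys/devices/system/edac/mc/mc0/csrow0/ch0_dimm_label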
+
+
+Channel 1 CE Count attribute file:
+
+       'ch1_ce_count'
+
+       This attribute file will display the count of CEs on this
+       DIMM located in channel 1.
+
+
+Channel 1 UE Count attribute file:
+
+       'ch1_ue_count'
+
+       This attribute file will display the count of UEs on this
+       DIMM located in channel 1.
+
+
+Channel 1 DIMM Label control file:
+
+       'ch1_dimm_label'
+
+       This control file allows this DIMM to have a label assigned
+       to it. With this label in the module, when errors occur
+       the output can provide the DIMM label in the system log.
+       This becomes vital for panic events to isolate the
+       cause of the UE event.
+
+       DIMM Labels must be assigned after booting, with information
+       that correctly identifies the physical slot with its
+       silk screen label. This information is currently very
+       motherboard specific and determination of this information
+       must occur in userland at this time.
+
+
+============================================================================
+SYSTEM LOGGING
+
+If logging for UEs and CEs is enabled, then the system logs will have
+error notices indicating errors that have been detected:
+
+MC0: CE page 0x283, offset 0xce0, grain 8, syndrome 0x6ec3, row 0,
+channel 1 "DIMM_B1": amd76x_edac
+
+MC0: CE page 0x1e5, offset 0xfb0, grain 8, syndrome 0xb741, row 0,
+channel 1 "DIMM_B1": amd76x_edac
+
+
+The structure of the message is:
+       the memory controller                   (MC0)
+       Error type                              (CE)
+       memory page                             (0x283)
+       offset in the page                      (0xce0)
+       the byte granularity                    (grain 8)
+               or resolution of the error
+       the error syndrome                      (0x6ec3)
+       memory row                              (row 0)
+       memory channel                          (channel 1)
+       DIMM label, if set previously           ("DIMM_B1")
+       and then an optional, driver-specific message that may
+               have additional information.
+
+Both UEs and CEs with no info will lack all but memory controller,
+error type, a notice of "no info" and then an optional,
+driver-specific error message.
+
+
+
+============================================================================
+PCI Bus Parity Detection
+
+
+On Header Type 00 devices, the primary status register is checked
+for any parity error regardless of whether Parity is enabled on the
+device.  (The spec indicates parity is generated in some cases.)
+On Header Type 01 bridges, the secondary status register is also
+checked to see if a parity error occurred on the bus on the other
+side of the bridge.
+
+
+SYSFS CONFIGURATION
+
+Under /sys/devices/system/edac/pci are control and attribute files as follows:
+
+
+Enable/Disable PCI Parity checking control file:
+
+       'check_pci_parity'
+
+
+       This control file enables or disables the PCI Bus Parity scanning
+       operation. Writing a 1 to this file enables the scanning. Writing
+       a 0 to this file disables the scanning.
+
+       Enable:
+       echo "1" >/sys/devices/system/edac/pci/check_pci_parity
+
+       Disable:
+       echo "0" >/sys/devices/system/edac/pci/check_pci_parity
+
+
+
+Panic on PCI PARITY Error:
+
+       'panic_on_pci_parity'
+
+
+       This control file enables or disables panicking when a parity
+       error has been detected.
+
+
+       module/kernel parameter: panic_on_pci_parity=[0|1]
+
+       Enable:
+       echo "1" >/sys/devices/system/edac/pci/panic_on_pci_parity
+
+       Disable:
+       echo "0" >/sys/devices/system/edac/pci/panic_on_pci_parity
+
+
+Parity Count:
+
+       'pci_parity_count'
+
+       This attribute file will display the number of parity errors that
+       have been detected.
+
+
+
+PCI Device Whitelist:
+
+       'pci_parity_whitelist'
+
+       This control file allows for an explicit list of PCI devices to be
+       scanned for parity errors. Only devices found on this list will
+       be examined.  The list is a line of hexadecimal VENDOR and DEVICE
+       ID tuples:
+
+       1022:7450,1434:16a6
+
+       One or more tuples can be inserted, separated by commas.
+
+       To write the above list, do the following as one command line:
+
+       echo "1022:7450,1434:16a6"
+               > /sys/devices/system/edac/pci/pci_parity_whitelist
+
+
+
+       To display what the whitelist is, simply 'cat' the same file.
+
+
+PCI Device Blacklist:
+
+       'pci_parity_blacklist'
+
+       This control file allows for a list of PCI devices to be
+       skipped for scanning.
+       The list is a line of hexadecimal VENDOR and DEVICE ID tuples:
+
+       1022:7450,1434:16a6
+
+       One or more tuples can be inserted, separated by commas.
+
+       To write the above list, do the following as one command line:
+
+       echo "1022:7450,1434:16a6"
+               > /sys/devices/system/edac/pci/pci_parity_blacklist
+
+
+       To display what the blacklist currently contains,
+       simply 'cat' the same file.
+
+=======================================================================
+
+PCI Vendor and Device IDs can be obtained with the lspci command. Using
+the -n option, lspci will display the vendor and device IDs. The system
+administrator will have to determine which devices should be scanned or
+skipped.
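+
+For example, one way to extract the unique VENDOR:DEVICE pairs from
+lspci output (a sketch; the field that holds the hexadecimal pair can
+vary between pciutils versions, so adjust the awk field if needed):
+
+       lspci -n | awk '{ print $3 }' | sort -u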
+
+
+
+The two lists (white and black) are prioritized. The blacklist has the lower
+priority and will NOT be utilized when a whitelist has been set.
+Turn OFF a whitelist with an empty echo command:
+
+       echo > /sys/devices/system/edac/pci/pci_parity_whitelist
+
+and any previous blacklist will be utilized.
+
diff --git a/Documentation/scsi/ChangeLog.megaraid_sas b/Documentation/scsi/ChangeLog.megaraid_sas
new file mode 100644 (file)
index 0000000..f8c16cb
--- /dev/null
@@ -0,0 +1,24 @@
+1 Release Date    : Mon Jan 23 14:09:01 PST 2006 - Sumant Patro <Sumant.Patro@lsil.com>
+2 Current Version : 00.00.02.02
+3 Older Version   : 00.00.02.01 
+
+i.     New template defined to represent each family of controllers (identified by processor used). 
+       The template will have definitions that will be initialised to appropriate values for a specific family of controllers. The template definition has four function pointers. During driver initialisation the function pointers will be set based on the controller family type. This change is done to support new controllers that have different processors and thus different register sets.
+
+               -Sumant Patro <Sumant.Patro@lsil.com>
+
+1 Release Date    : Mon Dec 19 14:36:26 PST 2005 - Sumant Patro <Sumant.Patro@lsil.com>
+2 Current Version : 00.00.02.00-rc4 
+3 Older Version   : 00.00.02.01 
+
+i.     Code reorganized to remove code duplication in megasas_build_cmd. 
+
+       "There's a lot of duplicate code in megasas_build_cmd.  Move that out of the different code paths and merge the remainder of megasas_build_cmd into megasas_queue_command"
+
+               - Christoph Hellwig <hch@lst.de>
+
+ii.    Defined MEGASAS_IOC_FIRMWARE32 for code paths that handle 32-bit applications on 64-bit systems.
+
+       "MEGASAS_IOC_FIRMWARE can't be redefined if CONFIG_COMPAT is set, we need to define a MEGASAS_IOC_FIRMWARE32 define so native binaries continue to work"
+
+               - Christoph Hellwig <hch@lst.de>
index 0aeef740a95a198eddad83113a2a301ac242b801..382b439b439e1ea8c16ee2326e1ab8469884455b 100644 (file)
@@ -1,5 +1,5 @@
 ====================================================================
-=             Adaptec Ultra320 Family Manager Set v1.3.11          =
+=             Adaptec Ultra320 Family Manager Set                  =
 =                                                                  =
 =                            README for                            =
 =                    The Linux Operating System                    =
@@ -63,6 +63,11 @@ The following information is available in this file:
                               68-pin)
 2. Version History
 
+   3.0   (December 1st, 2005)
+       - Updated driver to use SCSI transport class infrastructure
+       - Upported sequencer and core fixes from Adaptec's released
+         version 2.0.15 of the driver.
+
    1.3.11 (July 11, 2003)
         - Fix several deadlock issues.
         - Add 29320ALP and 39320B Id's.
@@ -194,7 +199,7 @@ The following information is available in this file:
           supported)
         - Support for the PCI-X standard up to 133MHz
         - Support for the PCI v2.2 standard
-       - Domain Validation
+        - Domain Validation
 
    2.2. Operating System Support:
         - Redhat Linux 7.2, 7.3, 8.0, Advanced Server 2.1
@@ -411,77 +416,53 @@ The following information is available in this file:
           http://www.adaptec.com.
 
 
-5. Contacting Adaptec
+5. Adaptec Customer Support
 
    A Technical Support Identification (TSID) Number is required for 
    Adaptec technical support.
     - The 12-digit TSID can be found on the white barcode-type label
-      included inside the box with your product. The TSID helps us 
+      included inside the box with your product.  The TSID helps us 
       provide more efficient service by accurately identifying your 
       product and support status.
+
    Support Options
     - Search the Adaptec Support Knowledgebase (ASK) at
       http://ask.adaptec.com for articles, troubleshooting tips, and
-      frequently asked questions for your product.
+      frequently asked questions about your product.
     - For support via Email, submit your question to Adaptec's 
-      Technical Support Specialists at http://ask.adaptec.com.
+      Technical Support Specialists at http://ask.adaptec.com/.
      
    North America
-    - Visit our Web site at http://www.adaptec.com.
-    - To speak with a Fibre Channel/RAID/External Storage Technical
-      Support Specialist, call 1-321-207-2000,
-      Hours: Monday-Friday, 3:00 A.M. to 5:00 P.M., PST.
-      (Not open on holidays)
-    - For Technical Support in all other technologies including 
-      SCSI, call 1-408-934-7274,
-      Hours: Monday-Friday, 6:00 A.M. to 5:00 P.M., PST.
-      (Not open on holidays)
-    - For after hours support, call 1-800-416-8066 ($99/call, 
-      $149/call on holidays)
-    - To order Adaptec products including software and cables, call
-      1-800-442-7274 or 1-408-957-7274. You can also visit our 
-      online store at http://www.adaptecstore.com
+    - Visit our Web site at http://www.adaptec.com/.
+    - For information about Adaptec's support options, call
+      408-957-2550, 24 hours a day, 7 days a week.
+    - To speak with a Technical Support Specialist,
+      * For hardware products, call 408-934-7274,
+        Monday to Friday, 3:00 am to 5:00 pm, PDT.
+      * For RAID and Fibre Channel products, call 321-207-2000,
+        Monday to Friday, 3:00 am to 5:00 pm, PDT.
+      To expedite your service, have your computer with you.
+    - To order Adaptec products, including accessories and cables,
+      call 408-957-7274.  To order cables online go to
+      http://www.adaptec.com/buy-cables/.
 
    Europe
-    - Visit our Web site at http://www.adaptec-europe.com.
-    - English and French: To speak with a Technical Support 
-      Specialist, call one of the following numbers:
-        - English: +32-2-352-3470
-        - French:  +32-2-352-3460
-      Hours: Monday-Thursday, 10:00 to 12:30, 13:30 to 17:30 CET 
-             Friday, 10:00 to 12:30, 13:30 to 16:30 CET
-    - German: To speak with a Technical Support Specialist,
-      call +49-89-456-40660
-      Hours: Monday-Thursday, 09:30 to 12:30, 13:30 to 16:30 CET
-             Friday, 09:30 to 12:30, 13:30 to 15:00 CET
-    - To order Adaptec products, including accessories and cables:
-        - UK: +0800-96-65-26 or fax +0800-731-02-95
-        - Other European countries: +32-11-300-379
-
-   Australia and New Zealand
-    - Visit our Web site at http://www.adaptec.com.au.
-    - To speak with a Technical Support Specialist, call 
-      +612-9416-0698
-      Hours: Monday-Friday, 10:00 A.M. to 4:30 P.M., EAT
-      (Not open on holidays)
+    - Visit our Web site at http://www.adaptec-europe.com/.
+    - To speak with a Technical Support Specialist, call, or email,
+      * German:  +49 89 4366 5522, Monday-Friday, 9:00-17:00 CET,
+        http://ask-de.adaptec.com/.
+      * French:  +49 89 4366 5533, Monday-Friday, 9:00-17:00 CET,
+       http://ask-fr.adaptec.com/.
+      * English: +49 89 4366 5544, Monday-Friday, 9:00-17:00 GMT,
+       http://ask.adaptec.com/.
+    - You can order Adaptec cables online at
+      http://www.adaptec.com/buy-cables/.
 
    Japan
+    - Visit our web site at http://www.adaptec.co.jp/.
     - To speak with a Technical Support Specialist, call 
-      +81-3-5308-6120 
-      Hours: Monday-Friday, 9:00 a.m. to 12:00 p.m., 1:00 p.m. to
-      6:00 p.m. TSC
-
-   Hong Kong and China
-    - To speak with a Technical Support Specialist, call 
-      +852-2869-7200
-      Hours: Monday-Friday, 10:00 to 17:00.
-    - Fax Technical Support at +852-2869-7100.
-
-   Singapore
-    - To speak with a Technical Support Specialist, call 
-      +65-245-7470
-      Hours: Monday-Friday, 10:00 to 17:00.
-    - Fax Technical Support at +852-2869-7100
+      +81 3 5308 6120, Monday-Friday, 9:00 a.m. to 12:00 p.m.,
+      1:00 p.m. to 6:00 p.m.
 
 -------------------------------------------------------------------
 /*
index 47e74ddc4bc9509b5856b2b8fef0067d6f4eb9d7..3481fcded4c2b5760bb40b79bde4602e369f2115 100644 (file)
@@ -309,81 +309,57 @@ The following information is available in this file:
    -----------------------------------------------------------------
 
    Example:
-   'options aic7xxx aic7xxx=verbose,no_probe,tag_info:{{},{,,10}},seltime:1"
+   'options aic7xxx aic7xxx=verbose,no_probe,tag_info:{{},{,,10}},seltime:1'
         enables verbose logging, Disable EISA/VLB probing,
         and set tag depth on Controller 1/Target 2 to 10 tags.
 
-3. Contacting Adaptec
+4. Adaptec Customer Support
 
    A Technical Support Identification (TSID) Number is required for 
    Adaptec technical support.
     - The 12-digit TSID can be found on the white barcode-type label
-      included inside the box with your product. The TSID helps us 
+      included inside the box with your product.  The TSID helps us 
       provide more efficient service by accurately identifying your 
       product and support status.
+
    Support Options
     - Search the Adaptec Support Knowledgebase (ASK) at
       http://ask.adaptec.com for articles, troubleshooting tips, and
-      frequently asked questions for your product.
+      frequently asked questions about your product.
     - For support via Email, submit your question to Adaptec's 
-      Technical Support Specialists at http://ask.adaptec.com.
+      Technical Support Specialists at http://ask.adaptec.com/.
      
    North America
-    - Visit our Web site at http://www.adaptec.com.
-    - To speak with a Fibre Channel/RAID/External Storage Technical
-      Support Specialist, call 1-321-207-2000,
-      Hours: Monday-Friday, 3:00 A.M. to 5:00 P.M., PST.
-      (Not open on holidays)
-    - For Technical Support in all other technologies including 
-      SCSI, call 1-408-934-7274,
-      Hours: Monday-Friday, 6:00 A.M. to 5:00 P.M., PST.
-      (Not open on holidays)
-    - For after hours support, call 1-800-416-8066 ($99/call, 
-      $149/call on holidays)
-    - To order Adaptec products including software and cables, call
-      1-800-442-7274 or 1-408-957-7274. You can also visit our 
-      online store at http://www.adaptecstore.com
+    - Visit our Web site at http://www.adaptec.com/.
+    - For information about Adaptec's support options, call
+      408-957-2550, 24 hours a day, 7 days a week.
+    - To speak with a Technical Support Specialist,
+      * For hardware products, call 408-934-7274,
+        Monday to Friday, 3:00 am to 5:00 pm, PDT.
+      * For RAID and Fibre Channel products, call 321-207-2000,
+        Monday to Friday, 3:00 am to 5:00 pm, PDT.
+      To expedite your service, have your computer with you.
+    - To order Adaptec products, including accessories and cables,
+      call 408-957-7274.  To order cables online go to
+      http://www.adaptec.com/buy-cables/.
 
    Europe
-    - Visit our Web site at http://www.adaptec-europe.com.
-    - English and French: To speak with a Technical Support 
-      Specialist, call one of the following numbers:
-        - English: +32-2-352-3470
-        - French:  +32-2-352-3460
-      Hours: Monday-Thursday, 10:00 to 12:30, 13:30 to 17:30 CET 
-             Friday, 10:00 to 12:30, 13:30 to 16:30 CET
-    - German: To speak with a Technical Support Specialist,
-      call +49-89-456-40660
-      Hours: Monday-Thursday, 09:30 to 12:30, 13:30 to 16:30 CET
-             Friday, 09:30 to 12:30, 13:30 to 15:00 CET
-    - To order Adaptec products, including accessories and cables:
-        - UK: +0800-96-65-26 or fax +0800-731-02-95
-        - Other European countries: +32-11-300-379
-
-   Australia and New Zealand
-    - Visit our Web site at http://www.adaptec.com.au.
-    - To speak with a Technical Support Specialist, call 
-      +612-9416-0698
-      Hours: Monday-Friday, 10:00 A.M. to 4:30 P.M., EAT
-      (Not open on holidays)
+    - Visit our Web site at http://www.adaptec-europe.com/.
+    - To speak with a Technical Support Specialist, call, or email,
+      * German:  +49 89 4366 5522, Monday-Friday, 9:00-17:00 CET,
+        http://ask-de.adaptec.com/.
+      * French:  +49 89 4366 5533, Monday-Friday, 9:00-17:00 CET,
+       http://ask-fr.adaptec.com/.
+      * English: +49 89 4366 5544, Monday-Friday, 9:00-17:00 GMT,
+       http://ask.adaptec.com/.
+    - You can order Adaptec cables online at
+      http://www.adaptec.com/buy-cables/.
 
    Japan
+    - Visit our web site at http://www.adaptec.co.jp/.
     - To speak with a Technical Support Specialist, call 
-      +81-3-5308-6120 
-      Hours: Monday-Friday, 9:00 a.m. to 12:00 p.m., 1:00 p.m. to
-      6:00 p.m. TSC
-
-   Hong Kong and China
-    - To speak with a Technical Support Specialist, call 
-      +852-2869-7200
-      Hours: Monday-Friday, 10:00 to 17:00.
-    - Fax Technical Support at +852-2869-7100.
-
-   Singapore
-    - To speak with a Technical Support Specialist, call 
-      +65-245-7470
-      Hours: Monday-Friday, 10:00 to 17:00.
-    - Fax Technical Support at +852-2869-7100
+      +81 3 5308 6120, Monday-Friday, 9:00 a.m. to 12:00 p.m.,
+      1:00 p.m. to 6:00 p.m.
 
 -------------------------------------------------------------------
 /*
index 6910c0136f8d7e23458ef0279fbf7b301c3fdd0b..391dd64363e75b560f68bdd69879946b07cf03da 100644 (file)
@@ -27,6 +27,7 @@ Currently, these files are in /proc/sys/vm:
 - laptop_mode
 - block_dump
 - drop-caches
+- zone_reclaim_mode
 
 ==============================================================
 
@@ -120,3 +121,20 @@ set to pcp->high/4.  The upper limit of batch is (PAGE_SHIFT * 8)
 
 The initial value is zero.  Kernel does not use this value at boot time to set
 the high water marks for each per cpu page list.
+
+===============================================================
+
+zone_reclaim_mode:
+
+This is set during bootup to 1 if it is determined that pages from
+remote zones will cause a significant performance reduction. The
+page allocator will then reclaim easily reusable pages (those page
+cache pages that are currently not used) before going off node.
+
+The user can override this setting. It may be beneficial to switch
+off zone reclaim if the system is used for a file server and all
+of memory should be used for caching files from disk.
+
+It may be beneficial to switch this on if one wants to do zone
+reclaim regardless of the numa distances in the system.
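+
+For example, zone reclaim could be switched off at run time
+(a sketch using the standard /proc/sys path for this file):
+
+       echo 0 > /proc/sys/vm/zone_reclaim_mode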
+
index 6d1b048c62a105c12518f5ae1b67ec4a517d29bc..a37a2b38a5574418fa5060b946343ebdf21b74b1 100644 (file)
@@ -867,6 +867,15 @@ L: ebtables-devel@lists.sourceforge.net
 W:     http://ebtables.sourceforge.net/
 S:     Maintained
 
+EDAC-CORE
+P:      Doug Thompson
+M:      norsk5@xmission.com, dthompson@linuxnetworx.com
+P:      Dave Peterson
+M:      dsp@llnl.gov, dave_peterson@pobox.com
+L:      bluesmoke-devel@lists.sourceforge.net
+W:      bluesmoke.sourceforge.net
+S:      Maintained
+
 EEPRO100 NETWORK DRIVER
 P:     Andrey V. Savochkin
 M:     saw@saw.sw.com.sg
@@ -1167,8 +1176,8 @@ T:        git kernel.org:/pub/scm/linux/kernel/git/aegl/linux-2.6.git
 S:     Maintained
 
 SN-IA64 (Itanium) SUB-PLATFORM
-P:     Greg Edwards
-M:     edwardsg@sgi.com
+P:     Jes Sorensen
+M:     jes@sgi.com
 L:     linux-altix@sgi.com
 L:     linux-ia64@vger.kernel.org
 W:     http://www.sgi.com/altix
@@ -1398,7 +1407,7 @@ IRDA SUBSYSTEM
 P:     Jean Tourrilhes
 L:     irda-users@lists.sourceforge.net (subscribers-only)
 W:     http://irda.sourceforge.net/
-S:     Maintained
+S:     Odd Fixes
 
 ISAPNP
 P:     Jaroslav Kysela
@@ -1696,11 +1705,13 @@ M: mtk-manpages@gmx.net
 W: ftp://ftp.kernel.org/pub/linux/docs/manpages
 S: Maintained
 
-MARVELL MV64340 ETHERNET DRIVER
+MARVELL MV643XX ETHERNET DRIVER
+P:     Dale Farnsworth
+M:     dale@farnsworth.org
 P:     Manish Lachwani
-L:     linux-mips@linux-mips.org
+M:     mlachwani@mvista.com
 L:     netdev@vger.kernel.org
-S:     Supported
+S:     Odd Fixes for 2.4; Maintained for 2.6.
 
 MATROX FRAMEBUFFER DRIVER
 P:     Petr Vandrovec
@@ -1841,7 +1852,14 @@ M:       yoshfuji@linux-ipv6.org
 P:     Patrick McHardy
 M:     kaber@coreworks.de
 L:     netdev@vger.kernel.org
-T:     git kernel.org:/pub/scm/linux/kernel/davem/net-2.6.git
+T:     git kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6.git
+S:     Maintained
+
+NETWORKING [WIRELESS]
+P:     John W. Linville
+M:     linville@tuxdriver.com
+L:     netdev@vger.kernel.org
+T:     git kernel.org:/pub/scm/linux/kernel/git/linville/wireless-2.6.git
 S:     Maintained
 
 IPVS
@@ -2534,11 +2552,11 @@ S:     Maintained
 
 TIPC NETWORK LAYER
 P:     Per Liden
-M:     per.liden@nospam.ericsson.com
+M:     per.liden@ericsson.com
 P:     Jon Maloy
-M:     jon.maloy@nospam.ericsson.com
+M:     jon.maloy@ericsson.com
 P:     Allan Stephens
-M:     allan.stephens@nospam.windriver.com
+M:     allan.stephens@windriver.com
 L:     tipc-discussion@lists.sourceforge.net
 W:     http://tipc.sourceforge.net/
 W:     http://tipc.cslab.ericsson.net/
index 01fe990d3e54b2c59d8c5fdfafbbea79a11ce1d7..7fb14f42a12519938dd6840e48f1ef3c8e66abf6 100644 (file)
@@ -960,7 +960,7 @@ osf_utimes(char __user *filename, struct timeval32 __user *tvs)
                        return -EFAULT;
        }
 
-       return do_utimes(filename, tvs ? ktvs : NULL);
+       return do_utimes(AT_FDCWD, filename, tvs ? ktvs : NULL);
 }
 
 #define MAX_SELECT_SECONDS \
index aaa47400eb9c19ce57a60731f81ca0b14addb1b3..db3389d8e0271d6cf0802db52754dae0074d8077 100644 (file)
@@ -334,7 +334,7 @@ __setup_mmu:        sub     r3, r4, #16384          @ Page directory size
                mov     r1, #0x12
                orr     r1, r1, #3 << 10
                add     r2, r3, #16384
-1:             cmp     r1, r8                  @ if virt > start of RAM
+1:             cmp     r1, r9                  @ if virt > start of RAM
                orrhs   r1, r1, #0x0c           @ set cacheable, bufferable
                cmp     r1, r10                 @ if virt > end of RAM
                bichs   r1, r1, #0x0c           @ clear cacheable, bufferable
index 6886001b53666acc69f1adc802e73b7c8883446f..4a8564f386af9e42f575c6b07199225704507ca1 100644 (file)
@@ -14,8 +14,7 @@ CONFIG_GENERIC_IOMAP=y
 # Code maturity level options
 #
 CONFIG_EXPERIMENTAL=y
-# CONFIG_CLEAN_COMPILE is not set
-CONFIG_BROKEN=y
+CONFIG_CLEAN_COMPILE=y
 CONFIG_BROKEN_ON_SMP=y
 
 #
@@ -360,7 +359,6 @@ CONFIG_BLK_DEV_IDE_BAST=y
 #
 # IEEE 1394 (FireWire) support
 #
-# CONFIG_IEEE1394 is not set
 
 #
 # I2O device support
@@ -781,7 +779,6 @@ CONFIG_SYSFS=y
 # CONFIG_DEVFS_FS is not set
 # CONFIG_DEVPTS_FS_XATTR is not set
 # CONFIG_TMPFS is not set
-# CONFIG_HUGETLBFS is not set
 # CONFIG_HUGETLB_PAGE is not set
 CONFIG_RAMFS=y
 
index 15468a0cf70e8de3583c2ad531f34e1a8992e884..c9aa878e610acd0fe9a7736456515113149987ce 100644 (file)
@@ -13,8 +13,7 @@ CONFIG_GENERIC_CALIBRATE_DELAY=y
 # Code maturity level options
 #
 CONFIG_EXPERIMENTAL=y
-# CONFIG_CLEAN_COMPILE is not set
-CONFIG_BROKEN=y
+CONFIG_CLEAN_COMPILE=y
 CONFIG_BROKEN_ON_SMP=y
 CONFIG_LOCK_KERNEL=y
 CONFIG_INIT_ENV_ARG_LIMIT=32
@@ -308,9 +307,7 @@ CONFIG_MTD_CFI_I2=y
 # CONFIG_MTD_ROM is not set
 # CONFIG_MTD_ABSENT is not set
 CONFIG_MTD_OBSOLETE_CHIPS=y
-# CONFIG_MTD_AMDSTD is not set
 CONFIG_MTD_SHARP=y
-# CONFIG_MTD_JEDEC is not set
 
 #
 # Mapping drivers for chip access
@@ -396,7 +393,6 @@ CONFIG_ATA_OVER_ETH=m
 #
 # IEEE 1394 (FireWire) support
 #
-# CONFIG_IEEE1394 is not set
 
 #
 # I2O device support
@@ -741,7 +737,6 @@ CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
 CONFIG_PROC_FS=y
 CONFIG_SYSFS=y
 CONFIG_TMPFS=y
-# CONFIG_HUGETLBFS is not set
 # CONFIG_HUGETLB_PAGE is not set
 CONFIG_RAMFS=y
 # CONFIG_RELAYFS_FS is not set
index fbe312e757cb17f4267b3ee8b5ee477280083d0b..3c73b707c2f387c3bb4aba5e69e9d32f700c67cd 100644 (file)
@@ -522,6 +522,7 @@ CONFIG_E100=y
 # CONFIG_DL2K is not set
 CONFIG_E1000=y
 CONFIG_E1000_NAPI=y
+# CONFIG_E1000_DISABLE_PACKET_SPLIT is not set
 # CONFIG_NS83820 is not set
 # CONFIG_HAMACHI is not set
 # CONFIG_YELLOWFIN is not set
index c07628ceaf0c46aa60ded382c7db861a116bbcbe..32467160a6df96040d5605bafc170d38b0889930 100644 (file)
@@ -493,6 +493,7 @@ CONFIG_NETDEVICES=y
 # CONFIG_DL2K is not set
 CONFIG_E1000=y
 CONFIG_E1000_NAPI=y
+# CONFIG_E1000_DISABLE_PACKET_SPLIT is not set
 # CONFIG_NS83820 is not set
 # CONFIG_HAMACHI is not set
 # CONFIG_YELLOWFIN is not set
index 18fa1615fdfdd7f64c6cd46152cb6d3a0d4aee25..b000da753c41bb60aaeffac9848f95d6e5b832a1 100644 (file)
@@ -415,6 +415,7 @@ CONFIG_NETDEVICES=y
 # CONFIG_DL2K is not set
 CONFIG_E1000=y
 CONFIG_E1000_NAPI=y
+# CONFIG_E1000_DISABLE_PACKET_SPLIT is not set
 # CONFIG_NS83820 is not set
 # CONFIG_HAMACHI is not set
 # CONFIG_YELLOWFIN is not set
index f50035de1fff2196e2dd03e966d3fdd7b8c3e3ee..46c79e1efe070644ed116f6b10eeb9a5cfbfa4ea 100644 (file)
@@ -496,6 +496,7 @@ CONFIG_NETDEVICES=y
 # CONFIG_DL2K is not set
 CONFIG_E1000=y
 CONFIG_E1000_NAPI=y
+# CONFIG_E1000_DISABLE_PACKET_SPLIT is not set
 # CONFIG_NS83820 is not set
 # CONFIG_HAMACHI is not set
 # CONFIG_YELLOWFIN is not set
index 18b3f372ed68a46eb9618abe67c5fac216b8b5d0..11959b705d822298380e2b2249cb1d0d1497d51f 100644 (file)
@@ -496,6 +496,7 @@ CONFIG_NETDEVICES=y
 # CONFIG_DL2K is not set
 CONFIG_E1000=y
 CONFIG_E1000_NAPI=y
+# CONFIG_E1000_DISABLE_PACKET_SPLIT is not set
 # CONFIG_NS83820 is not set
 # CONFIG_HAMACHI is not set
 # CONFIG_YELLOWFIN is not set
index 33f31080a98c4e0a54833f324d498beaf72ce13f..1964ccd8a71f7fbd9a039fe2982d590e9c132d89 100644 (file)
@@ -13,8 +13,7 @@ CONFIG_GENERIC_CALIBRATE_DELAY=y
 # Code maturity level options
 #
 CONFIG_EXPERIMENTAL=y
-# CONFIG_CLEAN_COMPILE is not set
-CONFIG_BROKEN=y
+CONFIG_CLEAN_COMPILE=y
 CONFIG_BROKEN_ON_SMP=y
 CONFIG_INIT_ENV_ARG_LIMIT=32
 
@@ -473,7 +472,6 @@ CONFIG_BLK_DEV_IDE_BAST=y
 #
 # IEEE 1394 (FireWire) support
 #
-# CONFIG_IEEE1394 is not set
 
 #
 # I2O device support
@@ -896,7 +894,6 @@ CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
 CONFIG_PROC_FS=y
 CONFIG_SYSFS=y
 # CONFIG_TMPFS is not set
-# CONFIG_HUGETLBFS is not set
 # CONFIG_HUGETLB_PAGE is not set
 CONFIG_RAMFS=y
 # CONFIG_RELAYFS_FS is not set
index 75e6f9a947133b5d2768cf8aa9ab73f1488e3625..d058e7c125681bccc28228f225d22525306925a7 100644 (file)
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
  *
- *  This file is included twice in entry-common.S
+ *  This file is included thrice in entry-common.S
  */
-#ifndef NR_syscalls
-#define NR_syscalls 328
-#else
-
-100:
-/* 0 */                .long   sys_restart_syscall
-               .long   sys_exit
-               .long   sys_fork_wrapper
-               .long   sys_read
-               .long   sys_write
-/* 5 */                .long   sys_open
-               .long   sys_close
-               .long   sys_ni_syscall          /* was sys_waitpid */
-               .long   sys_creat
-               .long   sys_link
-/* 10 */       .long   sys_unlink
-               .long   sys_execve_wrapper
-               .long   sys_chdir
-               .long   OBSOLETE(sys_time)      /* used by libc4 */
-               .long   sys_mknod
-/* 15 */       .long   sys_chmod
-               .long   sys_lchown16
-               .long   sys_ni_syscall          /* was sys_break */
-               .long   sys_ni_syscall          /* was sys_stat */
-               .long   sys_lseek
-/* 20 */       .long   sys_getpid
-               .long   sys_mount
-               .long   OBSOLETE(sys_oldumount) /* used by libc4 */
-               .long   sys_setuid16
-               .long   sys_getuid16
-/* 25 */       .long   OBSOLETE(sys_stime)
-               .long   sys_ptrace
-               .long   OBSOLETE(sys_alarm)     /* used by libc4 */
-               .long   sys_ni_syscall          /* was sys_fstat */
-               .long   sys_pause
-/* 30 */       .long   OBSOLETE(sys_utime)     /* used by libc4 */
-               .long   sys_ni_syscall          /* was sys_stty */
-               .long   sys_ni_syscall          /* was sys_getty */
-               .long   sys_access
-               .long   sys_nice
-/* 35 */       .long   sys_ni_syscall          /* was sys_ftime */
-               .long   sys_sync
-               .long   sys_kill
-               .long   sys_rename
-               .long   sys_mkdir
-/* 40 */       .long   sys_rmdir
-               .long   sys_dup
-               .long   sys_pipe
-               .long   sys_times
-               .long   sys_ni_syscall          /* was sys_prof */
-/* 45 */       .long   sys_brk
-               .long   sys_setgid16
-               .long   sys_getgid16
-               .long   sys_ni_syscall          /* was sys_signal */
-               .long   sys_geteuid16
-/* 50 */       .long   sys_getegid16
-               .long   sys_acct
-               .long   sys_umount
-               .long   sys_ni_syscall          /* was sys_lock */
-               .long   sys_ioctl
-/* 55 */       .long   sys_fcntl
-               .long   sys_ni_syscall          /* was sys_mpx */
-               .long   sys_setpgid
-               .long   sys_ni_syscall          /* was sys_ulimit */
-               .long   sys_ni_syscall          /* was sys_olduname */
-/* 60 */       .long   sys_umask
-               .long   sys_chroot
-               .long   sys_ustat
-               .long   sys_dup2
-               .long   sys_getppid
-/* 65 */       .long   sys_getpgrp
-               .long   sys_setsid
-               .long   sys_sigaction
-               .long   sys_ni_syscall          /* was sys_sgetmask */
-               .long   sys_ni_syscall          /* was sys_ssetmask */
-/* 70 */       .long   sys_setreuid16
-               .long   sys_setregid16
-               .long   sys_sigsuspend_wrapper
-               .long   sys_sigpending
-               .long   sys_sethostname
-/* 75 */       .long   sys_setrlimit
-               .long   OBSOLETE(sys_old_getrlimit) /* used by libc4 */
-               .long   sys_getrusage
-               .long   sys_gettimeofday
-               .long   sys_settimeofday
-/* 80 */       .long   sys_getgroups16
-               .long   sys_setgroups16
-               .long   OBSOLETE(old_select)    /* used by libc4 */
-               .long   sys_symlink
-               .long   sys_ni_syscall          /* was sys_lstat */
-/* 85 */       .long   sys_readlink
-               .long   sys_uselib
-               .long   sys_swapon
-               .long   sys_reboot
-               .long   OBSOLETE(old_readdir)   /* used by libc4 */
-/* 90 */       .long   OBSOLETE(old_mmap)      /* used by libc4 */
-               .long   sys_munmap
-               .long   sys_truncate
-               .long   sys_ftruncate
-               .long   sys_fchmod
-/* 95 */       .long   sys_fchown16
-               .long   sys_getpriority
-               .long   sys_setpriority
-               .long   sys_ni_syscall          /* was sys_profil */
-               .long   sys_statfs
-/* 100 */      .long   sys_fstatfs
-               .long   sys_ni_syscall
-               .long   OBSOLETE(sys_socketcall)
-               .long   sys_syslog
-               .long   sys_setitimer
-/* 105 */      .long   sys_getitimer
-               .long   sys_newstat
-               .long   sys_newlstat
-               .long   sys_newfstat
-               .long   sys_ni_syscall          /* was sys_uname */
-/* 110 */      .long   sys_ni_syscall          /* was sys_iopl */
-               .long   sys_vhangup
-               .long   sys_ni_syscall
-               .long   OBSOLETE(sys_syscall)   /* call a syscall */
-               .long   sys_wait4
-/* 115 */      .long   sys_swapoff
-               .long   sys_sysinfo
-               .long   OBSOLETE(ABI(sys_ipc, sys_oabi_ipc))
-               .long   sys_fsync
-               .long   sys_sigreturn_wrapper
-/* 120 */      .long   sys_clone_wrapper
-               .long   sys_setdomainname
-               .long   sys_newuname
-               .long   sys_ni_syscall
-               .long   sys_adjtimex
-/* 125 */      .long   sys_mprotect
-               .long   sys_sigprocmask
-               .long   sys_ni_syscall          /* was sys_create_module */
-               .long   sys_init_module
-               .long   sys_delete_module
-/* 130 */      .long   sys_ni_syscall          /* was sys_get_kernel_syms */
-               .long   sys_quotactl
-               .long   sys_getpgid
-               .long   sys_fchdir
-               .long   sys_bdflush
-/* 135 */      .long   sys_sysfs
-               .long   sys_personality
-               .long   sys_ni_syscall          /* .long        _sys_afs_syscall */
-               .long   sys_setfsuid16
-               .long   sys_setfsgid16
-/* 140 */      .long   sys_llseek
-               .long   sys_getdents
-               .long   sys_select
-               .long   sys_flock
-               .long   sys_msync
-/* 145 */      .long   sys_readv
-               .long   sys_writev
-               .long   sys_getsid
-               .long   sys_fdatasync
-               .long   sys_sysctl
-/* 150 */      .long   sys_mlock
-               .long   sys_munlock
-               .long   sys_mlockall
-               .long   sys_munlockall
-               .long   sys_sched_setparam
-/* 155 */      .long   sys_sched_getparam
-               .long   sys_sched_setscheduler
-               .long   sys_sched_getscheduler
-               .long   sys_sched_yield
-               .long   sys_sched_get_priority_max
-/* 160 */      .long   sys_sched_get_priority_min
-               .long   sys_sched_rr_get_interval
-               .long   sys_nanosleep
-               .long   sys_arm_mremap
-               .long   sys_setresuid16
-/* 165 */      .long   sys_getresuid16
-               .long   sys_ni_syscall
-               .long   sys_ni_syscall          /* was sys_query_module */
-               .long   sys_poll
-               .long   sys_nfsservctl
-/* 170 */      .long   sys_setresgid16
-               .long   sys_getresgid16
-               .long   sys_prctl
-               .long   sys_rt_sigreturn_wrapper
-               .long   sys_rt_sigaction
-/* 175 */      .long   sys_rt_sigprocmask
-               .long   sys_rt_sigpending
-               .long   sys_rt_sigtimedwait
-               .long   sys_rt_sigqueueinfo
-               .long   sys_rt_sigsuspend_wrapper
-/* 180 */      .long   ABI(sys_pread64, sys_oabi_pread64)
-               .long   ABI(sys_pwrite64, sys_oabi_pwrite64)
-               .long   sys_chown16
-               .long   sys_getcwd
-               .long   sys_capget
-/* 185 */      .long   sys_capset
-               .long   sys_sigaltstack_wrapper
-               .long   sys_sendfile
-               .long   sys_ni_syscall
-               .long   sys_ni_syscall
-/* 190 */      .long   sys_vfork_wrapper
-               .long   sys_getrlimit
-               .long   sys_mmap2
-               .long   ABI(sys_truncate64, sys_oabi_truncate64)
-               .long   ABI(sys_ftruncate64, sys_oabi_ftruncate64)
-/* 195 */      .long   ABI(sys_stat64, sys_oabi_stat64)
-               .long   ABI(sys_lstat64, sys_oabi_lstat64)
-               .long   ABI(sys_fstat64, sys_oabi_fstat64)
-               .long   sys_lchown
-               .long   sys_getuid
-/* 200 */      .long   sys_getgid
-               .long   sys_geteuid
-               .long   sys_getegid
-               .long   sys_setreuid
-               .long   sys_setregid
-/* 205 */      .long   sys_getgroups
-               .long   sys_setgroups
-               .long   sys_fchown
-               .long   sys_setresuid
-               .long   sys_getresuid
-/* 210 */      .long   sys_setresgid
-               .long   sys_getresgid
-               .long   sys_chown
-               .long   sys_setuid
-               .long   sys_setgid
-/* 215 */      .long   sys_setfsuid
-               .long   sys_setfsgid
-               .long   sys_getdents64
-               .long   sys_pivot_root
-               .long   sys_mincore
-/* 220 */      .long   sys_madvise
-               .long   ABI(sys_fcntl64, sys_oabi_fcntl64)
-               .long   sys_ni_syscall /* TUX */
-               .long   sys_ni_syscall
-               .long   sys_gettid
-/* 225 */      .long   ABI(sys_readahead, sys_oabi_readahead)
-               .long   sys_setxattr
-               .long   sys_lsetxattr
-               .long   sys_fsetxattr
-               .long   sys_getxattr
-/* 230 */      .long   sys_lgetxattr
-               .long   sys_fgetxattr
-               .long   sys_listxattr
-               .long   sys_llistxattr
-               .long   sys_flistxattr
-/* 235 */      .long   sys_removexattr
-               .long   sys_lremovexattr
-               .long   sys_fremovexattr
-               .long   sys_tkill
-               .long   sys_sendfile64
-/* 240 */      .long   sys_futex
-               .long   sys_sched_setaffinity
-               .long   sys_sched_getaffinity
-               .long   sys_io_setup
-               .long   sys_io_destroy
-/* 245 */      .long   sys_io_getevents
-               .long   sys_io_submit
-               .long   sys_io_cancel
-               .long   sys_exit_group
-               .long   sys_lookup_dcookie
-/* 250 */      .long   sys_epoll_create
-               .long   ABI(sys_epoll_ctl, sys_oabi_epoll_ctl)
-               .long   ABI(sys_epoll_wait, sys_oabi_epoll_wait)
-               .long   sys_remap_file_pages
-               .long   sys_ni_syscall  /* sys_set_thread_area */
-/* 255 */      .long   sys_ni_syscall  /* sys_get_thread_area */
-               .long   sys_set_tid_address
-               .long   sys_timer_create
-               .long   sys_timer_settime
-               .long   sys_timer_gettime
-/* 260 */      .long   sys_timer_getoverrun
-               .long   sys_timer_delete
-               .long   sys_clock_settime
-               .long   sys_clock_gettime
-               .long   sys_clock_getres
-/* 265 */      .long   sys_clock_nanosleep
-               .long   sys_statfs64_wrapper
-               .long   sys_fstatfs64_wrapper
-               .long   sys_tgkill
-               .long   sys_utimes
-/* 270 */      .long   sys_arm_fadvise64_64
-               .long   sys_pciconfig_iobase
-               .long   sys_pciconfig_read
-               .long   sys_pciconfig_write
-               .long   sys_mq_open
-/* 275 */      .long   sys_mq_unlink
-               .long   sys_mq_timedsend
-               .long   sys_mq_timedreceive
-               .long   sys_mq_notify
-               .long   sys_mq_getsetattr
-/* 280 */      .long   sys_waitid
-               .long   sys_socket
-               .long   sys_bind
-               .long   sys_connect
-               .long   sys_listen
-/* 285 */      .long   sys_accept
-               .long   sys_getsockname
-               .long   sys_getpeername
-               .long   sys_socketpair
-               .long   sys_send
-/* 290 */      .long   sys_sendto
-               .long   sys_recv
-               .long   sys_recvfrom
-               .long   sys_shutdown
-               .long   sys_setsockopt
-/* 295 */      .long   sys_getsockopt
-               .long   sys_sendmsg
-               .long   sys_recvmsg
-               .long   ABI(sys_semop, sys_oabi_semop)
-               .long   sys_semget
-/* 300 */      .long   sys_semctl
-               .long   sys_msgsnd
-               .long   sys_msgrcv
-               .long   sys_msgget
-               .long   sys_msgctl
-/* 305 */      .long   sys_shmat
-               .long   sys_shmdt
-               .long   sys_shmget
-               .long   sys_shmctl
-               .long   sys_add_key
-/* 310 */      .long   sys_request_key
-               .long   sys_keyctl
-               .long   ABI(sys_semtimedop, sys_oabi_semtimedop)
-/* vserver */  .long   sys_ni_syscall
-               .long   sys_ioprio_set
-/* 315 */      .long   sys_ioprio_get
-               .long   sys_inotify_init
-               .long   sys_inotify_add_watch
-               .long   sys_inotify_rm_watch
-               .long   sys_mbind
-/* 320 */      .long   sys_get_mempolicy
-               .long   sys_set_mempolicy
-
-               .rept   NR_syscalls - (. - 100b) / 4
-                       .long   sys_ni_syscall
-               .endr
+/* 0 */                CALL(sys_restart_syscall)
+               CALL(sys_exit)
+               CALL(sys_fork_wrapper)
+               CALL(sys_read)
+               CALL(sys_write)
+/* 5 */                CALL(sys_open)
+               CALL(sys_close)
+               CALL(sys_ni_syscall)            /* was sys_waitpid */
+               CALL(sys_creat)
+               CALL(sys_link)
+/* 10 */       CALL(sys_unlink)
+               CALL(sys_execve_wrapper)
+               CALL(sys_chdir)
+               CALL(OBSOLETE(sys_time))        /* used by libc4 */
+               CALL(sys_mknod)
+/* 15 */       CALL(sys_chmod)
+               CALL(sys_lchown16)
+               CALL(sys_ni_syscall)            /* was sys_break */
+               CALL(sys_ni_syscall)            /* was sys_stat */
+               CALL(sys_lseek)
+/* 20 */       CALL(sys_getpid)
+               CALL(sys_mount)
+               CALL(OBSOLETE(sys_oldumount))   /* used by libc4 */
+               CALL(sys_setuid16)
+               CALL(sys_getuid16)
+/* 25 */       CALL(OBSOLETE(sys_stime))
+               CALL(sys_ptrace)
+               CALL(OBSOLETE(sys_alarm))       /* used by libc4 */
+               CALL(sys_ni_syscall)            /* was sys_fstat */
+               CALL(sys_pause)
+/* 30 */       CALL(OBSOLETE(sys_utime))       /* used by libc4 */
+               CALL(sys_ni_syscall)            /* was sys_stty */
+               CALL(sys_ni_syscall)            /* was sys_getty */
+               CALL(sys_access)
+               CALL(sys_nice)
+/* 35 */       CALL(sys_ni_syscall)            /* was sys_ftime */
+               CALL(sys_sync)
+               CALL(sys_kill)
+               CALL(sys_rename)
+               CALL(sys_mkdir)
+/* 40 */       CALL(sys_rmdir)
+               CALL(sys_dup)
+               CALL(sys_pipe)
+               CALL(sys_times)
+               CALL(sys_ni_syscall)            /* was sys_prof */
+/* 45 */       CALL(sys_brk)
+               CALL(sys_setgid16)
+               CALL(sys_getgid16)
+               CALL(sys_ni_syscall)            /* was sys_signal */
+               CALL(sys_geteuid16)
+/* 50 */       CALL(sys_getegid16)
+               CALL(sys_acct)
+               CALL(sys_umount)
+               CALL(sys_ni_syscall)            /* was sys_lock */
+               CALL(sys_ioctl)
+/* 55 */       CALL(sys_fcntl)
+               CALL(sys_ni_syscall)            /* was sys_mpx */
+               CALL(sys_setpgid)
+               CALL(sys_ni_syscall)            /* was sys_ulimit */
+               CALL(sys_ni_syscall)            /* was sys_olduname */
+/* 60 */       CALL(sys_umask)
+               CALL(sys_chroot)
+               CALL(sys_ustat)
+               CALL(sys_dup2)
+               CALL(sys_getppid)
+/* 65 */       CALL(sys_getpgrp)
+               CALL(sys_setsid)
+               CALL(sys_sigaction)
+               CALL(sys_ni_syscall)            /* was sys_sgetmask */
+               CALL(sys_ni_syscall)            /* was sys_ssetmask */
+/* 70 */       CALL(sys_setreuid16)
+               CALL(sys_setregid16)
+               CALL(sys_sigsuspend_wrapper)
+               CALL(sys_sigpending)
+               CALL(sys_sethostname)
+/* 75 */       CALL(sys_setrlimit)
+               CALL(OBSOLETE(sys_old_getrlimit)) /* used by libc4 */
+               CALL(sys_getrusage)
+               CALL(sys_gettimeofday)
+               CALL(sys_settimeofday)
+/* 80 */       CALL(sys_getgroups16)
+               CALL(sys_setgroups16)
+               CALL(OBSOLETE(old_select))      /* used by libc4 */
+               CALL(sys_symlink)
+               CALL(sys_ni_syscall)            /* was sys_lstat */
+/* 85 */       CALL(sys_readlink)
+               CALL(sys_uselib)
+               CALL(sys_swapon)
+               CALL(sys_reboot)
+               CALL(OBSOLETE(old_readdir))     /* used by libc4 */
+/* 90 */       CALL(OBSOLETE(old_mmap))        /* used by libc4 */
+               CALL(sys_munmap)
+               CALL(sys_truncate)
+               CALL(sys_ftruncate)
+               CALL(sys_fchmod)
+/* 95 */       CALL(sys_fchown16)
+               CALL(sys_getpriority)
+               CALL(sys_setpriority)
+               CALL(sys_ni_syscall)            /* was sys_profil */
+               CALL(sys_statfs)
+/* 100 */      CALL(sys_fstatfs)
+               CALL(sys_ni_syscall)
+               CALL(OBSOLETE(sys_socketcall))
+               CALL(sys_syslog)
+               CALL(sys_setitimer)
+/* 105 */      CALL(sys_getitimer)
+               CALL(sys_newstat)
+               CALL(sys_newlstat)
+               CALL(sys_newfstat)
+               CALL(sys_ni_syscall)            /* was sys_uname */
+/* 110 */      CALL(sys_ni_syscall)            /* was sys_iopl */
+               CALL(sys_vhangup)
+               CALL(sys_ni_syscall)
+               CALL(OBSOLETE(sys_syscall))     /* call a syscall */
+               CALL(sys_wait4)
+/* 115 */      CALL(sys_swapoff)
+               CALL(sys_sysinfo)
+               CALL(OBSOLETE(ABI(sys_ipc, sys_oabi_ipc)))
+               CALL(sys_fsync)
+               CALL(sys_sigreturn_wrapper)
+/* 120 */      CALL(sys_clone_wrapper)
+               CALL(sys_setdomainname)
+               CALL(sys_newuname)
+               CALL(sys_ni_syscall)
+               CALL(sys_adjtimex)
+/* 125 */      CALL(sys_mprotect)
+               CALL(sys_sigprocmask)
+               CALL(sys_ni_syscall)            /* was sys_create_module */
+               CALL(sys_init_module)
+               CALL(sys_delete_module)
+/* 130 */      CALL(sys_ni_syscall)            /* was sys_get_kernel_syms */
+               CALL(sys_quotactl)
+               CALL(sys_getpgid)
+               CALL(sys_fchdir)
+               CALL(sys_bdflush)
+/* 135 */      CALL(sys_sysfs)
+               CALL(sys_personality)
+               CALL(sys_ni_syscall)            /* CALL(_sys_afs_syscall) */
+               CALL(sys_setfsuid16)
+               CALL(sys_setfsgid16)
+/* 140 */      CALL(sys_llseek)
+               CALL(sys_getdents)
+               CALL(sys_select)
+               CALL(sys_flock)
+               CALL(sys_msync)
+/* 145 */      CALL(sys_readv)
+               CALL(sys_writev)
+               CALL(sys_getsid)
+               CALL(sys_fdatasync)
+               CALL(sys_sysctl)
+/* 150 */      CALL(sys_mlock)
+               CALL(sys_munlock)
+               CALL(sys_mlockall)
+               CALL(sys_munlockall)
+               CALL(sys_sched_setparam)
+/* 155 */      CALL(sys_sched_getparam)
+               CALL(sys_sched_setscheduler)
+               CALL(sys_sched_getscheduler)
+               CALL(sys_sched_yield)
+               CALL(sys_sched_get_priority_max)
+/* 160 */      CALL(sys_sched_get_priority_min)
+               CALL(sys_sched_rr_get_interval)
+               CALL(sys_nanosleep)
+               CALL(sys_arm_mremap)
+               CALL(sys_setresuid16)
+/* 165 */      CALL(sys_getresuid16)
+               CALL(sys_ni_syscall)
+               CALL(sys_ni_syscall)            /* was sys_query_module */
+               CALL(sys_poll)
+               CALL(sys_nfsservctl)
+/* 170 */      CALL(sys_setresgid16)
+               CALL(sys_getresgid16)
+               CALL(sys_prctl)
+               CALL(sys_rt_sigreturn_wrapper)
+               CALL(sys_rt_sigaction)
+/* 175 */      CALL(sys_rt_sigprocmask)
+               CALL(sys_rt_sigpending)
+               CALL(sys_rt_sigtimedwait)
+               CALL(sys_rt_sigqueueinfo)
+               CALL(sys_rt_sigsuspend_wrapper)
+/* 180 */      CALL(ABI(sys_pread64, sys_oabi_pread64))
+               CALL(ABI(sys_pwrite64, sys_oabi_pwrite64))
+               CALL(sys_chown16)
+               CALL(sys_getcwd)
+               CALL(sys_capget)
+/* 185 */      CALL(sys_capset)
+               CALL(sys_sigaltstack_wrapper)
+               CALL(sys_sendfile)
+               CALL(sys_ni_syscall)
+               CALL(sys_ni_syscall)
+/* 190 */      CALL(sys_vfork_wrapper)
+               CALL(sys_getrlimit)
+               CALL(sys_mmap2)
+               CALL(ABI(sys_truncate64, sys_oabi_truncate64))
+               CALL(ABI(sys_ftruncate64, sys_oabi_ftruncate64))
+/* 195 */      CALL(ABI(sys_stat64, sys_oabi_stat64))
+               CALL(ABI(sys_lstat64, sys_oabi_lstat64))
+               CALL(ABI(sys_fstat64, sys_oabi_fstat64))
+               CALL(sys_lchown)
+               CALL(sys_getuid)
+/* 200 */      CALL(sys_getgid)
+               CALL(sys_geteuid)
+               CALL(sys_getegid)
+               CALL(sys_setreuid)
+               CALL(sys_setregid)
+/* 205 */      CALL(sys_getgroups)
+               CALL(sys_setgroups)
+               CALL(sys_fchown)
+               CALL(sys_setresuid)
+               CALL(sys_getresuid)
+/* 210 */      CALL(sys_setresgid)
+               CALL(sys_getresgid)
+               CALL(sys_chown)
+               CALL(sys_setuid)
+               CALL(sys_setgid)
+/* 215 */      CALL(sys_setfsuid)
+               CALL(sys_setfsgid)
+               CALL(sys_getdents64)
+               CALL(sys_pivot_root)
+               CALL(sys_mincore)
+/* 220 */      CALL(sys_madvise)
+               CALL(ABI(sys_fcntl64, sys_oabi_fcntl64))
+               CALL(sys_ni_syscall) /* TUX */
+               CALL(sys_ni_syscall)
+               CALL(sys_gettid)
+/* 225 */      CALL(ABI(sys_readahead, sys_oabi_readahead))
+               CALL(sys_setxattr)
+               CALL(sys_lsetxattr)
+               CALL(sys_fsetxattr)
+               CALL(sys_getxattr)
+/* 230 */      CALL(sys_lgetxattr)
+               CALL(sys_fgetxattr)
+               CALL(sys_listxattr)
+               CALL(sys_llistxattr)
+               CALL(sys_flistxattr)
+/* 235 */      CALL(sys_removexattr)
+               CALL(sys_lremovexattr)
+               CALL(sys_fremovexattr)
+               CALL(sys_tkill)
+               CALL(sys_sendfile64)
+/* 240 */      CALL(sys_futex)
+               CALL(sys_sched_setaffinity)
+               CALL(sys_sched_getaffinity)
+               CALL(sys_io_setup)
+               CALL(sys_io_destroy)
+/* 245 */      CALL(sys_io_getevents)
+               CALL(sys_io_submit)
+               CALL(sys_io_cancel)
+               CALL(sys_exit_group)
+               CALL(sys_lookup_dcookie)
+/* 250 */      CALL(sys_epoll_create)
+               CALL(ABI(sys_epoll_ctl, sys_oabi_epoll_ctl))
+               CALL(ABI(sys_epoll_wait, sys_oabi_epoll_wait))
+               CALL(sys_remap_file_pages)
+               CALL(sys_ni_syscall)    /* sys_set_thread_area */
+/* 255 */      CALL(sys_ni_syscall)    /* sys_get_thread_area */
+               CALL(sys_set_tid_address)
+               CALL(sys_timer_create)
+               CALL(sys_timer_settime)
+               CALL(sys_timer_gettime)
+/* 260 */      CALL(sys_timer_getoverrun)
+               CALL(sys_timer_delete)
+               CALL(sys_clock_settime)
+               CALL(sys_clock_gettime)
+               CALL(sys_clock_getres)
+/* 265 */      CALL(sys_clock_nanosleep)
+               CALL(sys_statfs64_wrapper)
+               CALL(sys_fstatfs64_wrapper)
+               CALL(sys_tgkill)
+               CALL(sys_utimes)
+/* 270 */      CALL(sys_arm_fadvise64_64)
+               CALL(sys_pciconfig_iobase)
+               CALL(sys_pciconfig_read)
+               CALL(sys_pciconfig_write)
+               CALL(sys_mq_open)
+/* 275 */      CALL(sys_mq_unlink)
+               CALL(sys_mq_timedsend)
+               CALL(sys_mq_timedreceive)
+               CALL(sys_mq_notify)
+               CALL(sys_mq_getsetattr)
+/* 280 */      CALL(sys_waitid)
+               CALL(sys_socket)
+               CALL(sys_bind)
+               CALL(sys_connect)
+               CALL(sys_listen)
+/* 285 */      CALL(sys_accept)
+               CALL(sys_getsockname)
+               CALL(sys_getpeername)
+               CALL(sys_socketpair)
+               CALL(sys_send)
+/* 290 */      CALL(sys_sendto)
+               CALL(sys_recv)
+               CALL(sys_recvfrom)
+               CALL(sys_shutdown)
+               CALL(sys_setsockopt)
+/* 295 */      CALL(sys_getsockopt)
+               CALL(sys_sendmsg)
+               CALL(sys_recvmsg)
+               CALL(ABI(sys_semop, sys_oabi_semop))
+               CALL(sys_semget)
+/* 300 */      CALL(sys_semctl)
+               CALL(sys_msgsnd)
+               CALL(sys_msgrcv)
+               CALL(sys_msgget)
+               CALL(sys_msgctl)
+/* 305 */      CALL(sys_shmat)
+               CALL(sys_shmdt)
+               CALL(sys_shmget)
+               CALL(sys_shmctl)
+               CALL(sys_add_key)
+/* 310 */      CALL(sys_request_key)
+               CALL(sys_keyctl)
+               CALL(ABI(sys_semtimedop, sys_oabi_semtimedop))
+/* vserver */  CALL(sys_ni_syscall)
+               CALL(sys_ioprio_set)
+/* 315 */      CALL(sys_ioprio_get)
+               CALL(sys_inotify_init)
+               CALL(sys_inotify_add_watch)
+               CALL(sys_inotify_rm_watch)
+               CALL(sys_mbind)
+/* 320 */      CALL(sys_get_mempolicy)
+               CALL(sys_set_mempolicy)
+#ifndef syscalls_counted
+.equ syscalls_padding, ((NR_syscalls + 3) & ~3) - NR_syscalls
+#define syscalls_counted
 #endif
+.rept syscalls_padding
+               CALL(sys_ni_syscall)
+.endr
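
The new syscalls_padding arithmetic above rounds the table length up to the next multiple of four and fills the tail with sys_ni_syscall entries. A tiny standalone sketch of that rounding (the entry count is only an assumed figure for illustration):

#include <stdio.h>

int main(void)
{
        int nr_syscalls = 322;                  /* assumed count, for illustration */
        int padded = (nr_syscalls + 3) & ~3;    /* round up to a multiple of 4 */
        printf("padded length %d, %d sys_ni_syscall slots appended\n",
               padded, padded - nr_syscalls);   /* 324, 2 */
        return 0;
}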
index 874e6bb7940578fd86878e69ae9ddc7b91eee03f..d401d908c46361264452bd925ffd1ca3c5c1a3c5 100644 (file)
@@ -735,8 +735,11 @@ __kuser_cmpxchg:                           @ 0xffff0fc0
         * The kernel itself must perform the operation.
         * A special ghost syscall is used for that (see traps.c).
         */
+       stmfd   sp!, {r7, lr}
+       mov     r7, #0xff00             @ 0xfff0 into r7 for EABI
+       orr     r7, r7, #0xf0
        swi     #0x9ffff0
-       mov     pc, lr
+       ldmfd   sp!, {r7, pc}
 
 #elif __LINUX_ARM_ARCH__ < 6
 
index 2b92ce85f97feef93415fd25c9260afa6367a6ae..dbcb11a31f78167b5577c55cfe4660c922043a20 100644 (file)
@@ -87,7 +87,11 @@ ENTRY(ret_from_fork)
        b       ret_slow_syscall
        
 
+       .equ NR_syscalls,0
+#define CALL(x) .equ NR_syscalls,NR_syscalls+1
 #include "calls.S"
+#undef CALL
+#define CALL(x) .long x
 
 /*=============================================================================
  * SWI handler
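
The small hunk above is the core of the change: NR_syscalls is no longer a hard-coded constant in calls.S but is computed by including that file with CALL() first defined as a counter and then redefined as a ".long x" table entry. A rough C analogue of this double-expansion ("X-macro") trick, with purely illustrative names:

#include <stdio.h>

typedef long (*syscall_fn)(void);

static long sys_restart_syscall(void) { return 0; }
static long sys_exit(void)            { return 1; }
static long sys_fork(void)            { return 2; }

/* The list is written once ... */
#define SYSCALL_LIST              \
        CALL(sys_restart_syscall) \
        CALL(sys_exit)            \
        CALL(sys_fork)

/* ... expanded a first time only to count its entries
 * (cf. "#define CALL(x) .equ NR_syscalls,NR_syscalls+1") ... */
#define CALL(x) +1
enum { NR_syscalls = 0 SYSCALL_LIST };
#undef CALL

/* ... and a second time to emit the table itself
 * (cf. "#define CALL(x) .long x"). */
#define CALL(x) x,
static const syscall_fn sys_call_table[] = { SYSCALL_LIST };
#undef CALL

int main(void)
{
        printf("NR_syscalls = %d\n", NR_syscalls);          /* 3 */
        printf("table[2]()  = %ld\n", sys_call_table[2]()); /* 2 */
        return 0;
}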
index 765922bcf9e7c2f37b0bf606ad57a6ac3cd8d7dd..a0cd0a90a10d06f4bd266db044937796337c1eb3 100644 (file)
 #define SWI_SYS_SIGRETURN      (0xef000000|(__NR_sigreturn))
 #define SWI_SYS_RT_SIGRETURN   (0xef000000|(__NR_rt_sigreturn))
 
+/*
+ * With EABI, the syscall number has to be loaded into r7.
+ */
+#define MOV_R7_NR_SIGRETURN    (0xe3a07000 | (__NR_sigreturn - __NR_SYSCALL_BASE))
+#define MOV_R7_NR_RT_SIGRETURN (0xe3a07000 | (__NR_rt_sigreturn - __NR_SYSCALL_BASE))
+
 /*
  * For Thumb syscalls, we pass the syscall number via r7.  We therefore
  * need two 16-bit instructions.
@@ -36,9 +42,9 @@
 #define SWI_THUMB_SIGRETURN    (0xdf00 << 16 | 0x2700 | (__NR_sigreturn - __NR_SYSCALL_BASE))
 #define SWI_THUMB_RT_SIGRETURN (0xdf00 << 16 | 0x2700 | (__NR_rt_sigreturn - __NR_SYSCALL_BASE))
 
-const unsigned long sigreturn_codes[4] = {
-       SWI_SYS_SIGRETURN,      SWI_THUMB_SIGRETURN,
-       SWI_SYS_RT_SIGRETURN,   SWI_THUMB_RT_SIGRETURN
+const unsigned long sigreturn_codes[7] = {
+       MOV_R7_NR_SIGRETURN,    SWI_SYS_SIGRETURN,    SWI_THUMB_SIGRETURN,
+       MOV_R7_NR_RT_SIGRETURN, SWI_SYS_RT_SIGRETURN, SWI_THUMB_RT_SIGRETURN,
 };
 
 static int do_signal(sigset_t *oldset, struct pt_regs * regs, int syscall);
@@ -189,7 +195,7 @@ struct aux_sigframe {
 struct sigframe {
        struct sigcontext sc;
        unsigned long extramask[_NSIG_WORDS-1];
-       unsigned long retcode;
+       unsigned long retcode[2];
        struct aux_sigframe aux __attribute__((aligned(8)));
 };
 
@@ -198,7 +204,7 @@ struct rt_sigframe {
        void __user *puc;
        struct siginfo info;
        struct ucontext uc;
-       unsigned long retcode;
+       unsigned long retcode[2];
        struct aux_sigframe aux __attribute__((aligned(8)));
 };
 
@@ -436,12 +442,13 @@ setup_return(struct pt_regs *regs, struct k_sigaction *ka,
        if (ka->sa.sa_flags & SA_RESTORER) {
                retcode = (unsigned long)ka->sa.sa_restorer;
        } else {
-               unsigned int idx = thumb;
+               unsigned int idx = thumb << 1;
 
                if (ka->sa.sa_flags & SA_SIGINFO)
-                       idx += 2;
+                       idx += 3;
 
-               if (__put_user(sigreturn_codes[idx], rc))
+               if (__put_user(sigreturn_codes[idx],   rc) ||
+                   __put_user(sigreturn_codes[idx+1], rc+1))
                        return 1;
 
                if (cpsr & MODE32_BIT) {
@@ -456,7 +463,7 @@ setup_return(struct pt_regs *regs, struct k_sigaction *ka,
                         * the return code written onto the stack.
                         */
                        flush_icache_range((unsigned long)rc,
-                                          (unsigned long)(rc + 1));
+                                          (unsigned long)(rc + 2));
 
                        retcode = ((unsigned long)rc) + thumb;
                }
@@ -488,7 +495,7 @@ setup_frame(int usig, struct k_sigaction *ka, sigset_t *set, struct pt_regs *reg
        }
 
        if (err == 0)
-               err = setup_return(regs, ka, &frame->retcode, frame, usig);
+               err = setup_return(regs, ka, frame->retcode, frame, usig);
 
        return err;
 }
@@ -522,7 +529,7 @@ setup_rt_frame(int usig, struct k_sigaction *ka, siginfo_t *info,
        err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
 
        if (err == 0)
-               err = setup_return(regs, ka, &frame->retcode, frame, usig);
+               err = setup_return(regs, ka, frame->retcode, frame, usig);
 
        if (err == 0) {
                /*
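
A short sketch of how the enlarged sigreturn_codes[] table is indexed, taken directly from the setup_return() logic above (idx = thumb << 1, plus 3 for an SA_SIGINFO frame, then the pair codes[idx] and codes[idx+1] is copied into retcode[]); the strings are only labels for illustration:

#include <stdio.h>

static const char *codes[7] = {
        "MOV_R7_NR_SIGRETURN",    "SWI_SYS_SIGRETURN",    "SWI_THUMB_SIGRETURN",
        "MOV_R7_NR_RT_SIGRETURN", "SWI_SYS_RT_SIGRETURN", "SWI_THUMB_RT_SIGRETURN",
        "(unused)",
};

int main(void)
{
        for (int thumb = 0; thumb <= 1; thumb++)
                for (int rt = 0; rt <= 1; rt++) {
                        unsigned int idx = thumb << 1;

                        if (rt)                 /* SA_SIGINFO set */
                                idx += 3;
                        printf("thumb=%d rt=%d -> retcode[] = { %s, %s }\n",
                               thumb, rt, codes[idx], codes[idx + 1]);
                }
        return 0;
}

For the Thumb rows the second word copied is not meant to be executed, since the SWI_THUMB_* constants already pack the "mov r7, #nr" and "swi" half-words into a single 32-bit slot.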
index 91d26faca62b58c6f07d9e50950cc54efaa0e9c2..9991049c522d65ef50873d8d923f8d5021f9fce8 100644 (file)
@@ -9,4 +9,4 @@
  */
 #define KERN_SIGRETURN_CODE    0xffff0500
 
-extern const unsigned long sigreturn_codes[4];
+extern const unsigned long sigreturn_codes[7];
index 31820170f3068600a373915227d9f6a090cdf9f1..a0724f2b24cec783a242887971b5fc0d2e5a6592 100644 (file)
@@ -469,7 +469,9 @@ static void cp_clcd_enable(struct clcd_fb *fb)
        if (fb->fb.var.bits_per_pixel <= 8)
                val = CM_CTRL_LCDMUXSEL_VGA_8421BPP;
        else if (fb->fb.var.bits_per_pixel <= 16)
-               val = CM_CTRL_LCDMUXSEL_VGA_16BPP;
+               val = CM_CTRL_LCDMUXSEL_VGA_16BPP
+                       | CM_CTRL_LCDEN0 | CM_CTRL_LCDEN1
+                       | CM_CTRL_STATIC1 | CM_CTRL_STATIC2;
        else
                val = 0; /* no idea for this, don't trust the docs */
 
index 6b393691d0e898cdcc0b4aaebb6da5ab2469c470..4bdc9d4526cdfdb6b67a65a65781bfc3efa74aff 100644 (file)
@@ -333,6 +333,7 @@ static struct platform_device *ixp46x_devices[] __initdata = {
 };
 
 unsigned long ixp4xx_exp_bus_size;
+EXPORT_SYMBOL(ixp4xx_exp_bus_size);
 
 void __init ixp4xx_sys_init(void)
 {
@@ -352,7 +353,7 @@ void __init ixp4xx_sys_init(void)
                }
        }
 
-       printk("IXP4xx: Using %uMiB expansion bus window size\n",
+       printk("IXP4xx: Using %luMiB expansion bus window size\n",
                        ixp4xx_exp_bus_size >> 20);
 }
 
index 9d862f86bba6525cf8b8aa76183adb1dfd7df2ed..75110ba10424626e0f84539bfc2f70e7f603d0aa 100644 (file)
@@ -50,10 +50,10 @@ static int omap1_clk_enable_dsp_domain(struct clk *clk)
 {
        int retval;
 
-       retval = omap1_clk_use(&api_ck.clk);
+       retval = omap1_clk_enable(&api_ck.clk);
        if (!retval) {
-               retval = omap1_clk_enable(clk);
-               omap1_clk_unuse(&api_ck.clk);
+               retval = omap1_clk_enable_generic(clk);
+               omap1_clk_disable(&api_ck.clk);
        }
 
        return retval;
@@ -61,9 +61,9 @@ static int omap1_clk_enable_dsp_domain(struct clk *clk)
 
 static void omap1_clk_disable_dsp_domain(struct clk *clk)
 {
-       if (omap1_clk_use(&api_ck.clk) == 0) {
-               omap1_clk_disable(clk);
-               omap1_clk_unuse(&api_ck.clk);
+       if (omap1_clk_enable(&api_ck.clk) == 0) {
+               omap1_clk_disable_generic(clk);
+               omap1_clk_disable(&api_ck.clk);
        }
 }
 
@@ -72,7 +72,7 @@ static int omap1_clk_enable_uart_functional(struct clk *clk)
        int ret;
        struct uart_clk *uclk;
 
-       ret = omap1_clk_enable(clk);
+       ret = omap1_clk_enable_generic(clk);
        if (ret == 0) {
                /* Set smart idle acknowledgement mode */
                uclk = (struct uart_clk *)clk;
@@ -91,7 +91,7 @@ static void omap1_clk_disable_uart_functional(struct clk *clk)
        uclk = (struct uart_clk *)clk;
        omap_writeb((omap_readb(uclk->sysc_addr) & ~0x18), uclk->sysc_addr);
 
-       omap1_clk_disable(clk);
+       omap1_clk_disable_generic(clk);
 }
 
 static void omap1_clk_allow_idle(struct clk *clk)
@@ -230,9 +230,9 @@ static void omap1_ckctl_recalc_dsp_domain(struct clk * clk)
         * Note that DSP_CKCTL virt addr = phys addr, so
         * we must use __raw_readw() instead of omap_readw().
         */
-       omap1_clk_use(&api_ck.clk);
+       omap1_clk_enable(&api_ck.clk);
        dsor = 1 << (3 & (__raw_readw(DSP_CKCTL) >> clk->rate_offset));
-       omap1_clk_unuse(&api_ck.clk);
+       omap1_clk_disable(&api_ck.clk);
 
        if (unlikely(clk->rate == clk->parent->rate / dsor))
                return; /* No change, quick exit */
@@ -412,12 +412,12 @@ static void omap1_init_ext_clk(struct clk * clk)
        clk-> rate = 96000000 / dsor;
 }
 
-static int omap1_clk_use(struct clk *clk)
+static int omap1_clk_enable(struct clk *clk)
 {
        int ret = 0;
        if (clk->usecount++ == 0) {
                if (likely(clk->parent)) {
-                       ret = omap1_clk_use(clk->parent);
+                       ret = omap1_clk_enable(clk->parent);
 
                        if (unlikely(ret != 0)) {
                                clk->usecount--;
@@ -432,7 +432,7 @@ static int omap1_clk_use(struct clk *clk)
                ret = clk->enable(clk);
 
                if (unlikely(ret != 0) && clk->parent) {
-                       omap1_clk_unuse(clk->parent);
+                       omap1_clk_disable(clk->parent);
                        clk->usecount--;
                }
        }
@@ -440,12 +440,12 @@ static int omap1_clk_use(struct clk *clk)
        return ret;
 }
 
-static void omap1_clk_unuse(struct clk *clk)
+static void omap1_clk_disable(struct clk *clk)
 {
        if (clk->usecount > 0 && !(--clk->usecount)) {
                clk->disable(clk);
                if (likely(clk->parent)) {
-                       omap1_clk_unuse(clk->parent);
+                       omap1_clk_disable(clk->parent);
                        if (clk->flags & CLOCK_NO_IDLE_PARENT)
                                if (!cpu_is_omap24xx())
                                        omap1_clk_allow_idle(clk->parent);
@@ -453,7 +453,7 @@ static void omap1_clk_unuse(struct clk *clk)
        }
 }
 
-static int omap1_clk_enable(struct clk *clk)
+static int omap1_clk_enable_generic(struct clk *clk)
 {
        __u16 regval16;
        __u32 regval32;
@@ -492,7 +492,7 @@ static int omap1_clk_enable(struct clk *clk)
        return 0;
 }
 
-static void omap1_clk_disable(struct clk *clk)
+static void omap1_clk_disable_generic(struct clk *clk)
 {
        __u16 regval16;
        __u32 regval32;
@@ -654,8 +654,8 @@ late_initcall(omap1_late_clk_reset);
 #endif
 
 static struct clk_functions omap1_clk_functions = {
-       .clk_use                = omap1_clk_use,
-       .clk_unuse              = omap1_clk_unuse,
+       .clk_enable             = omap1_clk_enable,
+       .clk_disable            = omap1_clk_disable,
        .clk_round_rate         = omap1_clk_round_rate,
        .clk_set_rate           = omap1_clk_set_rate,
 };
@@ -780,9 +780,9 @@ int __init omap1_clk_init(void)
         * Only enable those clocks we will need, let the drivers
         * enable other clocks as necessary
         */
-       clk_use(&armper_ck.clk);
-       clk_use(&armxor_ck.clk);
-       clk_use(&armtim_ck.clk); /* This should be done by timer code */
+       clk_enable(&armper_ck.clk);
+       clk_enable(&armxor_ck.clk);
+       clk_enable(&armtim_ck.clk); /* This should be done by timer code */
 
        if (cpu_is_omap1510())
                clk_enable(&arm_gpio_ck);
index f3bdfb50e01a73042b31c64a2d7563c8a739ecd0..4f18d1b94449b7c925ce4304e3d0e1e555776490 100644 (file)
@@ -13,8 +13,8 @@
 #ifndef __ARCH_ARM_MACH_OMAP1_CLOCK_H
 #define __ARCH_ARM_MACH_OMAP1_CLOCK_H
 
-static int omap1_clk_enable(struct clk * clk);
-static void omap1_clk_disable(struct clk * clk);
+static int omap1_clk_enable_generic(struct clk * clk);
+static void omap1_clk_disable_generic(struct clk * clk);
 static void omap1_ckctl_recalc(struct clk * clk);
 static void omap1_watchdog_recalc(struct clk * clk);
 static void omap1_ckctl_recalc_dsp_domain(struct clk * clk);
@@ -30,8 +30,8 @@ static long omap1_round_ext_clk_rate(struct clk * clk, unsigned long rate);
 static void omap1_init_ext_clk(struct clk * clk);
 static int omap1_select_table_rate(struct clk * clk, unsigned long rate);
 static long omap1_round_to_table_rate(struct clk * clk, unsigned long rate);
-static int omap1_clk_use(struct clk *clk);
-static void omap1_clk_unuse(struct clk *clk);
+static int omap1_clk_enable(struct clk *clk);
+static void omap1_clk_disable(struct clk *clk);
 
 struct mpu_rate {
        unsigned long           rate;
@@ -152,8 +152,8 @@ static struct clk ck_ref = {
        .rate           = 12000000,
        .flags          = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX |
                          ALWAYS_ENABLED,
-       .enable         = &omap1_clk_enable,
-       .disable        = &omap1_clk_disable,
+       .enable         = &omap1_clk_enable_generic,
+       .disable        = &omap1_clk_disable_generic,
 };
 
 static struct clk ck_dpll1 = {
@@ -161,8 +161,8 @@ static struct clk ck_dpll1 = {
        .parent         = &ck_ref,
        .flags          = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX |
                          RATE_PROPAGATES | ALWAYS_ENABLED,
-       .enable         = &omap1_clk_enable,
-       .disable        = &omap1_clk_disable,
+       .enable         = &omap1_clk_enable_generic,
+       .disable        = &omap1_clk_disable_generic,
 };
 
 static struct arm_idlect1_clk ck_dpll1out = {
@@ -173,8 +173,8 @@ static struct arm_idlect1_clk ck_dpll1out = {
                .enable_reg     = (void __iomem *)ARM_IDLECT2,
                .enable_bit     = EN_CKOUT_ARM,
                .recalc         = &followparent_recalc,
-               .enable         = &omap1_clk_enable,
-               .disable        = &omap1_clk_disable,
+               .enable         = &omap1_clk_enable_generic,
+               .disable        = &omap1_clk_disable_generic,
        },
        .idlect_shift   = 12,
 };
@@ -186,8 +186,8 @@ static struct clk arm_ck = {
                          RATE_CKCTL | RATE_PROPAGATES | ALWAYS_ENABLED,
        .rate_offset    = CKCTL_ARMDIV_OFFSET,
        .recalc         = &omap1_ckctl_recalc,
-       .enable         = &omap1_clk_enable,
-       .disable        = &omap1_clk_disable,
+       .enable         = &omap1_clk_enable_generic,
+       .disable        = &omap1_clk_disable_generic,
 };
 
 static struct arm_idlect1_clk armper_ck = {
@@ -200,8 +200,8 @@ static struct arm_idlect1_clk armper_ck = {
                .enable_bit     = EN_PERCK,
                .rate_offset    = CKCTL_PERDIV_OFFSET,
                .recalc         = &omap1_ckctl_recalc,
-               .enable         = &omap1_clk_enable,
-               .disable        = &omap1_clk_disable,
+               .enable         = &omap1_clk_enable_generic,
+               .disable        = &omap1_clk_disable_generic,
        },
        .idlect_shift   = 2,
 };
@@ -213,8 +213,8 @@ static struct clk arm_gpio_ck = {
        .enable_reg     = (void __iomem *)ARM_IDLECT2,
        .enable_bit     = EN_GPIOCK,
        .recalc         = &followparent_recalc,
-       .enable         = &omap1_clk_enable,
-       .disable        = &omap1_clk_disable,
+       .enable         = &omap1_clk_enable_generic,
+       .disable        = &omap1_clk_disable_generic,
 };
 
 static struct arm_idlect1_clk armxor_ck = {
@@ -226,8 +226,8 @@ static struct arm_idlect1_clk armxor_ck = {
                .enable_reg     = (void __iomem *)ARM_IDLECT2,
                .enable_bit     = EN_XORPCK,
                .recalc         = &followparent_recalc,
-               .enable         = &omap1_clk_enable,
-               .disable        = &omap1_clk_disable,
+               .enable         = &omap1_clk_enable_generic,
+               .disable        = &omap1_clk_disable_generic,
        },
        .idlect_shift   = 1,
 };
@@ -241,8 +241,8 @@ static struct arm_idlect1_clk armtim_ck = {
                .enable_reg     = (void __iomem *)ARM_IDLECT2,
                .enable_bit     = EN_TIMCK,
                .recalc         = &followparent_recalc,
-               .enable         = &omap1_clk_enable,
-               .disable        = &omap1_clk_disable,
+               .enable         = &omap1_clk_enable_generic,
+               .disable        = &omap1_clk_disable_generic,
        },
        .idlect_shift   = 9,
 };
@@ -256,8 +256,8 @@ static struct arm_idlect1_clk armwdt_ck = {
                .enable_reg     = (void __iomem *)ARM_IDLECT2,
                .enable_bit     = EN_WDTCK,
                .recalc         = &omap1_watchdog_recalc,
-               .enable         = &omap1_clk_enable,
-               .disable        = &omap1_clk_disable,
+               .enable         = &omap1_clk_enable_generic,
+               .disable        = &omap1_clk_disable_generic,
        },
        .idlect_shift   = 0,
 };
@@ -272,8 +272,8 @@ static struct clk arminth_ck16xx = {
         *
         * 1510 version is in TC clocks.
         */
-       .enable         = &omap1_clk_enable,
-       .disable        = &omap1_clk_disable,
+       .enable         = &omap1_clk_enable_generic,
+       .disable        = &omap1_clk_disable_generic,
 };
 
 static struct clk dsp_ck = {
@@ -285,8 +285,8 @@ static struct clk dsp_ck = {
        .enable_bit     = EN_DSPCK,
        .rate_offset    = CKCTL_DSPDIV_OFFSET,
        .recalc         = &omap1_ckctl_recalc,
-       .enable         = &omap1_clk_enable,
-       .disable        = &omap1_clk_disable,
+       .enable         = &omap1_clk_enable_generic,
+       .disable        = &omap1_clk_disable_generic,
 };
 
 static struct clk dspmmu_ck = {
@@ -296,8 +296,8 @@ static struct clk dspmmu_ck = {
                          RATE_CKCTL | ALWAYS_ENABLED,
        .rate_offset    = CKCTL_DSPMMUDIV_OFFSET,
        .recalc         = &omap1_ckctl_recalc,
-       .enable         = &omap1_clk_enable,
-       .disable        = &omap1_clk_disable,
+       .enable         = &omap1_clk_enable_generic,
+       .disable        = &omap1_clk_disable_generic,
 };
 
 static struct clk dspper_ck = {
@@ -349,8 +349,8 @@ static struct arm_idlect1_clk tc_ck = {
                                  CLOCK_IDLE_CONTROL,
                .rate_offset    = CKCTL_TCDIV_OFFSET,
                .recalc         = &omap1_ckctl_recalc,
-               .enable         = &omap1_clk_enable,
-               .disable        = &omap1_clk_disable,
+               .enable         = &omap1_clk_enable_generic,
+               .disable        = &omap1_clk_disable_generic,
        },
        .idlect_shift   = 6,
 };
@@ -364,8 +364,8 @@ static struct clk arminth_ck1510 = {
         *
         * 16xx version is in MPU clocks.
         */
-       .enable         = &omap1_clk_enable,
-       .disable        = &omap1_clk_disable,
+       .enable         = &omap1_clk_enable_generic,
+       .disable        = &omap1_clk_disable_generic,
 };
 
 static struct clk tipb_ck = {
@@ -374,8 +374,8 @@ static struct clk tipb_ck = {
        .parent         = &tc_ck.clk,
        .flags          = CLOCK_IN_OMAP1510 | ALWAYS_ENABLED,
        .recalc         = &followparent_recalc,
-       .enable         = &omap1_clk_enable,
-       .disable        = &omap1_clk_disable,
+       .enable         = &omap1_clk_enable_generic,
+       .disable        = &omap1_clk_disable_generic,
 };
 
 static struct clk l3_ocpi_ck = {
@@ -386,8 +386,8 @@ static struct clk l3_ocpi_ck = {
        .enable_reg     = (void __iomem *)ARM_IDLECT3,
        .enable_bit     = EN_OCPI_CK,
        .recalc         = &followparent_recalc,
-       .enable         = &omap1_clk_enable,
-       .disable        = &omap1_clk_disable,
+       .enable         = &omap1_clk_enable_generic,
+       .disable        = &omap1_clk_disable_generic,
 };
 
 static struct clk tc1_ck = {
@@ -397,8 +397,8 @@ static struct clk tc1_ck = {
        .enable_reg     = (void __iomem *)ARM_IDLECT3,
        .enable_bit     = EN_TC1_CK,
        .recalc         = &followparent_recalc,
-       .enable         = &omap1_clk_enable,
-       .disable        = &omap1_clk_disable,
+       .enable         = &omap1_clk_enable_generic,
+       .disable        = &omap1_clk_disable_generic,
 };
 
 static struct clk tc2_ck = {
@@ -408,8 +408,8 @@ static struct clk tc2_ck = {
        .enable_reg     = (void __iomem *)ARM_IDLECT3,
        .enable_bit     = EN_TC2_CK,
        .recalc         = &followparent_recalc,
-       .enable         = &omap1_clk_enable,
-       .disable        = &omap1_clk_disable,
+       .enable         = &omap1_clk_enable_generic,
+       .disable        = &omap1_clk_disable_generic,
 };
 
 static struct clk dma_ck = {
@@ -419,8 +419,8 @@ static struct clk dma_ck = {
        .flags          = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX |
                          ALWAYS_ENABLED,
        .recalc         = &followparent_recalc,
-       .enable         = &omap1_clk_enable,
-       .disable        = &omap1_clk_disable,
+       .enable         = &omap1_clk_enable_generic,
+       .disable        = &omap1_clk_disable_generic,
 };
 
 static struct clk dma_lcdfree_ck = {
@@ -428,8 +428,8 @@ static struct clk dma_lcdfree_ck = {
        .parent         = &tc_ck.clk,
        .flags          = CLOCK_IN_OMAP16XX | ALWAYS_ENABLED,
        .recalc         = &followparent_recalc,
-       .enable         = &omap1_clk_enable,
-       .disable        = &omap1_clk_disable,
+       .enable         = &omap1_clk_enable_generic,
+       .disable        = &omap1_clk_disable_generic,
 };
 
 static struct arm_idlect1_clk api_ck = {
@@ -441,8 +441,8 @@ static struct arm_idlect1_clk api_ck = {
                .enable_reg     = (void __iomem *)ARM_IDLECT2,
                .enable_bit     = EN_APICK,
                .recalc         = &followparent_recalc,
-               .enable         = &omap1_clk_enable,
-               .disable        = &omap1_clk_disable,
+               .enable         = &omap1_clk_enable_generic,
+               .disable        = &omap1_clk_disable_generic,
        },
        .idlect_shift   = 8,
 };
@@ -455,8 +455,8 @@ static struct arm_idlect1_clk lb_ck = {
                .enable_reg     = (void __iomem *)ARM_IDLECT2,
                .enable_bit     = EN_LBCK,
                .recalc         = &followparent_recalc,
-               .enable         = &omap1_clk_enable,
-               .disable        = &omap1_clk_disable,
+               .enable         = &omap1_clk_enable_generic,
+               .disable        = &omap1_clk_disable_generic,
        },
        .idlect_shift   = 4,
 };
@@ -466,8 +466,8 @@ static struct clk rhea1_ck = {
        .parent         = &tc_ck.clk,
        .flags          = CLOCK_IN_OMAP16XX | ALWAYS_ENABLED,
        .recalc         = &followparent_recalc,
-       .enable         = &omap1_clk_enable,
-       .disable        = &omap1_clk_disable,
+       .enable         = &omap1_clk_enable_generic,
+       .disable        = &omap1_clk_disable_generic,
 };
 
 static struct clk rhea2_ck = {
@@ -475,8 +475,8 @@ static struct clk rhea2_ck = {
        .parent         = &tc_ck.clk,
        .flags          = CLOCK_IN_OMAP16XX | ALWAYS_ENABLED,
        .recalc         = &followparent_recalc,
-       .enable         = &omap1_clk_enable,
-       .disable        = &omap1_clk_disable,
+       .enable         = &omap1_clk_enable_generic,
+       .disable        = &omap1_clk_disable_generic,
 };
 
 static struct clk lcd_ck_16xx = {
@@ -487,8 +487,8 @@ static struct clk lcd_ck_16xx = {
        .enable_bit     = EN_LCDCK,
        .rate_offset    = CKCTL_LCDDIV_OFFSET,
        .recalc         = &omap1_ckctl_recalc,
-       .enable         = &omap1_clk_enable,
-       .disable        = &omap1_clk_disable,
+       .enable         = &omap1_clk_enable_generic,
+       .disable        = &omap1_clk_disable_generic,
 };
 
 static struct arm_idlect1_clk lcd_ck_1510 = {
@@ -501,8 +501,8 @@ static struct arm_idlect1_clk lcd_ck_1510 = {
                .enable_bit     = EN_LCDCK,
                .rate_offset    = CKCTL_LCDDIV_OFFSET,
                .recalc         = &omap1_ckctl_recalc,
-               .enable         = &omap1_clk_enable,
-               .disable        = &omap1_clk_disable,
+               .enable         = &omap1_clk_enable_generic,
+               .disable        = &omap1_clk_disable_generic,
        },
        .idlect_shift   = 3,
 };
@@ -518,8 +518,8 @@ static struct clk uart1_1510 = {
        .enable_bit     = 29,   /* Chooses between 12MHz and 48MHz */
        .set_rate       = &omap1_set_uart_rate,
        .recalc         = &omap1_uart_recalc,
-       .enable         = &omap1_clk_enable,
-       .disable        = &omap1_clk_disable,
+       .enable         = &omap1_clk_enable_generic,
+       .disable        = &omap1_clk_disable_generic,
 };
 
 static struct uart_clk uart1_16xx = {
@@ -550,8 +550,8 @@ static struct clk uart2_ck = {
        .enable_bit     = 30,   /* Chooses between 12MHz and 48MHz */
        .set_rate       = &omap1_set_uart_rate,
        .recalc         = &omap1_uart_recalc,
-       .enable         = &omap1_clk_enable,
-       .disable        = &omap1_clk_disable,
+       .enable         = &omap1_clk_enable_generic,
+       .disable        = &omap1_clk_disable_generic,
 };
 
 static struct clk uart3_1510 = {
@@ -565,8 +565,8 @@ static struct clk uart3_1510 = {
        .enable_bit     = 31,   /* Chooses between 12MHz and 48MHz */
        .set_rate       = &omap1_set_uart_rate,
        .recalc         = &omap1_uart_recalc,
-       .enable         = &omap1_clk_enable,
-       .disable        = &omap1_clk_disable,
+       .enable         = &omap1_clk_enable_generic,
+       .disable        = &omap1_clk_disable_generic,
 };
 
 static struct uart_clk uart3_16xx = {
@@ -593,8 +593,8 @@ static struct clk usb_clko = {      /* 6 MHz output on W4_USB_CLKO */
                          RATE_FIXED | ENABLE_REG_32BIT,
        .enable_reg     = (void __iomem *)ULPD_CLOCK_CTRL,
        .enable_bit     = USB_MCLK_EN_BIT,
-       .enable         = &omap1_clk_enable,
-       .disable        = &omap1_clk_disable,
+       .enable         = &omap1_clk_enable_generic,
+       .disable        = &omap1_clk_disable_generic,
 };
 
 static struct clk usb_hhc_ck1510 = {
@@ -605,8 +605,8 @@ static struct clk usb_hhc_ck1510 = {
                          RATE_FIXED | ENABLE_REG_32BIT,
        .enable_reg     = (void __iomem *)MOD_CONF_CTRL_0,
        .enable_bit     = USB_HOST_HHC_UHOST_EN,
-       .enable         = &omap1_clk_enable,
-       .disable        = &omap1_clk_disable,
+       .enable         = &omap1_clk_enable_generic,
+       .disable        = &omap1_clk_disable_generic,
 };
 
 static struct clk usb_hhc_ck16xx = {
@@ -618,8 +618,8 @@ static struct clk usb_hhc_ck16xx = {
                          RATE_FIXED | ENABLE_REG_32BIT,
        .enable_reg     = (void __iomem *)OTG_BASE + 0x08 /* OTG_SYSCON_2 */,
        .enable_bit     = 8 /* UHOST_EN */,
-       .enable         = &omap1_clk_enable,
-       .disable        = &omap1_clk_disable,
+       .enable         = &omap1_clk_enable_generic,
+       .disable        = &omap1_clk_disable_generic,
 };
 
 static struct clk usb_dc_ck = {
@@ -629,8 +629,8 @@ static struct clk usb_dc_ck = {
        .flags          = CLOCK_IN_OMAP16XX | RATE_FIXED,
        .enable_reg     = (void __iomem *)SOFT_REQ_REG,
        .enable_bit     = 4,
-       .enable         = &omap1_clk_enable,
-       .disable        = &omap1_clk_disable,
+       .enable         = &omap1_clk_enable_generic,
+       .disable        = &omap1_clk_disable_generic,
 };
 
 static struct clk mclk_1510 = {
@@ -638,8 +638,8 @@ static struct clk mclk_1510 = {
        /* Direct from ULPD, no parent. May be enabled by ext hardware. */
        .rate           = 12000000,
        .flags          = CLOCK_IN_OMAP1510 | RATE_FIXED,
-       .enable         = &omap1_clk_enable,
-       .disable        = &omap1_clk_disable,
+       .enable         = &omap1_clk_enable_generic,
+       .disable        = &omap1_clk_disable_generic,
 };
 
 static struct clk mclk_16xx = {
@@ -651,8 +651,8 @@ static struct clk mclk_16xx = {
        .set_rate       = &omap1_set_ext_clk_rate,
        .round_rate     = &omap1_round_ext_clk_rate,
        .init           = &omap1_init_ext_clk,
-       .enable         = &omap1_clk_enable,
-       .disable        = &omap1_clk_disable,
+       .enable         = &omap1_clk_enable_generic,
+       .disable        = &omap1_clk_disable_generic,
 };
 
 static struct clk bclk_1510 = {
@@ -660,8 +660,8 @@ static struct clk bclk_1510 = {
        /* Direct from ULPD, no parent. May be enabled by ext hardware. */
        .rate           = 12000000,
        .flags          = CLOCK_IN_OMAP1510 | RATE_FIXED,
-       .enable         = &omap1_clk_enable,
-       .disable        = &omap1_clk_disable,
+       .enable         = &omap1_clk_enable_generic,
+       .disable        = &omap1_clk_disable_generic,
 };
 
 static struct clk bclk_16xx = {
@@ -673,8 +673,8 @@ static struct clk bclk_16xx = {
        .set_rate       = &omap1_set_ext_clk_rate,
        .round_rate     = &omap1_round_ext_clk_rate,
        .init           = &omap1_init_ext_clk,
-       .enable         = &omap1_clk_enable,
-       .disable        = &omap1_clk_disable,
+       .enable         = &omap1_clk_enable_generic,
+       .disable        = &omap1_clk_disable_generic,
 };
 
 static struct clk mmc1_ck = {
@@ -686,8 +686,8 @@ static struct clk mmc1_ck = {
                          RATE_FIXED | ENABLE_REG_32BIT | CLOCK_NO_IDLE_PARENT,
        .enable_reg     = (void __iomem *)MOD_CONF_CTRL_0,
        .enable_bit     = 23,
-       .enable         = &omap1_clk_enable,
-       .disable        = &omap1_clk_disable,
+       .enable         = &omap1_clk_enable_generic,
+       .disable        = &omap1_clk_disable_generic,
 };
 
 static struct clk mmc2_ck = {
@@ -699,8 +699,8 @@ static struct clk mmc2_ck = {
                          RATE_FIXED | ENABLE_REG_32BIT | CLOCK_NO_IDLE_PARENT,
        .enable_reg     = (void __iomem *)MOD_CONF_CTRL_0,
        .enable_bit     = 20,
-       .enable         = &omap1_clk_enable,
-       .disable        = &omap1_clk_disable,
+       .enable         = &omap1_clk_enable_generic,
+       .disable        = &omap1_clk_disable_generic,
 };
 
 static struct clk virtual_ck_mpu = {
@@ -711,8 +711,8 @@ static struct clk virtual_ck_mpu = {
        .recalc         = &followparent_recalc,
        .set_rate       = &omap1_select_table_rate,
        .round_rate     = &omap1_round_to_table_rate,
-       .enable         = &omap1_clk_enable,
-       .disable        = &omap1_clk_disable,
+       .enable         = &omap1_clk_enable_generic,
+       .disable        = &omap1_clk_disable_generic,
 };
 
 static struct clk * onchip_clks[] = {
index 7a68f098a0254365da8f1da60edd94f081f9df65..e924e0c6a4ce99106ced4bdc523ea806d2e4197f 100644 (file)
@@ -146,7 +146,7 @@ void __init omap_serial_init(void)
                        if (IS_ERR(uart1_ck))
                                printk("Could not get uart1_ck\n");
                        else {
-                               clk_use(uart1_ck);
+                               clk_enable(uart1_ck);
                                if (cpu_is_omap1510())
                                        clk_set_rate(uart1_ck, 12000000);
                        }
@@ -166,7 +166,7 @@ void __init omap_serial_init(void)
                        if (IS_ERR(uart2_ck))
                                printk("Could not get uart2_ck\n");
                        else {
-                               clk_use(uart2_ck);
+                               clk_enable(uart2_ck);
                                if (cpu_is_omap1510())
                                        clk_set_rate(uart2_ck, 12000000);
                                else
@@ -188,7 +188,7 @@ void __init omap_serial_init(void)
                        if (IS_ERR(uart3_ck))
                                printk("Could not get uart3_ck\n");
                        else {
-                               clk_use(uart3_ck);
+                               clk_enable(uart3_ck);
                                if (cpu_is_omap1510())
                                        clk_set_rate(uart3_ck, 12000000);
                        }
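
The serial hunks above show the consumer side of folding clk_use()/clk_unuse() into clk_enable()/clk_disable(). A minimal driver-side sketch of the resulting idiom follows; it assumes <linux/clk.h> and <linux/err.h>, and the clock name and rate are illustrative (loosely modelled on omap_serial_init()), not a new API introduced by this patch.

#include <linux/clk.h>
#include <linux/err.h>

/* Sketch only: look up a clock, enable it (which now also bumps the use
 * count), optionally reprogram it, and drop the reference when done. */
static int example_uart_clock_setup(void)
{
	struct clk *ck;

	ck = clk_get(NULL, "uart1_ck");
	if (IS_ERR(ck))
		return PTR_ERR(ck);

	clk_enable(ck);			/* replaces the old clk_use() */
	clk_set_rate(ck, 12000000);	/* as omap_serial_init() does on 1510 */

	/* ... use the device ... */

	clk_disable(ck);		/* replaces the old clk_unuse() */
	clk_put(ck);
	return 0;
}
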
index 5407b954915011ca62d5c46d4c5c0dace238867b..180f675c9064094d1fb95486b5bdfbad8c1d8f8c 100644 (file)
@@ -111,7 +111,7 @@ static void omap2_clk_fixed_enable(struct clk *clk)
 /* Enables clock without considering parent dependencies or use count
  * REVISIT: Maybe change this to use clk->enable like on omap1?
  */
-static int omap2_clk_enable(struct clk * clk)
+static int _omap2_clk_enable(struct clk * clk)
 {
        u32 regval32;
 
@@ -150,7 +150,7 @@ static void omap2_clk_fixed_disable(struct clk *clk)
 }
 
 /* Disables clock without considering parent dependencies or use count */
-static void omap2_clk_disable(struct clk *clk)
+static void _omap2_clk_disable(struct clk *clk)
 {
        u32 regval32;
 
@@ -167,23 +167,23 @@ static void omap2_clk_disable(struct clk *clk)
        __raw_writel(regval32, clk->enable_reg);
 }
 
-static int omap2_clk_use(struct clk *clk)
+static int omap2_clk_enable(struct clk *clk)
 {
        int ret = 0;
 
        if (clk->usecount++ == 0) {
                if (likely((u32)clk->parent))
-                       ret = omap2_clk_use(clk->parent);
+                       ret = omap2_clk_enable(clk->parent);
 
                if (unlikely(ret != 0)) {
                        clk->usecount--;
                        return ret;
                }
 
-               ret = omap2_clk_enable(clk);
+               ret = _omap2_clk_enable(clk);
 
                if (unlikely(ret != 0) && clk->parent) {
-                       omap2_clk_unuse(clk->parent);
+                       omap2_clk_disable(clk->parent);
                        clk->usecount--;
                }
        }
@@ -191,12 +191,12 @@ static int omap2_clk_use(struct clk *clk)
        return ret;
 }
 
-static void omap2_clk_unuse(struct clk *clk)
+static void omap2_clk_disable(struct clk *clk)
 {
        if (clk->usecount > 0 && !(--clk->usecount)) {
-               omap2_clk_disable(clk);
+               _omap2_clk_disable(clk);
                if (likely((u32)clk->parent))
-                       omap2_clk_unuse(clk->parent);
+                       omap2_clk_disable(clk->parent);
        }
 }
 
@@ -873,7 +873,7 @@ static int omap2_clk_set_parent(struct clk *clk, struct clk *new_parent)
                reg = (void __iomem *)src_sel;
 
                if (clk->usecount > 0)
-                       omap2_clk_disable(clk);
+                       _omap2_clk_disable(clk);
 
                /* Set new source value (previous dividers if any in effect) */
                reg_val = __raw_readl(reg) & ~(field_mask << src_off);
@@ -884,7 +884,7 @@ static int omap2_clk_set_parent(struct clk *clk, struct clk *new_parent)
                        __raw_writel(0x1, (void __iomem *)&PRCM_CLKCFG_CTRL);
 
                if (clk->usecount > 0)
-                       omap2_clk_enable(clk);
+                       _omap2_clk_enable(clk);
 
                clk->parent = new_parent;
 
@@ -999,8 +999,6 @@ static int omap2_select_table_rate(struct clk * clk, unsigned long rate)
 static struct clk_functions omap2_clk_functions = {
        .clk_enable             = omap2_clk_enable,
        .clk_disable            = omap2_clk_disable,
-       .clk_use                = omap2_clk_use,
-       .clk_unuse              = omap2_clk_unuse,
        .clk_round_rate         = omap2_clk_round_rate,
        .clk_set_rate           = omap2_clk_set_rate,
        .clk_set_parent         = omap2_clk_set_parent,
@@ -1045,7 +1043,7 @@ static void __init omap2_disable_unused_clocks(void)
                        continue;
 
                printk(KERN_INFO "Disabling unused clock \"%s\"\n", ck->name);
-               omap2_clk_disable(ck);
+               _omap2_clk_disable(ck);
        }
 }
 late_initcall(omap2_disable_unused_clocks);
@@ -1120,10 +1118,10 @@ int __init omap2_clk_init(void)
         * Only enable those clocks we will need, let the drivers
         * enable other clocks as necessary
         */
-       clk_use(&sync_32k_ick);
-       clk_use(&omapctrl_ick);
+       clk_enable(&sync_32k_ick);
+       clk_enable(&omapctrl_ick);
        if (cpu_is_omap2430())
-               clk_use(&sdrc_ick);
+               clk_enable(&sdrc_ick);
 
        return 0;
 }
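
The enable/disable hunks above fold the old clk_use()/clk_unuse() use counting into omap2_clk_enable()/omap2_clk_disable(), with the raw register toggles renamed to _omap2_clk_enable()/_omap2_clk_disable(). Below is a stand-alone C model of that refcounting pattern; the names are hypothetical and it compiles and runs on its own, with _hw_enable()/_hw_disable() standing in for the register writes.

#include <stdio.h>

struct clk {
	const char *name;
	struct clk *parent;
	int usecount;
};

static void _hw_enable(struct clk *c)  { printf("hw enable  %s\n", c->name); }
static void _hw_disable(struct clk *c) { printf("hw disable %s\n", c->name); }

static void clk_enable(struct clk *c)
{
	if (c->usecount++ == 0) {	/* first user: enable parent chain, then this clock */
		if (c->parent)
			clk_enable(c->parent);
		_hw_enable(c);
	}
}

static void clk_disable(struct clk *c)
{
	if (c->usecount > 0 && --c->usecount == 0) {	/* last user: gate clock, release parent */
		_hw_disable(c);
		if (c->parent)
			clk_disable(c->parent);
	}
}

int main(void)
{
	struct clk core = { "core_ck", NULL, 0 };
	struct clk uart = { "uart1_fck", &core, 0 };

	clk_enable(&uart);	/* enables core_ck, then uart1_fck */
	clk_enable(&uart);	/* second user: use count only, no hardware access */
	clk_disable(&uart);
	clk_disable(&uart);	/* gates uart1_fck, then core_ck */
	return 0;
}
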
index 4aeab5591bd395d7cb93cbeaa0db2d53642ef78e..6cab20b1d3c1d2ad2cc2d317017f6037b095aa9f 100644 (file)
@@ -24,7 +24,7 @@ static void omap2_propagate_rate(struct clk * clk);
 static void omap2_mpu_recalc(struct clk * clk);
 static int omap2_select_table_rate(struct clk * clk, unsigned long rate);
 static long omap2_round_to_table_rate(struct clk * clk, unsigned long rate);
-static void omap2_clk_unuse(struct clk *clk);
+static void omap2_clk_disable(struct clk *clk);
 static void omap2_sys_clk_recalc(struct clk * clk);
 static u32 omap2_clksel_to_divisor(u32 div_sel, u32 field_val);
 static u32 omap2_clksel_get_divisor(struct clk *clk);
@@ -859,7 +859,7 @@ static struct clk core_l3_ck = {    /* Used for ick and fck, interconnect */
 
 static struct clk usb_l4_ick = {       /* FS-USB interface clock */
        .name           = "usb_l4_ick",
-       .parent         = &core_ck,
+       .parent         = &core_l3_ck,
        .flags          = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X |
                                RATE_CKCTL | CM_CORE_SEL1 | DELAYED_APP |
                                CONFIG_PARTICIPANT,
@@ -1045,7 +1045,7 @@ static struct clk gpt1_ick = {
        .name           = "gpt1_ick",
        .parent         = &l4_ck,
        .flags          = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
-       .enable_reg     = (void __iomem *)&CM_ICLKEN_WKUP,      /* Bit4 */
+       .enable_reg     = (void __iomem *)&CM_ICLKEN_WKUP,      /* Bit0 */
        .enable_bit     = 0,
        .recalc         = &omap2_followparent_recalc,
 };
@@ -1055,7 +1055,7 @@ static struct clk gpt1_fck = {
        .parent         = &func_32k_ck,
        .flags          = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X |
                                CM_WKUP_SEL1,
-       .enable_reg     = (void __iomem *)&CM_FCLKEN_WKUP,
+       .enable_reg     = (void __iomem *)&CM_FCLKEN_WKUP,      /* Bit0 */
        .enable_bit     = 0,
        .src_offset     = 0,
        .recalc         = &omap2_followparent_recalc,
@@ -1065,7 +1065,7 @@ static struct clk gpt2_ick = {
        .name           = "gpt2_ick",
        .parent         = &l4_ck,
        .flags          = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
-       .enable_reg     = (void __iomem *)&CM_ICLKEN1_CORE,     /* bit4 */
+       .enable_reg     = (void __iomem *)&CM_ICLKEN1_CORE,     /* Bit4 */
        .enable_bit     = 0,
        .recalc         = &omap2_followparent_recalc,
 };
@@ -1839,7 +1839,7 @@ static struct clk usb_fck = {
 
 static struct clk usbhs_ick = {
        .name           = "usbhs_ick",
-       .parent         = &l4_ck,
+       .parent         = &core_l3_ck,
        .flags          = CLOCK_IN_OMAP243X,
        .enable_reg     = (void __iomem *)&CM_ICLKEN2_CORE,
        .enable_bit     = 6,
index e1bd46a96e117c21d41637e85b715c7ddec17ff6..24dd374224afaa8741c72d1d7c0dcf520a605fe6 100644 (file)
@@ -119,14 +119,14 @@ void __init omap_serial_init()
                        if (IS_ERR(uart1_ick))
                                printk("Could not get uart1_ick\n");
                        else {
-                               clk_use(uart1_ick);
+                               clk_enable(uart1_ick);
                        }
 
                        uart1_fck = clk_get(NULL, "uart1_fck");
                        if (IS_ERR(uart1_fck))
                                printk("Could not get uart1_fck\n");
                        else {
-                               clk_use(uart1_fck);
+                               clk_enable(uart1_fck);
                        }
                        break;
                case 1:
@@ -134,14 +134,14 @@ void __init omap_serial_init()
                        if (IS_ERR(uart2_ick))
                                printk("Could not get uart2_ick\n");
                        else {
-                               clk_use(uart2_ick);
+                               clk_enable(uart2_ick);
                        }
 
                        uart2_fck = clk_get(NULL, "uart2_fck");
                        if (IS_ERR(uart2_fck))
                                printk("Could not get uart2_fck\n");
                        else {
-                               clk_use(uart2_fck);
+                               clk_enable(uart2_fck);
                        }
                        break;
                case 2:
@@ -149,14 +149,14 @@ void __init omap_serial_init()
                        if (IS_ERR(uart3_ick))
                                printk("Could not get uart3_ick\n");
                        else {
-                               clk_use(uart3_ick);
+                               clk_enable(uart3_ick);
                        }
 
                        uart3_fck = clk_get(NULL, "uart3_fck");
                        if (IS_ERR(uart3_fck))
                                printk("Could not get uart3_fck\n");
                        else {
-                               clk_use(uart3_fck);
+                               clk_enable(uart3_fck);
                        }
                        break;
                }
index 23d36b1c40fee848dcc35ab41cc71d88b09fce25..1d2f5ac2f69b8ec6d59f1bcaf41aeb53757e7816 100644 (file)
@@ -104,7 +104,7 @@ static void __init omap2_gp_timer_init(void)
        if (IS_ERR(sys_ck))
                printk(KERN_ERR "Could not get sys_ck\n");
        else {
-               clk_use(sys_ck);
+               clk_enable(sys_ck);
                tick_period = clk_get_rate(sys_ck) / 100;
                clk_put(sys_ck);
        }
index 9cbe5eef492b26a26a4d586ad6d96266f37de8fb..fc1067783f6d18ee7fb8a0572be4c9d81b871a17 100644 (file)
  *     14-Jan-2005 BJD  Added s3c24xx_init_clocks() call
  *     10-Mar-2005 LCVR Changed S3C2410_{VA,SZ} to S3C24XX_{VA,SZ} & IODESC_ENT
  *     14-Mar-2005 BJD  Updated for __iomem
+ *     15-Jan-2006 LCVR Updated S3C2410_PA_##x to new S3C24XX_PA_##x macro
 */
 
 /* todo - fix when rmk changes iodescs to use `void __iomem *` */
 
-#define IODESC_ENT(x) { (unsigned long)S3C24XX_VA_##x, __phys_to_pfn(S3C2410_PA_##x), S3C24XX_SZ_##x, MT_DEVICE }
+#define IODESC_ENT(x) { (unsigned long)S3C24XX_VA_##x, __phys_to_pfn(S3C24XX_PA_##x), S3C24XX_SZ_##x, MT_DEVICE }
 
 #ifndef MHZ
 #define MHZ (1000*1000)
index f58406e6ef5a69a3d65103597813ecffdd3a587d..b8d994a24d1c18173252feedadceb562449a2a23 100644 (file)
@@ -10,6 +10,7 @@
  * published by the Free Software Foundation.
  *
  * Modifications:
+ *     15-Jan-2006 LCVR Using S3C24XX_PA_##x macro for common S3C24XX devices
  *     10-Mar-2005 LCVR Changed S3C2410_{VA,SZ} to S3C24XX_{VA,SZ}
  *     10-Feb-2005 BJD  Added camera from guillaume.gourat@nexvision.tv
  *     29-Aug-2004 BJD  Added timers 0 through 3
@@ -46,8 +47,8 @@ struct platform_device *s3c24xx_uart_devs[3];
 
 static struct resource s3c_usb_resource[] = {
        [0] = {
-               .start = S3C2410_PA_USBHOST,
-               .end   = S3C2410_PA_USBHOST + S3C24XX_SZ_USBHOST - 1,
+               .start = S3C24XX_PA_USBHOST,
+               .end   = S3C24XX_PA_USBHOST + S3C24XX_SZ_USBHOST - 1,
                .flags = IORESOURCE_MEM,
        },
        [1] = {
@@ -76,8 +77,8 @@ EXPORT_SYMBOL(s3c_device_usb);
 
 static struct resource s3c_lcd_resource[] = {
        [0] = {
-               .start = S3C2410_PA_LCD,
-               .end   = S3C2410_PA_LCD + S3C24XX_SZ_LCD - 1,
+               .start = S3C24XX_PA_LCD,
+               .end   = S3C24XX_PA_LCD + S3C24XX_SZ_LCD - 1,
                .flags = IORESOURCE_MEM,
        },
        [1] = {
@@ -139,8 +140,8 @@ EXPORT_SYMBOL(s3c_device_nand);
 
 static struct resource s3c_usbgadget_resource[] = {
        [0] = {
-               .start = S3C2410_PA_USBDEV,
-               .end   = S3C2410_PA_USBDEV + S3C24XX_SZ_USBDEV - 1,
+               .start = S3C24XX_PA_USBDEV,
+               .end   = S3C24XX_PA_USBDEV + S3C24XX_SZ_USBDEV - 1,
                .flags = IORESOURCE_MEM,
        },
        [1] = {
@@ -164,8 +165,8 @@ EXPORT_SYMBOL(s3c_device_usbgadget);
 
 static struct resource s3c_wdt_resource[] = {
        [0] = {
-               .start = S3C2410_PA_WATCHDOG,
-               .end   = S3C2410_PA_WATCHDOG + S3C24XX_SZ_WATCHDOG - 1,
+               .start = S3C24XX_PA_WATCHDOG,
+               .end   = S3C24XX_PA_WATCHDOG + S3C24XX_SZ_WATCHDOG - 1,
                .flags = IORESOURCE_MEM,
        },
        [1] = {
@@ -189,8 +190,8 @@ EXPORT_SYMBOL(s3c_device_wdt);
 
 static struct resource s3c_i2c_resource[] = {
        [0] = {
-               .start = S3C2410_PA_IIC,
-               .end   = S3C2410_PA_IIC + S3C24XX_SZ_IIC - 1,
+               .start = S3C24XX_PA_IIC,
+               .end   = S3C24XX_PA_IIC + S3C24XX_SZ_IIC - 1,
                .flags = IORESOURCE_MEM,
        },
        [1] = {
@@ -214,8 +215,8 @@ EXPORT_SYMBOL(s3c_device_i2c);
 
 static struct resource s3c_iis_resource[] = {
        [0] = {
-               .start = S3C2410_PA_IIS,
-               .end   = S3C2410_PA_IIS + S3C24XX_SZ_IIS -1,
+               .start = S3C24XX_PA_IIS,
+               .end   = S3C24XX_PA_IIS + S3C24XX_SZ_IIS -1,
                .flags = IORESOURCE_MEM,
        }
 };
@@ -239,8 +240,8 @@ EXPORT_SYMBOL(s3c_device_iis);
 
 static struct resource s3c_rtc_resource[] = {
        [0] = {
-               .start = S3C2410_PA_RTC,
-               .end   = S3C2410_PA_RTC + 0xff,
+               .start = S3C24XX_PA_RTC,
+               .end   = S3C24XX_PA_RTC + 0xff,
                .flags = IORESOURCE_MEM,
        },
        [1] = {
@@ -268,8 +269,8 @@ EXPORT_SYMBOL(s3c_device_rtc);
 
 static struct resource s3c_adc_resource[] = {
        [0] = {
-               .start = S3C2410_PA_ADC,
-               .end   = S3C2410_PA_ADC + S3C24XX_SZ_ADC - 1,
+               .start = S3C24XX_PA_ADC,
+               .end   = S3C24XX_PA_ADC + S3C24XX_SZ_ADC - 1,
                .flags = IORESOURCE_MEM,
        },
        [1] = {
@@ -316,8 +317,8 @@ EXPORT_SYMBOL(s3c_device_sdi);
 
 static struct resource s3c_spi0_resource[] = {
        [0] = {
-               .start = S3C2410_PA_SPI,
-               .end   = S3C2410_PA_SPI + 0x1f,
+               .start = S3C24XX_PA_SPI,
+               .end   = S3C24XX_PA_SPI + 0x1f,
                .flags = IORESOURCE_MEM,
        },
        [1] = {
@@ -341,8 +342,8 @@ EXPORT_SYMBOL(s3c_device_spi0);
 
 static struct resource s3c_spi1_resource[] = {
        [0] = {
-               .start = S3C2410_PA_SPI + 0x20,
-               .end   = S3C2410_PA_SPI + 0x20 + 0x1f,
+               .start = S3C24XX_PA_SPI + 0x20,
+               .end   = S3C24XX_PA_SPI + 0x20 + 0x1f,
                .flags = IORESOURCE_MEM,
        },
        [1] = {
@@ -366,8 +367,8 @@ EXPORT_SYMBOL(s3c_device_spi1);
 
 static struct resource s3c_timer0_resource[] = {
        [0] = {
-               .start = S3C2410_PA_TIMER + 0x0C,
-               .end   = S3C2410_PA_TIMER + 0x0C + 0xB,
+               .start = S3C24XX_PA_TIMER + 0x0C,
+               .end   = S3C24XX_PA_TIMER + 0x0C + 0xB,
                .flags = IORESOURCE_MEM,
        },
        [1] = {
@@ -391,8 +392,8 @@ EXPORT_SYMBOL(s3c_device_timer0);
 
 static struct resource s3c_timer1_resource[] = {
        [0] = {
-               .start = S3C2410_PA_TIMER + 0x18,
-               .end   = S3C2410_PA_TIMER + 0x23,
+               .start = S3C24XX_PA_TIMER + 0x18,
+               .end   = S3C24XX_PA_TIMER + 0x23,
                .flags = IORESOURCE_MEM,
        },
        [1] = {
@@ -416,8 +417,8 @@ EXPORT_SYMBOL(s3c_device_timer1);
 
 static struct resource s3c_timer2_resource[] = {
        [0] = {
-               .start = S3C2410_PA_TIMER + 0x24,
-               .end   = S3C2410_PA_TIMER + 0x2F,
+               .start = S3C24XX_PA_TIMER + 0x24,
+               .end   = S3C24XX_PA_TIMER + 0x2F,
                .flags = IORESOURCE_MEM,
        },
        [1] = {
@@ -441,8 +442,8 @@ EXPORT_SYMBOL(s3c_device_timer2);
 
 static struct resource s3c_timer3_resource[] = {
        [0] = {
-               .start = S3C2410_PA_TIMER + 0x30,
-               .end   = S3C2410_PA_TIMER + 0x3B,
+               .start = S3C24XX_PA_TIMER + 0x30,
+               .end   = S3C24XX_PA_TIMER + 0x3B,
                .flags = IORESOURCE_MEM,
        },
        [1] = {
index 65feaf20d23e842d1d46ba88ed674f4942558c7a..4dbd8e758ea6f9691f73e5c06439fa4d2efec2fc 100644 (file)
@@ -1152,7 +1152,7 @@ static int __init s3c2410_init_dma(void)
 
        printk("S3C2410 DMA Driver, (c) 2003-2004 Simtec Electronics\n");
 
-       dma_base = ioremap(S3C2410_PA_DMA, 0x200);
+       dma_base = ioremap(S3C24XX_PA_DMA, 0x200);
        if (dma_base == NULL) {
                printk(KERN_ERR "dma failed to remap register block\n");
                return -ENOMEM;
index 61768dac7feed7b3ebaada47062d0f71831d2748..e9a055b779b7f1158c1759ebd7cecab08e4a2ab7 100644 (file)
@@ -133,12 +133,12 @@ ENTRY(s3c2410_cpu_resume)
        @@ load UART to allow us to print the two characters for
        @@ resume debug
 
-       mov     r2, #S3C2410_PA_UART & 0xff000000
-       orr     r2, r2, #S3C2410_PA_UART & 0xff000
+       mov     r2, #S3C24XX_PA_UART & 0xff000000
+       orr     r2, r2, #S3C24XX_PA_UART & 0xff000
 
 #if 0
        /* SMDK2440 LED set */
-       mov     r14, #S3C2410_PA_GPIO
+       mov     r14, #S3C24XX_PA_GPIO
        ldr     r12, [ r14, #0x54 ]
        bic     r12, r12, #3<<4
        orr     r12, r12, #1<<7
index de3ce1eec2ece4d222a2293bfa325fb585104433..da9b35974118d34d2b6fac17bb246306a6e19228 100644 (file)
@@ -142,7 +142,7 @@ __ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size,
                return NULL;
        addr = (unsigned long)area->addr;
        if (remap_area_pages(addr, pfn, size, flags)) {
-               vfree(addr);
+               vfree((void *)addr);
                return NULL;
        }
        return (void __iomem *) (offset + (char *)addr);
index d0245a31d4dd15f8382e0de7087b3027e97acbc7..ef8d30a185a97574657ee66d31134b9feec76363 100644 (file)
@@ -343,6 +343,12 @@ static struct mem_types mem_types[] __initdata = {
                                PMD_SECT_AP_WRITE | PMD_SECT_BUFFERABLE |
                                PMD_SECT_TEX(1),
                .domain    = DOMAIN_IO,
+       },
+       [MT_NONSHARED_DEVICE] = {
+               .prot_l1   = PMD_TYPE_TABLE,
+               .prot_sect = PMD_TYPE_SECT | PMD_SECT_NONSHARED_DEV |
+                               PMD_SECT_AP_WRITE,
+               .domain    = DOMAIN_IO,
        }
 };
 
index 7ebc5a29db8d68d6de060d8142fbc4f851e7f3e8..3c2bfc0efdaf6f5bdee3f7abcf0bc0db6f0d4a2a 100644 (file)
@@ -34,7 +34,7 @@ DEFINE_SPINLOCK(clockfw_lock);
 static struct clk_functions *arch_clock;
 
 /*-------------------------------------------------------------------------
- * Standard clock functions defined in asm/hardware/clock.h
+ * Standard clock functions defined in include/linux/clk.h
  *-------------------------------------------------------------------------*/
 
 struct clk * clk_get(struct device *dev, const char *id)
@@ -60,12 +60,8 @@ int clk_enable(struct clk *clk)
        int ret = 0;
 
        spin_lock_irqsave(&clockfw_lock, flags);
-       if (clk->enable)
-               ret = clk->enable(clk);
-       else if (arch_clock->clk_enable)
+       if (arch_clock->clk_enable)
                ret = arch_clock->clk_enable(clk);
-       else
-               printk(KERN_ERR "Could not enable clock %s\n", clk->name);
        spin_unlock_irqrestore(&clockfw_lock, flags);
 
        return ret;
@@ -77,41 +73,12 @@ void clk_disable(struct clk *clk)
        unsigned long flags;
 
        spin_lock_irqsave(&clockfw_lock, flags);
-       if (clk->disable)
-               clk->disable(clk);
-       else if (arch_clock->clk_disable)
+       if (arch_clock->clk_disable)
                arch_clock->clk_disable(clk);
-       else
-               printk(KERN_ERR "Could not disable clock %s\n", clk->name);
        spin_unlock_irqrestore(&clockfw_lock, flags);
 }
 EXPORT_SYMBOL(clk_disable);
 
-int clk_use(struct clk *clk)
-{
-       unsigned long flags;
-       int ret = 0;
-
-       spin_lock_irqsave(&clockfw_lock, flags);
-       if (arch_clock->clk_use)
-               ret = arch_clock->clk_use(clk);
-       spin_unlock_irqrestore(&clockfw_lock, flags);
-
-       return ret;
-}
-EXPORT_SYMBOL(clk_use);
-
-void clk_unuse(struct clk *clk)
-{
-       unsigned long flags;
-
-       spin_lock_irqsave(&clockfw_lock, flags);
-       if (arch_clock->clk_unuse)
-               arch_clock->clk_unuse(clk);
-       spin_unlock_irqrestore(&clockfw_lock, flags);
-}
-EXPORT_SYMBOL(clk_unuse);
-
 int clk_get_usecount(struct clk *clk)
 {
        unsigned long flags;
@@ -146,7 +113,7 @@ void clk_put(struct clk *clk)
 EXPORT_SYMBOL(clk_put);
 
 /*-------------------------------------------------------------------------
- * Optional clock functions defined in asm/hardware/clock.h
+ * Optional clock functions defined in include/linux/clk.h
  *-------------------------------------------------------------------------*/
 
 long clk_round_rate(struct clk *clk, unsigned long rate)
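
With the per-clock ->enable/->disable fallbacks and the clk_use()/clk_unuse() exports removed above, the generic layer reduces to dispatching through the single arch_clock ops table under clockfw_lock. A stand-alone model of that dispatch follows; the locking is omitted and all names are illustrative.

#include <stdio.h>

struct clk { const char *name; };

struct clk_functions {
	int  (*clk_enable)(struct clk *clk);
	void (*clk_disable)(struct clk *clk);
};

static int  soc_enable(struct clk *clk)  { printf("enable %s\n", clk->name); return 0; }
static void soc_disable(struct clk *clk) { printf("disable %s\n", clk->name); }

static struct clk_functions soc_clk_functions = {
	.clk_enable  = soc_enable,
	.clk_disable = soc_disable,
};
static struct clk_functions *arch_clock = &soc_clk_functions;	/* set once at init */

int clk_enable(struct clk *clk)
{
	return arch_clock->clk_enable ? arch_clock->clk_enable(clk) : 0;
}

void clk_disable(struct clk *clk)
{
	if (arch_clock->clk_disable)
		arch_clock->clk_disable(clk);
}

int main(void)
{
	struct clk uart = { "uart1_ck" };

	clk_enable(&uart);
	clk_disable(&uart);
	return 0;
}
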
index ca3681a824ac6581d9bfb2c3fe27e0b74b28091a..b4d5b9e4bfce68c72805c158b4a038ebc2dc4d65 100644 (file)
@@ -853,19 +853,19 @@ static int __init _omap_gpio_init(void)
                if (IS_ERR(gpio_ick))
                        printk("Could not get arm_gpio_ck\n");
                else
-                       clk_use(gpio_ick);
+                       clk_enable(gpio_ick);
        }
        if (cpu_is_omap24xx()) {
                gpio_ick = clk_get(NULL, "gpios_ick");
                if (IS_ERR(gpio_ick))
                        printk("Could not get gpios_ick\n");
                else
-                       clk_use(gpio_ick);
+                       clk_enable(gpio_ick);
                gpio_fck = clk_get(NULL, "gpios_fck");
                if (IS_ERR(gpio_ick))
                        printk("Could not get gpios_fck\n");
                else
-                       clk_use(gpio_fck);
+                       clk_enable(gpio_fck);
        }
 
 #ifdef CONFIG_ARCH_OMAP15XX
index be0e0f32a598f0927f2c923ac95790b4c72d18dc..1cd2cace7e1b595d21361031a96774dd0436cef6 100644 (file)
@@ -190,11 +190,11 @@ static int omap_mcbsp_check(unsigned int id)
 static void omap_mcbsp_dsp_request(void)
 {
        if (cpu_is_omap1510() || cpu_is_omap16xx()) {
-               clk_use(mcbsp_dsp_ck);
-               clk_use(mcbsp_api_ck);
+               clk_enable(mcbsp_dsp_ck);
+               clk_enable(mcbsp_api_ck);
 
                /* enable 12MHz clock to mcbsp 1 & 3 */
-               clk_use(mcbsp_dspxor_ck);
+               clk_enable(mcbsp_dspxor_ck);
 
                /*
                 * DSP external peripheral reset
@@ -208,9 +208,9 @@ static void omap_mcbsp_dsp_request(void)
 static void omap_mcbsp_dsp_free(void)
 {
        if (cpu_is_omap1510() || cpu_is_omap16xx()) {
-               clk_unuse(mcbsp_dspxor_ck);
-               clk_unuse(mcbsp_dsp_ck);
-               clk_unuse(mcbsp_api_ck);
+               clk_disable(mcbsp_dspxor_ck);
+               clk_disable(mcbsp_dsp_ck);
+               clk_disable(mcbsp_api_ck);
        }
 }
 
index e40fcc8b43d4d621d874bdfdc02ffff96b864bdc..5cc6775c789c949a4ff2d7848665fc2a71480ed1 100644 (file)
@@ -88,7 +88,7 @@ static int __init omap_ocpi_init(void)
        if (IS_ERR(ocpi_ck))
                return PTR_ERR(ocpi_ck);
 
-       clk_use(ocpi_ck);
+       clk_enable(ocpi_ck);
        ocpi_enable();
        printk("OMAP OCPI interconnect driver loaded\n");
 
@@ -102,7 +102,7 @@ static void __exit omap_ocpi_exit(void)
        if (!cpu_is_omap16xx())
                return;
 
-       clk_unuse(ocpi_ck);
+       clk_disable(ocpi_ck);
        clk_put(ocpi_ck);
 }
 
index 5b7146f54fd5814f4ba00447c04d676b4dd945cf..679c1d5cc95807ad9a67b92a88b35f547ef94bd0 100644 (file)
@@ -35,74 +35,22 @@ struct fdpic_func_descriptor {
        unsigned long   GOT;
 };
 
-static int do_signal(sigset_t *oldset);
-
 /*
  * Atomically swap in the new signal mask, and wait for a signal.
  */
 asmlinkage int sys_sigsuspend(int history0, int history1, old_sigset_t mask)
 {
-       sigset_t saveset;
-
        mask &= _BLOCKABLE;
        spin_lock_irq(&current->sighand->siglock);
-       saveset = current->blocked;
+       current->saved_sigmask = current->blocked;
        siginitset(&current->blocked, mask);
        recalc_sigpending();
        spin_unlock_irq(&current->sighand->siglock);
 
-       __frame->gr8 = -EINTR;
-       while (1) {
-               current->state = TASK_INTERRUPTIBLE;
-               schedule();
-               if (do_signal(&saveset))
-                       /* return the signal number as the return value of this function
-                        * - this is an utterly evil hack. syscalls should not invoke do_signal()
-                        *   as entry.S sets regs->gr8 to the return value of the system call
-                        * - we can't just use sigpending() as we'd have to discard SIG_IGN signals
-                        *   and call waitpid() if SIGCHLD needed discarding
-                        * - this only works on the i386 because it passes arguments to the signal
-                        *   handler on the stack, and the return value in EAX is effectively
-                        *   discarded
-                        */
-                       return __frame->gr8;
-       }
-}
-
-asmlinkage int sys_rt_sigsuspend(sigset_t __user *unewset, size_t sigsetsize)
-{
-       sigset_t saveset, newset;
-
-       /* XXX: Don't preclude handling different sized sigset_t's.  */
-       if (sigsetsize != sizeof(sigset_t))
-               return -EINVAL;
-
-       if (copy_from_user(&newset, unewset, sizeof(newset)))
-               return -EFAULT;
-       sigdelsetmask(&newset, ~_BLOCKABLE);
-
-       spin_lock_irq(&current->sighand->siglock);
-       saveset = current->blocked;
-       current->blocked = newset;
-       recalc_sigpending();
-       spin_unlock_irq(&current->sighand->siglock);
-
-       __frame->gr8 = -EINTR;
-       while (1) {
-               current->state = TASK_INTERRUPTIBLE;
-               schedule();
-               if (do_signal(&saveset))
-                       /* return the signal number as the return value of this function
-                        * - this is an utterly evil hack. syscalls should not invoke do_signal()
-                        *   as entry.S sets regs->gr8 to the return value of the system call
-                        * - we can't just use sigpending() as we'd have to discard SIG_IGN signals
-                        *   and call waitpid() if SIGCHLD needed discarding
-                        * - this only works on the i386 because it passes arguments to the signal
-                        *   handler on the stack, and the return value in EAX is effectively
-                        *   discarded
-                        */
-                       return __frame->gr8;
-       }
+       current->state = TASK_INTERRUPTIBLE;
+       schedule();
+       set_thread_flag(TIF_RESTORE_SIGMASK);
+       return -ERESTARTNOHAND;
 }
 
 asmlinkage int sys_sigaction(int sig,
@@ -372,11 +320,11 @@ static int setup_frame(int sig, struct k_sigaction *ka, sigset_t *set)
               frame->pretcode);
 #endif
 
-       return 1;
+       return 0;
 
 give_sigsegv:
        force_sig(SIGSEGV, current);
-       return 0;
+       return -EFAULT;
 
 } /* end setup_frame() */
 
@@ -471,11 +419,11 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
               frame->pretcode);
 #endif
 
-       return 1;
+       return 0;
 
 give_sigsegv:
        force_sig(SIGSEGV, current);
-       return 0;
+       return -EFAULT;
 
 } /* end setup_rt_frame() */
 
@@ -516,7 +464,7 @@ static int handle_signal(unsigned long sig, siginfo_t *info,
        else
                ret = setup_frame(sig, ka, oldset);
 
-       if (ret) {
+       if (ret == 0) {
                spin_lock_irq(&current->sighand->siglock);
                sigorsets(&current->blocked, &current->blocked,
                          &ka->sa.sa_mask);
@@ -536,10 +484,11 @@ static int handle_signal(unsigned long sig, siginfo_t *info,
  * want to handle. Thus you cannot kill init even with a SIGKILL even by
  * mistake.
  */
-static int do_signal(sigset_t *oldset)
+static void do_signal(void)
 {
        struct k_sigaction ka;
        siginfo_t info;
+       sigset_t *oldset;
        int signr;
 
        /*
@@ -549,43 +498,62 @@ static int do_signal(sigset_t *oldset)
         * if so.
         */
        if (!user_mode(__frame))
-               return 1;
+               return;
 
        if (try_to_freeze())
                goto no_signal;
 
-       if (!oldset)
+       if (test_thread_flag(TIF_RESTORE_SIGMASK))
+               oldset = &current->saved_sigmask;
+       else
                oldset = &current->blocked;
 
        signr = get_signal_to_deliver(&info, &ka, __frame, NULL);
-       if (signr > 0)
-               return handle_signal(signr, &info, &ka, oldset);
+       if (signr > 0) {
+               if (handle_signal(signr, &info, &ka, oldset) == 0) {
+                       /* a signal was successfully delivered; the saved
+                        * sigmask will have been stored in the signal frame,
+                        * and will be restored by sigreturn, so we can simply
+                        * clear the TIF_RESTORE_SIGMASK flag */
+                       if (test_thread_flag(TIF_RESTORE_SIGMASK))
+                               clear_thread_flag(TIF_RESTORE_SIGMASK);
+               }
+
+               return;
+       }
 
 no_signal:
        /* Did we come from a system call? */
        if (__frame->syscallno >= 0) {
                /* Restart the system call - no handlers present */
-               if (__frame->gr8 == -ERESTARTNOHAND ||
-                   __frame->gr8 == -ERESTARTSYS ||
-                   __frame->gr8 == -ERESTARTNOINTR) {
+               switch (__frame->gr8) {
+               case -ERESTARTNOHAND:
+               case -ERESTARTSYS:
+               case -ERESTARTNOINTR:
                        __frame->gr8 = __frame->orig_gr8;
                        __frame->pc -= 4;
-               }
+                       break;
 
-               if (__frame->gr8 == -ERESTART_RESTARTBLOCK){
+               case -ERESTART_RESTARTBLOCK:
                        __frame->gr8 = __NR_restart_syscall;
                        __frame->pc -= 4;
+                       break;
                }
        }
 
-       return 0;
+       /* if there's no signal to deliver, we just put the saved sigmask
+        * back */
+       if (test_thread_flag(TIF_RESTORE_SIGMASK)) {
+               clear_thread_flag(TIF_RESTORE_SIGMASK);
+               sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
+       }
 
 } /* end do_signal() */
 
 /*****************************************************************************/
 /*
  * notification of userspace execution resumption
- * - triggered by current->work.notify_resume
+ * - triggered by the TIF_WORK_MASK flags
  */
 asmlinkage void do_notify_resume(__u32 thread_info_flags)
 {
@@ -594,7 +562,7 @@ asmlinkage void do_notify_resume(__u32 thread_info_flags)
                clear_thread_flag(TIF_SINGLESTEP);
 
        /* deal with pending signal delivery */
-       if (thread_info_flags & _TIF_SIGPENDING)
-               do_signal(NULL);
+       if (thread_info_flags & (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK))
+               do_signal();
 
 } /* end do_notify_resume() */
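
The FRV conversion above adopts the saved_sigmask/TIF_RESTORE_SIGMASK scheme: sys_sigsuspend() stashes the old mask, sleeps once, and returns -ERESTARTNOHAND, while do_signal() restores the saved mask either via the signal frame or directly when nothing is delivered. The user-visible contract being preserved is ordinary POSIX sigsuspend() behaviour, shown here as a small stand-alone program (plain libc calls, nothing taken from this patch):

#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static volatile sig_atomic_t got_usr1;

static void on_usr1(int sig)
{
	(void)sig;
	got_usr1 = 1;
}

int main(void)
{
	struct sigaction sa;
	sigset_t block_usr1, waitmask;

	sa.sa_handler = on_usr1;
	sigemptyset(&sa.sa_mask);
	sa.sa_flags = 0;
	sigaction(SIGUSR1, &sa, NULL);

	sigemptyset(&block_usr1);
	sigaddset(&block_usr1, SIGUSR1);
	sigprocmask(SIG_BLOCK, &block_usr1, NULL);	/* SIGUSR1 blocked normally */

	kill(getpid(), SIGUSR1);			/* stays pending for now */

	sigemptyset(&waitmask);
	sigsuspend(&waitmask);		/* temporary mask; handler runs; returns -1 with errno EINTR */

	/* On return the pre-call mask (SIGUSR1 blocked) is in force again. */
	printf("handler ran: %d\n", (int)got_usr1);
	return 0;
}
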
index 6a431b9260190f52f45b5c463189b104f7e108b8..3cbe6e9cb9fcb3799f573fb8799d1f0380fe6155 100644 (file)
@@ -644,6 +644,8 @@ CONFIG_8139TOO_PIO=y
 # CONFIG_ACENIC is not set
 # CONFIG_DL2K is not set
 # CONFIG_E1000 is not set
+# CONFIG_E1000_NAPI is not set
+# CONFIG_E1000_DISABLE_PACKET_SPLIT is not set
 # CONFIG_NS83820 is not set
 # CONFIG_HAMACHI is not set
 # CONFIG_YELLOWFIN is not set
index 0f1eb507233b701c6bb7b158104e0e5cfd9e4d85..26892d2099b0278766b993cbde60d17ef2fcb534 100644 (file)
@@ -96,6 +96,7 @@ config X86_POWERNOW_K8_ACPI
 
 config X86_GX_SUSPMOD
        tristate "Cyrix MediaGX/NatSemi Geode Suspend Modulation"
+       depends on PCI
        help
         This add the CPUFreq driver for NatSemi Geode processors which
         support suspend modulation.
index 270f2188d68b1857d3ef59c3a71adf7f106de2c4..cc73a7ae34bc3f92f7177bd606b698bcc192e28b 100644 (file)
@@ -52,6 +52,7 @@ enum {
 
 
 static int has_N44_O17_errata[NR_CPUS];
+static int has_N60_errata[NR_CPUS];
 static unsigned int stock_freq;
 static struct cpufreq_driver p4clockmod_driver;
 static unsigned int cpufreq_p4_get(unsigned int cpu);
@@ -226,6 +227,12 @@ static int cpufreq_p4_cpu_init(struct cpufreq_policy *policy)
        case 0x0f12:
                has_N44_O17_errata[policy->cpu] = 1;
                dprintk("has errata -- disabling low frequencies\n");
+               break;
+
+       case 0x0f29:
+               has_N60_errata[policy->cpu] = 1;
+               dprintk("has errata -- disabling frequencies lower than 2GHz\n");
+               break;
        }
        
        /* get max frequency */
@@ -237,6 +244,8 @@ static int cpufreq_p4_cpu_init(struct cpufreq_policy *policy)
        for (i=1; (p4clockmod_table[i].frequency != CPUFREQ_TABLE_END); i++) {
                if ((i<2) && (has_N44_O17_errata[policy->cpu]))
                        p4clockmod_table[i].frequency = CPUFREQ_ENTRY_INVALID;
+               else if (has_N60_errata[policy->cpu] && p4clockmod_table[i].frequency < 2000000)
+                       p4clockmod_table[i].frequency = CPUFREQ_ENTRY_INVALID;
                else
                        p4clockmod_table[i].frequency = (stock_freq * i)/8;
        }
index aaf89cb2bc5145fcc4c1b9cf426d5fdefa4ef375..87ccdac84928db35706409477a86edc49ac8c736 100644 (file)
@@ -25,8 +25,7 @@ static void __devinit quirk_intel_irqbalance(struct pci_dev *dev)
 
        /* enable access to config space*/
        pci_read_config_byte(dev, 0xf4, &config);
-       config |= 0x2;
-       pci_write_config_byte(dev, 0xf4, config);
+       pci_write_config_byte(dev, 0xf4, config|0x2);
 
        /* read xTPR register */
        raw_pci_ops->read(0, 0, 0x40, 0x4c, 2, &word);
@@ -42,9 +41,9 @@ static void __devinit quirk_intel_irqbalance(struct pci_dev *dev)
 #endif
        }
 
-       config &= ~0x2;
-       /* disable access to config space*/
-       pci_write_config_byte(dev, 0xf4, config);
+       /* put back the original value for config space*/
+       if (!(config & 0x2))
+               pci_write_config_byte(dev, 0xf4, config);
 }
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,   PCI_DEVICE_ID_INTEL_E7320_MCH,  quirk_intel_irqbalance);
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,   PCI_DEVICE_ID_INTEL_E7525_MCH,  quirk_intel_irqbalance);
index adcd069db91e8cae96ec53eeb2861f9c9e8c4b92..963616d364ec48984013edb1750ce1507df4fb44 100644 (file)
 asmlinkage int
 sys_sigsuspend(int history0, int history1, old_sigset_t mask)
 {
-       struct pt_regs * regs = (struct pt_regs *) &history0;
-       sigset_t saveset;
-
        mask &= _BLOCKABLE;
        spin_lock_irq(&current->sighand->siglock);
-       saveset = current->blocked;
+       current->saved_sigmask = current->blocked;
        siginitset(&current->blocked, mask);
        recalc_sigpending();
        spin_unlock_irq(&current->sighand->siglock);
 
-       regs->eax = -EINTR;
-       while (1) {
-               current->state = TASK_INTERRUPTIBLE;
-               schedule();
-               if (do_signal(regs, &saveset))
-                       return -EINTR;
-       }
-}
-
-asmlinkage int
-sys_rt_sigsuspend(struct pt_regs regs)
-{
-       sigset_t saveset, newset;
-
-       /* XXX: Don't preclude handling different sized sigset_t's.  */
-       if (regs.ecx != sizeof(sigset_t))
-               return -EINVAL;
-
-       if (copy_from_user(&newset, (sigset_t __user *)regs.ebx, sizeof(newset)))
-               return -EFAULT;
-       sigdelsetmask(&newset, ~_BLOCKABLE);
-
-       spin_lock_irq(&current->sighand->siglock);
-       saveset = current->blocked;
-       current->blocked = newset;
-       recalc_sigpending();
-       spin_unlock_irq(&current->sighand->siglock);
-
-       regs.eax = -EINTR;
-       while (1) {
-               current->state = TASK_INTERRUPTIBLE;
-               schedule();
-               if (do_signal(&regs, &saveset))
-                       return -EINTR;
-       }
+       current->state = TASK_INTERRUPTIBLE;
+       schedule();
+       set_thread_flag(TIF_RESTORE_SIGMASK);
+       return -ERESTARTNOHAND;
 }
 
 asmlinkage int 
@@ -433,11 +399,11 @@ static int setup_frame(int sig, struct k_sigaction *ka,
                current->comm, current->pid, frame, regs->eip, frame->pretcode);
 #endif
 
-       return 1;
+       return 0;
 
 give_sigsegv:
        force_sigsegv(sig, current);
-       return 0;
+       return -EFAULT;
 }
 
 static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
@@ -527,11 +493,11 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
                current->comm, current->pid, frame, regs->eip, frame->pretcode);
 #endif
 
-       return 1;
+       return 0;
 
 give_sigsegv:
        force_sigsegv(sig, current);
-       return 0;
+       return -EFAULT;
 }
 
 /*
@@ -581,7 +547,7 @@ handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
        else
                ret = setup_frame(sig, ka, oldset, regs);
 
-       if (ret) {
+       if (ret == 0) {
                spin_lock_irq(&current->sighand->siglock);
                sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
                if (!(ka->sa.sa_flags & SA_NODEFER))
@@ -598,11 +564,12 @@ handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
  * want to handle. Thus you cannot kill init even with a SIGKILL even by
  * mistake.
  */
-int fastcall do_signal(struct pt_regs *regs, sigset_t *oldset)
+static void fastcall do_signal(struct pt_regs *regs)
 {
        siginfo_t info;
        int signr;
        struct k_sigaction ka;
+       sigset_t *oldset;
 
        /*
         * We want the common case to go fast, which
@@ -613,12 +580,14 @@ int fastcall do_signal(struct pt_regs *regs, sigset_t *oldset)
         * CS suffices.
         */
        if (!user_mode(regs))
-               return 1;
+               return;
 
        if (try_to_freeze())
                goto no_signal;
 
-       if (!oldset)
+       if (test_thread_flag(TIF_RESTORE_SIGMASK))
+               oldset = &current->saved_sigmask;
+       else
                oldset = &current->blocked;
 
        signr = get_signal_to_deliver(&info, &ka, regs, NULL);
@@ -628,38 +597,55 @@ int fastcall do_signal(struct pt_regs *regs, sigset_t *oldset)
                 * have been cleared if the watchpoint triggered
                 * inside the kernel.
                 */
-               if (unlikely(current->thread.debugreg[7])) {
+               if (unlikely(current->thread.debugreg[7]))
                        set_debugreg(current->thread.debugreg[7], 7);
-               }
 
                /* Whee!  Actually deliver the signal.  */
-               return handle_signal(signr, &info, &ka, oldset, regs);
+               if (handle_signal(signr, &info, &ka, oldset, regs) == 0) {
+                       /* a signal was successfully delivered; the saved
+                        * sigmask will have been stored in the signal frame,
+                        * and will be restored by sigreturn, so we can simply
+                        * clear the TIF_RESTORE_SIGMASK flag */
+                       if (test_thread_flag(TIF_RESTORE_SIGMASK))
+                               clear_thread_flag(TIF_RESTORE_SIGMASK);
+               }
+
+               return;
        }
 
- no_signal:
+no_signal:
        /* Did we come from a system call? */
        if (regs->orig_eax >= 0) {
                /* Restart the system call - no handlers present */
-               if (regs->eax == -ERESTARTNOHAND ||
-                   regs->eax == -ERESTARTSYS ||
-                   regs->eax == -ERESTARTNOINTR) {
+               switch (regs->eax) {
+               case -ERESTARTNOHAND:
+               case -ERESTARTSYS:
+               case -ERESTARTNOINTR:
                        regs->eax = regs->orig_eax;
                        regs->eip -= 2;
-               }
-               if (regs->eax == -ERESTART_RESTARTBLOCK){
+                       break;
+
+               case -ERESTART_RESTARTBLOCK:
                        regs->eax = __NR_restart_syscall;
                        regs->eip -= 2;
+                       break;
                }
        }
-       return 0;
+
+       /* if there's no signal to deliver, we just put the saved sigmask
+        * back */
+       if (test_thread_flag(TIF_RESTORE_SIGMASK)) {
+               clear_thread_flag(TIF_RESTORE_SIGMASK);
+               sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
+       }
 }
 
 /*
  * notification of userspace execution resumption
- * - triggered by current->work.notify_resume
+ * - triggered by the TIF_WORK_MASK flags
  */
 __attribute__((regparm(3)))
-void do_notify_resume(struct pt_regs *regs, sigset_t *oldset,
+void do_notify_resume(struct pt_regs *regs, void *_unused,
                      __u32 thread_info_flags)
 {
        /* Pending single-step? */
@@ -667,9 +653,10 @@ void do_notify_resume(struct pt_regs *regs, sigset_t *oldset,
                regs->eflags |= TF_MASK;
                clear_thread_flag(TIF_SINGLESTEP);
        }
+
        /* deal with pending signal delivery */
-       if (thread_info_flags & _TIF_SIGPENDING)
-               do_signal(regs,oldset);
+       if (thread_info_flags & (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK))
+               do_signal(regs);
        
        clear_thread_flag(TIF_IRET);
 }
index 6ff3e524322672a9e0c8b90935a1b439f6c2375a..1b665928336bd547c166e6aff4a11b4d786bf620 100644 (file)
@@ -294,3 +294,18 @@ ENTRY(sys_call_table)
        .long sys_inotify_add_watch
        .long sys_inotify_rm_watch
        .long sys_migrate_pages
+       .long sys_openat                /* 295 */
+       .long sys_mkdirat
+       .long sys_mknodat
+       .long sys_fchownat
+       .long sys_futimesat
+       .long sys_newfstatat            /* 300 */
+       .long sys_unlinkat
+       .long sys_renameat
+       .long sys_linkat
+       .long sys_symlinkat
+       .long sys_readlinkat            /* 305 */
+       .long sys_fchmodat
+       .long sys_faccessat
+       .long sys_pselect6
+       .long sys_ppoll
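
The entries above add the new *at() system calls (openat through ppoll) to the i386 syscall table. As a user-space illustration of the semantics they provide, the sketch below uses the ordinary libc openat() wrapper rather than raw syscall numbers; the paths are arbitrary examples.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[128];
	ssize_t n;
	int dirfd, fd;

	dirfd = open("/etc", O_RDONLY);		/* directory fd used as the anchor */
	if (dirfd < 0)
		return 1;

	fd = openat(dirfd, "hostname", O_RDONLY);	/* resolved as /etc/hostname */
	if (fd < 0) {
		close(dirfd);
		return 1;
	}

	n = read(fd, buf, sizeof(buf) - 1);
	if (n > 0) {
		buf[n] = '\0';
		printf("%s", buf);
	}

	close(fd);
	close(dirfd);
	return 0;
}
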
index 1d07d8072ec21aebbc5fc2c21318c8bccb98d77f..991c07b57c24c4d62ccc1a4c1a171987970ca149 100644 (file)
@@ -557,6 +557,7 @@ CONFIG_E100=m
 # CONFIG_DL2K is not set
 CONFIG_E1000=y
 # CONFIG_E1000_NAPI is not set
+# CONFIG_E1000_DISABLE_PACKET_SPLIT is not set
 # CONFIG_NS83820 is not set
 # CONFIG_HAMACHI is not set
 # CONFIG_YELLOWFIN is not set
index b1e8f09e9fd5654f46b3736156741b397bf8a2a9..6859119bc9ddd57eff42d325c3d4d6dfa3502e63 100644 (file)
@@ -565,6 +565,7 @@ CONFIG_E100=m
 # CONFIG_DL2K is not set
 CONFIG_E1000=y
 # CONFIG_E1000_NAPI is not set
+# CONFIG_E1000_DISABLE_PACKET_SPLIT is not set
 # CONFIG_NS83820 is not set
 # CONFIG_HAMACHI is not set
 # CONFIG_YELLOWFIN is not set
index 0856ca67dd502e5023bbbe9659e61792b1f4c045..53899dc8eb531b4243c5734dc618b79bfe2d1b50 100644 (file)
@@ -548,6 +548,7 @@ CONFIG_E100=y
 # CONFIG_DL2K is not set
 CONFIG_E1000=y
 # CONFIG_E1000_NAPI is not set
+# CONFIG_E1000_DISABLE_PACKET_SPLIT is not set
 # CONFIG_NS83820 is not set
 # CONFIG_HAMACHI is not set
 # CONFIG_YELLOWFIN is not set
index 275a26c6e5aa5eec4ef4d3c8c15ccf62ca6dd861..dcbc78a4cfa404b9b84acebba6f377fbbe0cc7e1 100644 (file)
@@ -565,6 +565,7 @@ CONFIG_E100=m
 # CONFIG_DL2K is not set
 CONFIG_E1000=y
 # CONFIG_E1000_NAPI is not set
+# CONFIG_E1000_DISABLE_PACKET_SPLIT is not set
 # CONFIG_NS83820 is not set
 # CONFIG_HAMACHI is not set
 # CONFIG_YELLOWFIN is not set
index 3945d378bd7ed913f7c530dc3135828580ff50bc..70dba1f0e2ee916a7cd032080c96b69e0896ff04 100644 (file)
@@ -52,9 +52,9 @@
 #include <linux/compat.h>
 #include <linux/vfs.h>
 #include <linux/mman.h>
+#include <linux/mutex.h>
 
 #include <asm/intrinsics.h>
-#include <asm/semaphore.h>
 #include <asm/types.h>
 #include <asm/uaccess.h>
 #include <asm/unistd.h>
@@ -86,7 +86,7 @@
  * while doing so.
  */
 /* XXX make per-mm: */
-static DECLARE_MUTEX(ia32_mmap_sem);
+static DEFINE_MUTEX(ia32_mmap_mutex);
 
 asmlinkage long
 sys32_execve (char __user *name, compat_uptr_t __user *argv, compat_uptr_t __user *envp,
@@ -895,11 +895,11 @@ ia32_do_mmap (struct file *file, unsigned long addr, unsigned long len, int prot
        prot = get_prot32(prot);
 
 #if PAGE_SHIFT > IA32_PAGE_SHIFT
-       down(&ia32_mmap_sem);
+       mutex_lock(&ia32_mmap_mutex);
        {
                addr = emulate_mmap(file, addr, len, prot, flags, offset);
        }
-       up(&ia32_mmap_sem);
+       mutex_unlock(&ia32_mmap_mutex);
 #else
        down_write(&current->mm->mmap_sem);
        {
@@ -1000,11 +1000,9 @@ sys32_munmap (unsigned int start, unsigned int len)
        if (start >= end)
                return 0;
 
-       down(&ia32_mmap_sem);
-       {
-               ret = sys_munmap(start, end - start);
-       }
-       up(&ia32_mmap_sem);
+       mutex_lock(&ia32_mmap_mutex);
+       ret = sys_munmap(start, end - start);
+       mutex_unlock(&ia32_mmap_mutex);
 #endif
        return ret;
 }
@@ -1056,7 +1054,7 @@ sys32_mprotect (unsigned int start, unsigned int len, int prot)
        if (retval < 0)
                return retval;
 
-       down(&ia32_mmap_sem);
+       mutex_lock(&ia32_mmap_mutex);
        {
                if (offset_in_page(start)) {
                        /* start address is 4KB aligned but not page aligned. */
@@ -1080,7 +1078,7 @@ sys32_mprotect (unsigned int start, unsigned int len, int prot)
                retval = sys_mprotect(start, end - start, prot);
        }
   out:
-       up(&ia32_mmap_sem);
+       mutex_unlock(&ia32_mmap_mutex);
        return retval;
 #endif
 }
@@ -1124,11 +1122,9 @@ sys32_mremap (unsigned int addr, unsigned int old_len, unsigned int new_len,
        old_len = PAGE_ALIGN(old_end) - addr;
        new_len = PAGE_ALIGN(new_end) - addr;
 
-       down(&ia32_mmap_sem);
-       {
-               ret = sys_mremap(addr, old_len, new_len, flags, new_addr);
-       }
-       up(&ia32_mmap_sem);
+       mutex_lock(&ia32_mmap_mutex);
+       ret = sys_mremap(addr, old_len, new_len, flags, new_addr);
+       mutex_unlock(&ia32_mmap_mutex);
 
        if ((ret >= 0) && (old_len < new_len)) {
                /* mremap expanded successfully */
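
The hunks above convert ia32_mmap_sem, a semaphore used purely as a lock, into a mutex (DEFINE_MUTEX plus mutex_lock()/mutex_unlock()). A minimal kernel-context sketch of the same before/after pattern; the names here are illustrative, not from sys_ia32.c.

#include <linux/mutex.h>

/* Before:  static DECLARE_MUTEX(foo_sem);   down(&foo_sem); ... up(&foo_sem);
 * After:   the dedicated mutex type and API, as used above. */
static DEFINE_MUTEX(foo_mutex);

static void foo_critical_section(void)
{
	mutex_lock(&foo_mutex);
	/* ... work serialized against all other foo_mutex holders ... */
	mutex_unlock(&foo_mutex);
}
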
index 403a80a58c13bf9ef89118ae4f4b7063d28df7e0..60a464bfd9e27ce2afe46bb0de051806b9b0cbfa 100644 (file)
@@ -512,7 +512,7 @@ ia64_state_save:
        st8 [temp1]=r12         // os_status, default is cold boot
        mov r6=IA64_MCA_SAME_CONTEXT
        ;;
-       st8 [temp1]=r6          // context, default is same context
+       st8 [temp2]=r6          // context, default is same context
 
        // Save the pt_regs data that is not in minstate.  The previous code
        // left regs at sos.
index 2ea4b39efffaf5a617474948d8eb7800400330ef..9c5194b385dab33ce122152ee665306fe2cbf761 100644 (file)
@@ -40,6 +40,7 @@
 #include <linux/bitops.h>
 #include <linux/capability.h>
 #include <linux/rcupdate.h>
+#include <linux/completion.h>
 
 #include <asm/errno.h>
 #include <asm/intrinsics.h>
@@ -286,7 +287,7 @@ typedef struct pfm_context {
 
        unsigned long           ctx_ovfl_regs[4];       /* which registers overflowed (notification) */
 
-       struct semaphore        ctx_restart_sem;        /* use for blocking notification mode */
+       struct completion       ctx_restart_done;       /* use for blocking notification mode */
 
        unsigned long           ctx_used_pmds[4];       /* bitmask of PMD used            */
        unsigned long           ctx_all_pmds[4];        /* bitmask of all accessible PMDs */
@@ -1991,7 +1992,7 @@ pfm_close(struct inode *inode, struct file *filp)
                /*
                 * force task to wake up from MASKED state
                 */
-               up(&ctx->ctx_restart_sem);
+               complete(&ctx->ctx_restart_done);
 
                DPRINT(("waking up ctx_state=%d\n", state));
 
@@ -2706,7 +2707,7 @@ pfm_context_create(pfm_context_t *ctx, void *arg, int count, struct pt_regs *reg
        /*
         * init restart semaphore to locked
         */
-       sema_init(&ctx->ctx_restart_sem, 0);
+       init_completion(&ctx->ctx_restart_done);
 
        /*
         * activation is used in SMP only
@@ -3687,7 +3688,7 @@ pfm_restart(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
         */
        if (CTX_OVFL_NOBLOCK(ctx) == 0 && state == PFM_CTX_MASKED) {
                DPRINT(("unblocking [%d] \n", task->pid));
-               up(&ctx->ctx_restart_sem);
+               complete(&ctx->ctx_restart_done);
        } else {
                DPRINT(("[%d] armed exit trap\n", task->pid));
 
@@ -5089,7 +5090,7 @@ pfm_handle_work(void)
         * may go through without blocking on SMP systems
         * if restart has been received already by the time we call down()
         */
-       ret = down_interruptible(&ctx->ctx_restart_sem);
+       ret = wait_for_completion_interruptible(&ctx->ctx_restart_done);
 
        DPRINT(("after block sleeping ret=%d\n", ret));
 
index 43b45b65ee5a9da2c446d712b67ba152f1e4fce0..f9e0ae936d1a9e278ea56bb9af1d35ae8159df26 100644 (file)
@@ -1283,8 +1283,9 @@ within_logging_rate_limit (void)
 
        if (jiffies - last_time > 5*HZ)
                count = 0;
-       if (++count < 5) {
+       if (count < 5) {
                last_time = jiffies;
+               count++;
                return 1;
        }
        return 0;
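
The hunk above makes the rate limiter test the counter before bumping it, so calls that are being suppressed no longer keep incrementing it, and up to five messages are allowed per burst. A stand-alone model of the corrected logic, with seconds standing in for jiffies:

#include <stdio.h>

/* At most 5 messages per burst; the count resets only once more than five
 * seconds have passed since the last message that was allowed through. */
static int within_logging_rate_limit(long now_sec)
{
	static long last_time;
	static int count;

	if (now_sec - last_time > 5)
		count = 0;
	if (count < 5) {
		last_time = now_sec;
		count++;
		return 1;
	}
	return 0;
}

int main(void)
{
	for (long t = 0; t < 12; t++)
		printf("t=%2ld -> %s\n", t, within_logging_rate_limit(t) ? "log" : "drop");
	return 0;
}
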
index b631cf86ed445b60d4417b2ad37f8cfec506744d..fcd2bad0286fc943b77a3c274a4beb4eca269540 100644 (file)
@@ -210,6 +210,7 @@ uncached_build_memmap(unsigned long start, unsigned long end, void *arg)
 
        dprintk(KERN_ERR "uncached_build_memmap(%lx %lx)\n", start, end);
 
+       touch_softlockup_watchdog();
        memset((char *)start, 0, length);
 
        node = paddr_to_nid(start - __IA64_UNCACHED_OFFSET);
index 7c88e9a585169cfa89f970178213b59c3b28078f..8182583c762c6a4928857373041cb900779044d0 100644 (file)
@@ -51,6 +51,15 @@ struct sn_flush_device_kernel {
        struct sn_flush_device_common *common;
 };
 
+/* 01/16/06 This struct is the old PROM/kernel struct and needs to be included
+ * for older official PROMs to function on the new kernel base.  This struct
+ * will be removed when the next official PROM release occurs. */
+
+struct sn_flush_device_war {
+       struct sn_flush_device_common common;
+       u32 filler; /* older PROMs expect the default size of a spinlock_t */
+};
+
 /*
  * **widget_p - Used as an array[wid_num][device] of sn_flush_device_kernel.
  */
index 233d55115d33013defe9b266d474c04d3171d2bb..a4c78152b3366568edf0949935765a11a63a355f 100644 (file)
@@ -10,6 +10,7 @@
 #include <linux/nodemask.h>
 #include <asm/sn/types.h>
 #include <asm/sn/addrs.h>
+#include <asm/sn/sn_feature_sets.h>
 #include <asm/sn/geo.h>
 #include <asm/sn/io.h>
 #include <asm/sn/pcibr_provider.h>
@@ -165,8 +166,46 @@ sn_pcidev_info_get(struct pci_dev *dev)
        return NULL;
 }
 
+/* Older PROM flush WAR
+ *
+ * 01/16/06 -- This war will be in place until a new official PROM is released.
+ * Additionally note that the struct sn_flush_device_war also has to be
+ * removed from arch/ia64/sn/include/xtalk/hubdev.h
+ */
+static u8 war_implemented = 0;
+
+static s64 sn_device_fixup_war(u64 nasid, u64 widget, int device,
+                              struct sn_flush_device_common *common)
+{
+       struct sn_flush_device_war *war_list;
+       struct sn_flush_device_war *dev_entry;
+       struct ia64_sal_retval isrv = {0,0,0,0};
+
+       if (!war_implemented) {
+               printk(KERN_WARNING "PROM version < 4.50 -- implementing old "
+                      "PROM flush WAR\n");
+               war_implemented = 1;
+       }
+
+       war_list = kzalloc(DEV_PER_WIDGET * sizeof(*war_list), GFP_KERNEL);
+       if (!war_list)
+               BUG();
+
+       SAL_CALL_NOLOCK(isrv, SN_SAL_IOIF_GET_WIDGET_DMAFLUSH_LIST,
+                       nasid, widget, __pa(war_list), 0, 0, 0 ,0);
+       if (isrv.status)
+               panic("sn_device_fixup_war failed: %s\n",
+                     ia64_sal_strerror(isrv.status));
+
+       dev_entry = war_list + device;
+       memcpy(common,dev_entry, sizeof(*common));
+       kfree(war_list);
+
+       return isrv.status;
+}
+
 /*
- * sn_fixup_ionodes() - This routine initializes the HUB data strcuture for 
+ * sn_fixup_ionodes() - This routine initializes the HUB data strcuture for
  *     each node in the system.
  */
 static void sn_fixup_ionodes(void)
@@ -242,12 +281,21 @@ static void sn_fixup_ionodes(void)
                                memset(dev_entry->common, 0x0, sizeof(struct
                                                       sn_flush_device_common));
 
-                               status = sal_get_device_dmaflush_list(nasid,
-                                                                       widget,
-                                                                       device,
+                               if (sn_prom_feature_available(
+                                                      PRF_DEVICE_FLUSH_LIST))
+                                       status = sal_get_device_dmaflush_list(
+                                                                         nasid,
+                                                                        widget,
+                                                                        device,
                                                      (u64)(dev_entry->common));
-                               if (status)
-                                       BUG();
+                               else
+                                       status = sn_device_fixup_war(nasid,
+                                                                    widget,
+                                                                    device,
+                                                            dev_entry->common);
+                               if (status != SALRET_OK)
+                                       panic("SAL call failed: %s\n",
+                                             ia64_sal_strerror(status));
 
                                spin_lock_init(&dev_entry->sfdl_flush_lock);
                        }
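
The io_init.c hunk replaces a silent BUG() with a probe-and-fall-back shape: ask the PROM whether it implements the new per-device flush-list call, take the workaround path if not, and panic with a decoded SAL error rather than a bare BUG() if either path fails. A generic sketch of that structure; every identifier below except panic() is hypothetical:

    #include <linux/kernel.h>

    struct flush_info { unsigned long dma_addr; };               /* illustrative */

    extern int  fw_has_feature(int feature);                     /* hypothetical */
    extern long fw_get_flush_info(struct flush_info *info);      /* native call  */
    extern long legacy_get_flush_info(struct flush_info *info);  /* WAR path     */
    extern const char *fw_strerror(long status);                 /* hypothetical */
    #define FW_FEATURE_FLUSH_LIST 1

    static void fetch_flush_info(struct flush_info *info)
    {
            long status;

            if (fw_has_feature(FW_FEATURE_FLUSH_LIST))
                    status = fw_get_flush_info(info);
            else
                    status = legacy_get_flush_info(info);

            if (status)
                    panic("flush info query failed: %s\n", fw_strerror(status));
    }
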
index 6546db6abdba4b7c93c60b6ff7d72a8f1a7c8363..9ab684d1bb552ae963379a5b4afb91beed33d4da 100644 (file)
@@ -10,6 +10,7 @@
 #include <linux/kernel.h>
 #include <linux/timer.h>
 #include <linux/vmalloc.h>
+#include <linux/mutex.h>
 #include <asm/mca.h>
 #include <asm/sal.h>
 #include <asm/sn/sn_sal.h>
@@ -27,7 +28,7 @@ void sn_init_cpei_timer(void);
 /* Printing oemdata from mca uses data that is not passed through SAL, it is
  * global.  Only one user at a time.
  */
-static DECLARE_MUTEX(sn_oemdata_mutex);
+static DEFINE_MUTEX(sn_oemdata_mutex);
 static u8 **sn_oemdata;
 static u64 *sn_oemdata_size, sn_oemdata_bufsize;
 
@@ -89,7 +90,7 @@ static int
 sn_platform_plat_specific_err_print(const u8 * sect_header, u8 ** oemdata,
                                    u64 * oemdata_size)
 {
-       down(&sn_oemdata_mutex);
+       mutex_lock(&sn_oemdata_mutex);
        sn_oemdata = oemdata;
        sn_oemdata_size = oemdata_size;
        sn_oemdata_bufsize = 0;
@@ -107,7 +108,7 @@ sn_platform_plat_specific_err_print(const u8 * sect_header, u8 ** oemdata,
                *sn_oemdata_size = 0;
                ia64_sn_plat_specific_err_print(print_hook, (char *)sect_header);
        }
-       up(&sn_oemdata_mutex);
+       mutex_unlock(&sn_oemdata_mutex);
        return 0;
 }
 
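
DECLARE_MUTEX() built a counting semaphore with an initial count of one; 2.6.16 introduced struct mutex as a dedicated sleeping lock with debug checking and a cheaper fast path, and this hunk is a straight conversion: DEFINE_MUTEX() for the static definition, mutex_lock()/mutex_unlock() instead of down()/up(). A minimal sketch of the idiom (the protected buffer is illustrative):

    #include <linux/mutex.h>
    #include <linux/types.h>

    static DEFINE_MUTEX(oemdata_lock);      /* statically initialized, unlocked */
    static u8 **oemdata_buf;                /* only one user at a time */

    static int publish_oemdata(u8 **buf)
    {
            mutex_lock(&oemdata_lock);      /* may sleep; process context only */
            oemdata_buf = buf;
            /* ... fill and hand off the single global buffer ... */
            mutex_unlock(&oemdata_lock);
            return 0;
    }
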
index 3be52a34c80f49cd60d051c469bd2cc02b571d57..b7ea46645e12ee32d594f2ce1a215e0ac0f8ea7f 100644 (file)
@@ -19,6 +19,7 @@
 #include <linux/kernel.h>
 #include <linux/interrupt.h>
 #include <linux/module.h>
+#include <linux/mutex.h>
 #include <asm/sn/intr.h>
 #include <asm/sn/sn_sal.h>
 #include <asm/sn/xp.h>
@@ -136,13 +137,13 @@ xpc_connect(int ch_number, xpc_channel_func func, void *key, u16 payload_size,
 
        registration = &xpc_registrations[ch_number];
 
-       if (down_interruptible(&registration->sema) != 0) {
+       if (mutex_lock_interruptible(&registration->mutex) != 0) {
                return xpcInterrupted;
        }
 
        /* if XPC_CHANNEL_REGISTERED(ch_number) */
        if (registration->func != NULL) {
-               up(&registration->sema);
+               mutex_unlock(&registration->mutex);
                return xpcAlreadyRegistered;
        }
 
@@ -154,7 +155,7 @@ xpc_connect(int ch_number, xpc_channel_func func, void *key, u16 payload_size,
        registration->key = key;
        registration->func = func;
 
-       up(&registration->sema);
+       mutex_unlock(&registration->mutex);
 
        xpc_interface.connect(ch_number);
 
@@ -190,11 +191,11 @@ xpc_disconnect(int ch_number)
         * figured XPC's users will just turn around and call xpc_disconnect()
         * again anyways, so we might as well wait, if need be.
         */
-       down(&registration->sema);
+       mutex_lock(&registration->mutex);
 
        /* if !XPC_CHANNEL_REGISTERED(ch_number) */
        if (registration->func == NULL) {
-               up(&registration->sema);
+               mutex_unlock(&registration->mutex);
                return;
        }
 
@@ -208,7 +209,7 @@ xpc_disconnect(int ch_number)
 
        xpc_interface.disconnect(ch_number);
 
-       up(&registration->sema);
+       mutex_unlock(&registration->mutex);
 
        return;
 }
@@ -250,9 +251,9 @@ xp_init(void)
                xp_nofault_PIOR_target = SH1_IPI_ACCESS;
        }
 
-       /* initialize the connection registration semaphores */
+       /* initialize the connection registration mutex */
        for (ch_number = 0; ch_number < XPC_NCHANNELS; ch_number++) {
-               sema_init(&xpc_registrations[ch_number].sema, 1);  /* mutex */
+               mutex_init(&xpc_registrations[ch_number].mutex);
        }
 
        return 0;
index 0c0a6890240996e97b3b5960302219014b616902..36e5437a0fb6bdc281e104340b4b9ec8f4358a3a 100644 (file)
@@ -22,6 +22,8 @@
 #include <linux/cache.h>
 #include <linux/interrupt.h>
 #include <linux/slab.h>
+#include <linux/mutex.h>
+#include <linux/completion.h>
 #include <asm/sn/bte.h>
 #include <asm/sn/sn_sal.h>
 #include <asm/sn/xpc.h>
@@ -56,8 +58,8 @@ xpc_initialize_channels(struct xpc_partition *part, partid_t partid)
                atomic_set(&ch->n_to_notify, 0);
 
                spin_lock_init(&ch->lock);
-               sema_init(&ch->msg_to_pull_sema, 1);    /* mutex */
-               sema_init(&ch->wdisconnect_sema, 0);    /* event wait */
+               mutex_init(&ch->msg_to_pull_mutex);
+               init_completion(&ch->wdisconnect_wait);
 
                atomic_set(&ch->n_on_msg_allocate_wq, 0);
                init_waitqueue_head(&ch->msg_allocate_wq);
@@ -445,7 +447,7 @@ xpc_allocate_local_msgqueue(struct xpc_channel *ch)
 
                nbytes = nentries * ch->msg_size;
                ch->local_msgqueue = xpc_kmalloc_cacheline_aligned(nbytes,
-                                               (GFP_KERNEL | GFP_DMA),
+                                               GFP_KERNEL,
                                                &ch->local_msgqueue_base);
                if (ch->local_msgqueue == NULL) {
                        continue;
@@ -453,7 +455,7 @@ xpc_allocate_local_msgqueue(struct xpc_channel *ch)
                memset(ch->local_msgqueue, 0, nbytes);
 
                nbytes = nentries * sizeof(struct xpc_notify);
-               ch->notify_queue = kmalloc(nbytes, (GFP_KERNEL | GFP_DMA));
+               ch->notify_queue = kmalloc(nbytes, GFP_KERNEL);
                if (ch->notify_queue == NULL) {
                        kfree(ch->local_msgqueue_base);
                        ch->local_msgqueue = NULL;
@@ -500,7 +502,7 @@ xpc_allocate_remote_msgqueue(struct xpc_channel *ch)
 
                nbytes = nentries * ch->msg_size;
                ch->remote_msgqueue = xpc_kmalloc_cacheline_aligned(nbytes,
-                                               (GFP_KERNEL | GFP_DMA),
+                                               GFP_KERNEL,
                                                &ch->remote_msgqueue_base);
                if (ch->remote_msgqueue == NULL) {
                        continue;
@@ -534,7 +536,6 @@ static enum xpc_retval
 xpc_allocate_msgqueues(struct xpc_channel *ch)
 {
        unsigned long irq_flags;
-       int i;
        enum xpc_retval ret;
 
 
@@ -552,11 +553,6 @@ xpc_allocate_msgqueues(struct xpc_channel *ch)
                return ret;
        }
 
-       for (i = 0; i < ch->local_nentries; i++) {
-               /* use a semaphore as an event wait queue */
-               sema_init(&ch->notify_queue[i].sema, 0);
-       }
-
        spin_lock_irqsave(&ch->lock, irq_flags);
        ch->flags |= XPC_C_SETUP;
        spin_unlock_irqrestore(&ch->lock, irq_flags);
@@ -799,10 +795,8 @@ xpc_process_disconnect(struct xpc_channel *ch, unsigned long *irq_flags)
        }
 
        if (ch->flags & XPC_C_WDISCONNECT) {
-               spin_unlock_irqrestore(&ch->lock, *irq_flags);
-               up(&ch->wdisconnect_sema);
-               spin_lock_irqsave(&ch->lock, *irq_flags);
-
+               /* we won't lose the CPU since we're holding ch->lock */
+               complete(&ch->wdisconnect_wait);
        } else if (ch->delayed_IPI_flags) {
                if (part->act_state != XPC_P_DEACTIVATING) {
                        /* time to take action on any delayed IPI flags */
@@ -1092,12 +1086,12 @@ xpc_connect_channel(struct xpc_channel *ch)
        struct xpc_registration *registration = &xpc_registrations[ch->number];
 
 
-       if (down_trylock(&registration->sema) != 0) {
+       if (mutex_trylock(&registration->mutex) == 0) {
                return xpcRetry;
        }
 
        if (!XPC_CHANNEL_REGISTERED(ch->number)) {
-               up(&registration->sema);
+               mutex_unlock(&registration->mutex);
                return xpcUnregistered;
        }
 
@@ -1108,7 +1102,7 @@ xpc_connect_channel(struct xpc_channel *ch)
 
        if (ch->flags & XPC_C_DISCONNECTING) {
                spin_unlock_irqrestore(&ch->lock, irq_flags);
-               up(&registration->sema);
+               mutex_unlock(&registration->mutex);
                return ch->reason;
        }
 
@@ -1140,7 +1134,7 @@ xpc_connect_channel(struct xpc_channel *ch)
                         * channel lock be locked and will unlock and relock
                         * the channel lock as needed.
                         */
-                       up(&registration->sema);
+                       mutex_unlock(&registration->mutex);
                        XPC_DISCONNECT_CHANNEL(ch, xpcUnequalMsgSizes,
                                                                &irq_flags);
                        spin_unlock_irqrestore(&ch->lock, irq_flags);
@@ -1155,7 +1149,7 @@ xpc_connect_channel(struct xpc_channel *ch)
                atomic_inc(&xpc_partitions[ch->partid].nchannels_active);
        }
 
-       up(&registration->sema);
+       mutex_unlock(&registration->mutex);
 
 
        /* initiate the connection */
@@ -2089,7 +2083,7 @@ xpc_pull_remote_msg(struct xpc_channel *ch, s64 get)
        enum xpc_retval ret;
 
 
-       if (down_interruptible(&ch->msg_to_pull_sema) != 0) {
+       if (mutex_lock_interruptible(&ch->msg_to_pull_mutex) != 0) {
                /* we were interrupted by a signal */
                return NULL;
        }
@@ -2125,7 +2119,7 @@ xpc_pull_remote_msg(struct xpc_channel *ch, s64 get)
 
                        XPC_DEACTIVATE_PARTITION(part, ret);
 
-                       up(&ch->msg_to_pull_sema);
+                       mutex_unlock(&ch->msg_to_pull_mutex);
                        return NULL;
                }
 
@@ -2134,7 +2128,7 @@ xpc_pull_remote_msg(struct xpc_channel *ch, s64 get)
                ch->next_msg_to_pull += nmsgs;
        }
 
-       up(&ch->msg_to_pull_sema);
+       mutex_unlock(&ch->msg_to_pull_mutex);
 
        /* return the message we were looking for */
        msg_offset = (get % ch->remote_nentries) * ch->msg_size;
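
One trap in this conversion is visible in the xpc_connect_channel() hunk: down_trylock() returns 0 on success and nonzero when the lock is contended, while mutex_trylock() returns 1 on success and 0 on contention, so the test flips from `!= 0` to `== 0`. Sketch of the mutex form, with the registration structure reduced to a bare lock:

    #include <linux/errno.h>
    #include <linux/mutex.h>

    static DEFINE_MUTEX(reg_mutex);         /* stand-in for registration->mutex */

    static int try_claim_registration(void)
    {
            /* mutex_trylock(): 1 = acquired, 0 = already held
             * (down_trylock() used the opposite convention) */
            if (!mutex_trylock(&reg_mutex))
                    return -EBUSY;          /* caller retries later */

            /* ... inspect/modify the registration ... */

            mutex_unlock(&reg_mutex);
            return 0;
    }
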
index 8930586e0eb4203c64b85201b5b2ab7310bd3166..c75f8aeefc2b432e5a2a2d2f6d01ade7089aff3a 100644 (file)
@@ -55,6 +55,7 @@
 #include <linux/slab.h>
 #include <linux/delay.h>
 #include <linux/reboot.h>
+#include <linux/completion.h>
 #include <asm/sn/intr.h>
 #include <asm/sn/sn_sal.h>
 #include <asm/kdebug.h>
@@ -177,10 +178,10 @@ static DECLARE_WAIT_QUEUE_HEAD(xpc_act_IRQ_wq);
 static unsigned long xpc_hb_check_timeout;
 
 /* notification that the xpc_hb_checker thread has exited */
-static DECLARE_MUTEX_LOCKED(xpc_hb_checker_exited);
+static DECLARE_COMPLETION(xpc_hb_checker_exited);
 
 /* notification that the xpc_discovery thread has exited */
-static DECLARE_MUTEX_LOCKED(xpc_discovery_exited);
+static DECLARE_COMPLETION(xpc_discovery_exited);
 
 
 static struct timer_list xpc_hb_timer;
@@ -321,7 +322,7 @@ xpc_hb_checker(void *ignore)
 
 
        /* mark this thread as having exited */
-       up(&xpc_hb_checker_exited);
+       complete(&xpc_hb_checker_exited);
        return 0;
 }
 
@@ -341,7 +342,7 @@ xpc_initiate_discovery(void *ignore)
        dev_dbg(xpc_part, "discovery thread is exiting\n");
 
        /* mark this thread as having exited */
-       up(&xpc_discovery_exited);
+       complete(&xpc_discovery_exited);
        return 0;
 }
 
@@ -893,7 +894,7 @@ xpc_disconnect_wait(int ch_number)
                        continue;
                }
 
-               (void) down(&ch->wdisconnect_sema);
+               wait_for_completion(&ch->wdisconnect_wait);
 
                spin_lock_irqsave(&ch->lock, irq_flags);
                DBUG_ON(!(ch->flags & XPC_C_DISCONNECTED));
@@ -946,10 +947,10 @@ xpc_do_exit(enum xpc_retval reason)
        free_irq(SGI_XPC_ACTIVATE, NULL);
 
        /* wait for the discovery thread to exit */
-       down(&xpc_discovery_exited);
+       wait_for_completion(&xpc_discovery_exited);
 
        /* wait for the heartbeat checker thread to exit */
-       down(&xpc_hb_checker_exited);
+       wait_for_completion(&xpc_hb_checker_exited);
 
 
        /* sleep for a 1/3 of a second or so */
@@ -1367,7 +1368,7 @@ xpc_init(void)
                dev_err(xpc_part, "failed while forking discovery thread\n");
 
                /* mark this new thread as a non-starter */
-               up(&xpc_discovery_exited);
+               complete(&xpc_discovery_exited);
 
                xpc_do_exit(xpcUnloading);
                return -EBUSY;
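
DECLARE_MUTEX_LOCKED() (a semaphore starting at zero) was a common way to say "this thread has exited"; DECLARE_COMPLETION() states that intent directly, with complete() in the thread and wait_for_completion() in the teardown path. A sketch using the kthread API for the thread itself (XPC spawned its threads with kernel_thread() at the time, so treat the spawning side as illustrative):

    #include <linux/completion.h>
    #include <linux/err.h>
    #include <linux/kthread.h>
    #include <linux/sched.h>

    static DECLARE_COMPLETION(worker_exited);

    static int worker_fn(void *unused)
    {
            /* ... periodic work until told to stop ... */
            complete(&worker_exited);               /* announce that we are gone */
            return 0;
    }

    static void start_then_teardown(void)
    {
            struct task_struct *t = kthread_run(worker_fn, NULL, "worker");

            if (IS_ERR(t))
                    return;                         /* nothing to wait for */

            /* ... later, at unload time ... */
            wait_for_completion(&worker_exited);    /* block until it has exited */
    }
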
index 9bf9f23b9a1f50b8384127305160779763d48c4b..5a36292388eb79d420db590c0ab0655bd9649afa 100644 (file)
@@ -90,14 +90,14 @@ void *sn_dma_alloc_coherent(struct device *dev, size_t size,
         */
        node = pcibus_to_node(pdev->bus);
        if (likely(node >=0)) {
-               struct page *p = alloc_pages_node(node, GFP_ATOMIC, get_order(size));
+               struct page *p = alloc_pages_node(node, flags, get_order(size));
 
                if (likely(p))
                        cpuaddr = page_address(p);
                else
                        return NULL;
        } else
-               cpuaddr = (void *)__get_free_pages(GFP_ATOMIC, get_order(size));
+               cpuaddr = (void *)__get_free_pages(flags, get_order(size));
 
        if (unlikely(!cpuaddr))
                return NULL;
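
sn_dma_alloc_coherent() used to hard-code GFP_ATOMIC even when its caller passed GFP_KERNEL, needlessly drawing on the atomic emergency reserves; the hunk forwards the caller's flags instead. The general rule for allocation helpers is to take a gfp_t and pass it through untouched, as in this sketch (helper name hypothetical):

    #include <linux/gfp.h>
    #include <linux/mm.h>
    #include <linux/types.h>

    /* Allocate 'size' bytes near 'node', honouring the caller's gfp_t
     * rather than pinning every request to GFP_ATOMIC. */
    static void *alloc_near_node(int node, size_t size, gfp_t flags)
    {
            struct page *p = alloc_pages_node(node, flags, get_order(size));

            return p ? page_address(p) : NULL;
    }
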
index 77a1262751d31ac57567423ca1fc39145ecb3f84..2fac27049bf62eda7323fee737217db830250fc8 100644 (file)
@@ -24,13 +24,15 @@ sal_pcibr_slot_enable(struct pcibus_info *soft, int device, void *resp)
 {
        struct ia64_sal_retval ret_stuff;
        u64 busnum;
+       u64 segment;
 
        ret_stuff.status = 0;
        ret_stuff.v0 = 0;
 
+       segment = soft->pbi_buscommon.bs_persist_segment;
        busnum = soft->pbi_buscommon.bs_persist_busnum;
-       SAL_CALL_NOLOCK(ret_stuff, (u64) SN_SAL_IOIF_SLOT_ENABLE, (u64) busnum,
-                       (u64) device, (u64) resp, 0, 0, 0, 0);
+       SAL_CALL_NOLOCK(ret_stuff, (u64) SN_SAL_IOIF_SLOT_ENABLE, segment,
+                       busnum, (u64) device, (u64) resp, 0, 0, 0);
 
        return (int)ret_stuff.v0;
 }
@@ -41,14 +43,16 @@ sal_pcibr_slot_disable(struct pcibus_info *soft, int device, int action,
 {
        struct ia64_sal_retval ret_stuff;
        u64 busnum;
+       u64 segment;
 
        ret_stuff.status = 0;
        ret_stuff.v0 = 0;
 
+       segment = soft->pbi_buscommon.bs_persist_segment;
        busnum = soft->pbi_buscommon.bs_persist_busnum;
        SAL_CALL_NOLOCK(ret_stuff, (u64) SN_SAL_IOIF_SLOT_DISABLE,
-                       (u64) busnum, (u64) device, (u64) action,
-                       (u64) resp, 0, 0, 0);
+                       segment, busnum, (u64) device, (u64) action,
+                       (u64) resp, 0, 0);
 
        return (int)ret_stuff.v0;
 }
index 955ef5084f3ed5322d3998957e7d98e183d70f5a..959ad3c4e37218a00497744c9fbd151e82e4c4f8 100644 (file)
@@ -602,6 +602,7 @@ CONFIG_ACENIC_OMIT_TIGON_I=y
 # CONFIG_DL2K is not set
 CONFIG_E1000=m
 CONFIG_E1000_NAPI=y
+# CONFIG_E1000_DISABLE_PACKET_SPLIT is not set
 # CONFIG_NS83820 is not set
 # CONFIG_HAMACHI is not set
 # CONFIG_YELLOWFIN is not set
index 9d86b6b1ebd1e504a4657c5d36796eaa1d53f620..0b1c8c1fa8a33a504542af00acb3ba16115e8da6 100644 (file)
@@ -626,6 +626,7 @@ CONFIG_ACENIC=m
 # CONFIG_DL2K is not set
 CONFIG_E1000=m
 # CONFIG_E1000_NAPI is not set
+# CONFIG_E1000_DISABLE_PACKET_SPLIT is not set
 # CONFIG_NS83820 is not set
 # CONFIG_HAMACHI is not set
 # CONFIG_YELLOWFIN is not set
index b657f7e447624b28424dbf35d6fb342a6d1e8a9d..063b84f2cbeaac3a5f26ac48b555728ec7889f22 100644 (file)
@@ -533,6 +533,7 @@ CONFIG_MII=y
 # CONFIG_DL2K is not set
 CONFIG_E1000=m
 # CONFIG_E1000_NAPI is not set
+# CONFIG_E1000_DISABLE_PACKET_SPLIT is not set
 # CONFIG_NS83820 is not set
 # CONFIG_HAMACHI is not set
 # CONFIG_YELLOWFIN is not set
index 3c22ccb185194bbb782e6645fdad164658312fc9..d6fed3f56580b12a3440e9095eb22632d4d8eaa2 100644 (file)
@@ -675,6 +675,7 @@ CONFIG_ACENIC_OMIT_TIGON_I=y
 # CONFIG_DL2K is not set
 CONFIG_E1000=y
 # CONFIG_E1000_NAPI is not set
+# CONFIG_E1000_DISABLE_PACKET_SPLIT is not set
 # CONFIG_NS83820 is not set
 # CONFIG_HAMACHI is not set
 # CONFIG_YELLOWFIN is not set
index 751a622fb7a787db68ab68f8a8c7df308218324a..c775027947f90ae7e107709dfda64734cbe853ad 100644 (file)
@@ -567,6 +567,7 @@ CONFIG_ACENIC=m
 # CONFIG_DL2K is not set
 CONFIG_E1000=m
 # CONFIG_E1000_NAPI is not set
+# CONFIG_E1000_DISABLE_PACKET_SPLIT is not set
 # CONFIG_NS83820 is not set
 # CONFIG_HAMACHI is not set
 # CONFIG_YELLOWFIN is not set
index 07b6d3d23360a1828a9e64f0ea0932d1a0f09140..68194c03f6d19cb89cd451ce1e13a9e707f03cd9 100644 (file)
@@ -454,6 +454,7 @@ CONFIG_AMD8111_ETH=y
 # CONFIG_DL2K is not set
 CONFIG_E1000=y
 # CONFIG_E1000_NAPI is not set
+# CONFIG_E1000_DISABLE_PACKET_SPLIT is not set
 # CONFIG_NS83820 is not set
 # CONFIG_HAMACHI is not set
 # CONFIG_YELLOWFIN is not set
index 0b2b55a79c3cd9b6ba2659cad70dd877b40530e2..6f6c6bed1aa59ebb8243d71c737c46cb4f1853e6 100644 (file)
@@ -724,6 +724,7 @@ CONFIG_ACENIC_OMIT_TIGON_I=y
 # CONFIG_DL2K is not set
 CONFIG_E1000=y
 # CONFIG_E1000_NAPI is not set
+# CONFIG_E1000_DISABLE_PACKET_SPLIT is not set
 # CONFIG_NS83820 is not set
 # CONFIG_HAMACHI is not set
 # CONFIG_YELLOWFIN is not set
index a50ce0fa9243862c2facd9709348042eed8a5250..aa9893a1f6e8aee5c982c88d3262d6c4c037e944 100644 (file)
@@ -671,6 +671,7 @@ CONFIG_ACENIC_OMIT_TIGON_I=y
 # CONFIG_DL2K is not set
 CONFIG_E1000=y
 # CONFIG_E1000_NAPI is not set
+# CONFIG_E1000_DISABLE_PACKET_SPLIT is not set
 # CONFIG_NS83820 is not set
 # CONFIG_HAMACHI is not set
 # CONFIG_YELLOWFIN is not set
index d8da2a35c0a4adf62dd6b9cbe5d91f7470fc170e..f20a67261ec730b2c98ef5f0756eaa6301a38bfd 100644 (file)
@@ -227,7 +227,7 @@ ret_from_syscall:
        MTMSRD(r10)
        lwz     r9,TI_FLAGS(r12)
        li      r8,-_LAST_ERRNO
-       andi.   r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SIGPENDING|_TIF_NEED_RESCHED|_TIF_RESTOREALL)
+       andi.   r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SIGPENDING|_TIF_NEED_RESCHED|_TIF_RESTOREALL|_TIF_RESTORE_SIGMASK)
        bne-    syscall_exit_work
        cmplw   0,r3,r8
        blt+    syscall_exit_cont
@@ -357,7 +357,7 @@ save_user_nvgprs_cont:
        lwz     r5,_MSR(r1)
        andi.   r5,r5,MSR_PR
        beq     ret_from_except
-       andi.   r0,r9,_TIF_SIGPENDING
+       andi.   r0,r9,_TIF_SIGPENDING|_TIF_RESTORE_SIGMASK
        beq     ret_from_except
        b       do_user_signal
 8:
@@ -683,7 +683,7 @@ user_exc_return:            /* r10 contains MSR_KERNEL here */
        /* Check current_thread_info()->flags */
        rlwinm  r9,r1,0,0,(31-THREAD_SHIFT)
        lwz     r9,TI_FLAGS(r9)
-       andi.   r0,r9,(_TIF_SIGPENDING|_TIF_NEED_RESCHED|_TIF_RESTOREALL)
+       andi.   r0,r9,(_TIF_SIGPENDING|_TIF_NEED_RESCHED|_TIF_RESTOREALL|_TIF_RESTORE_SIGMASK)
        bne     do_work
 
 restore_user:
@@ -917,7 +917,7 @@ recheck:
        lwz     r9,TI_FLAGS(r9)
        andi.   r0,r9,_TIF_NEED_RESCHED
        bne-    do_resched
-       andi.   r0,r9,_TIF_SIGPENDING
+       andi.   r0,r9,_TIF_SIGPENDING|_TIF_RESTORE_SIGMASK
        beq     restore_user
 do_user_signal:                        /* r10 contains MSR_KERNEL here */
        ori     r10,r10,MSR_EE
index 5420363188660db8803f4f771aca1ffb75481cbf..388f861b8ed17dfe1e8a0d0496955e35e2254050 100644 (file)
@@ -160,7 +160,7 @@ syscall_exit:
        mtmsrd  r10,1
        ld      r9,TI_FLAGS(r12)
        li      r11,-_LAST_ERRNO
-       andi.   r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP|_TIF_SIGPENDING|_TIF_NEED_RESCHED|_TIF_RESTOREALL|_TIF_SAVE_NVGPRS|_TIF_NOERROR)
+       andi.   r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP|_TIF_SIGPENDING|_TIF_NEED_RESCHED|_TIF_RESTOREALL|_TIF_SAVE_NVGPRS|_TIF_NOERROR|_TIF_RESTORE_SIGMASK)
        bne-    syscall_exit_work
        cmpld   r3,r11
        ld      r5,_CCR(r1)
index 177bba78fb0b6974a7f733769ab1d489a7ecfcb1..3747ab0dac3fd66f1adf58b5365b62033b19ad62 100644 (file)
@@ -252,8 +252,7 @@ int do_signal(sigset_t *oldset, struct pt_regs *regs);
 /*
  * Atomically swap in the new signal mask, and wait for a signal.
  */
-long sys_sigsuspend(old_sigset_t mask, int p2, int p3, int p4, int p6, int p7,
-              struct pt_regs *regs)
+long sys_sigsuspend(old_sigset_t mask)
 {
        sigset_t saveset;
 
@@ -264,55 +263,10 @@ long sys_sigsuspend(old_sigset_t mask, int p2, int p3, int p4, int p6, int p7,
        recalc_sigpending();
        spin_unlock_irq(&current->sighand->siglock);
 
-       regs->result = -EINTR;
-       regs->gpr[3] = EINTR;
-       regs->ccr |= 0x10000000;
-       while (1) {
-               current->state = TASK_INTERRUPTIBLE;
-               schedule();
-               if (do_signal(&saveset, regs)) {
-                       set_thread_flag(TIF_RESTOREALL);
-                       return 0;
-               }
-       }
-}
-
-long sys_rt_sigsuspend(
-#ifdef CONFIG_PPC64
-               compat_sigset_t __user *unewset,
-#else
-               sigset_t __user *unewset,
-#endif
-               size_t sigsetsize, int p3, int p4,
-               int p6, int p7, struct pt_regs *regs)
-{
-       sigset_t saveset, newset;
-
-       /* XXX: Don't preclude handling different sized sigset_t's.  */
-       if (sigsetsize != sizeof(sigset_t))
-               return -EINVAL;
-
-       if (get_sigset_t(&newset, unewset))
-               return -EFAULT;
-       sigdelsetmask(&newset, ~_BLOCKABLE);
-
-       spin_lock_irq(&current->sighand->siglock);
-       saveset = current->blocked;
-       current->blocked = newset;
-       recalc_sigpending();
-       spin_unlock_irq(&current->sighand->siglock);
-
-       regs->result = -EINTR;
-       regs->gpr[3] = EINTR;
-       regs->ccr |= 0x10000000;
-       while (1) {
-               current->state = TASK_INTERRUPTIBLE;
-               schedule();
-               if (do_signal(&saveset, regs)) {
-                       set_thread_flag(TIF_RESTOREALL);
-                       return 0;
-               }
-       }
+       current->state = TASK_INTERRUPTIBLE;
+       schedule();
+       set_thread_flag(TIF_RESTORE_SIGMASK);
+       return -ERESTARTNOHAND;
 }
 
 #ifdef CONFIG_PPC32
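
This is the heart of the series on the signal side: sigsuspend()/rt_sigsuspend() no longer spin in their own do_signal() loop. They stash the previous mask in current->saved_sigmask, set TIF_RESTORE_SIGMASK, sleep once, and return -ERESTARTNOHAND; the arch do_signal() then either lets the signal frame carry the saved mask (a handler ran) or puts it back itself (no handler). A condensed sketch of the syscall side, mirroring the hunk above (_BLOCKABLE is the usual per-arch define, spelled out here):

    #include <linux/errno.h>
    #include <linux/sched.h>
    #include <linux/signal.h>

    #define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))

    static long sketch_sigsuspend(old_sigset_t mask)
    {
            mask &= _BLOCKABLE;

            spin_lock_irq(&current->sighand->siglock);
            current->saved_sigmask = current->blocked;  /* remember the old mask */
            siginitset(&current->blocked, mask);        /* install temporary mask */
            recalc_sigpending();
            spin_unlock_irq(&current->sighand->siglock);

            current->state = TASK_INTERRUPTIBLE;
            schedule();                                 /* wait for any signal */
            set_thread_flag(TIF_RESTORE_SIGMASK);       /* do_signal() cleans up */
            return -ERESTARTNOHAND;
    }
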
@@ -1174,7 +1128,7 @@ int do_signal(sigset_t *oldset, struct pt_regs *regs)
 {
        siginfo_t info;
        struct k_sigaction ka;
-       unsigned int frame, newsp;
+       unsigned int newsp;
        int signr, ret;
 
 #ifdef CONFIG_PPC32
@@ -1185,11 +1139,11 @@ int do_signal(sigset_t *oldset, struct pt_regs *regs)
        }
 #endif
 
-       if (!oldset)
+       if (test_thread_flag(TIF_RESTORE_SIGMASK))
+               oldset = &current->saved_sigmask;
+       else if (!oldset)
                oldset = &current->blocked;
 
-       newsp = frame = 0;
-
        signr = get_signal_to_deliver(&info, &ka, regs, NULL);
 #ifdef CONFIG_PPC32
 no_signal:
@@ -1219,8 +1173,14 @@ no_signal:
                }
        }
 
-       if (signr == 0)
+       if (signr == 0) {
+               /* No signal to deliver -- put the saved sigmask back */
+               if (test_thread_flag(TIF_RESTORE_SIGMASK)) {
+                       clear_thread_flag(TIF_RESTORE_SIGMASK);
+                       sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
+               }
                return 0;               /* no signals delivered */
+       }
 
        if ((ka.sa.sa_flags & SA_ONSTACK) && current->sas_ss_size
            && !on_sig_stack(regs->gpr[1]))
@@ -1253,6 +1213,10 @@ no_signal:
                        sigaddset(&current->blocked, signr);
                recalc_sigpending();
                spin_unlock_irq(&current->sighand->siglock);
+               /* A signal was successfully delivered; the saved sigmask is in
+                  its frame, and we can clear the TIF_RESTORE_SIGMASK flag */
+               if (test_thread_flag(TIF_RESTORE_SIGMASK))
+                       clear_thread_flag(TIF_RESTORE_SIGMASK);
        }
 
        return ret;
index 7b9d999e2115027d72bec3beca14257345bbb7a4..b3193116e686c7047db7f76f085776064f625591 100644 (file)
@@ -67,42 +67,6 @@ struct rt_sigframe {
        char abigap[288];
 } __attribute__ ((aligned (16)));
 
-
-/*
- * Atomically swap in the new signal mask, and wait for a signal.
- */
-long sys_rt_sigsuspend(sigset_t __user *unewset, size_t sigsetsize, int p3, int p4,
-                      int p6, int p7, struct pt_regs *regs)
-{
-       sigset_t saveset, newset;
-
-       /* XXX: Don't preclude handling different sized sigset_t's.  */
-       if (sigsetsize != sizeof(sigset_t))
-               return -EINVAL;
-
-       if (copy_from_user(&newset, unewset, sizeof(newset)))
-               return -EFAULT;
-       sigdelsetmask(&newset, ~_BLOCKABLE);
-
-       spin_lock_irq(&current->sighand->siglock);
-       saveset = current->blocked;
-       current->blocked = newset;
-       recalc_sigpending();
-       spin_unlock_irq(&current->sighand->siglock);
-
-       regs->result = -EINTR;
-       regs->gpr[3] = EINTR;
-       regs->ccr |= 0x10000000;
-       while (1) {
-               current->state = TASK_INTERRUPTIBLE;
-               schedule();
-               if (do_signal(&saveset, regs)) {
-                       set_thread_flag(TIF_RESTOREALL);
-                       return 0;
-               }
-       }
-}
-
 long sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss, unsigned long r5,
                     unsigned long r6, unsigned long r7, unsigned long r8,
                     struct pt_regs *regs)
@@ -556,11 +520,15 @@ int do_signal(sigset_t *oldset, struct pt_regs *regs)
        if (test_thread_flag(TIF_32BIT))
                return do_signal32(oldset, regs);
 
-       if (!oldset)
+       if (test_thread_flag(TIF_RESTORE_SIGMASK))
+               oldset = &current->saved_sigmask;
+       else if (!oldset)
                oldset = &current->blocked;
 
        signr = get_signal_to_deliver(&info, &ka, regs, NULL);
        if (signr > 0) {
+               int ret;
+
                /* Whee!  Actually deliver the signal.  */
                if (TRAP(regs) == 0x0C00)
                        syscall_restart(regs, &ka);
@@ -573,7 +541,14 @@ int do_signal(sigset_t *oldset, struct pt_regs *regs)
                if (current->thread.dabr)
                        set_dabr(current->thread.dabr);
 
-               return handle_signal(signr, &ka, &info, oldset, regs);
+               ret = handle_signal(signr, &ka, &info, oldset, regs);
+
+               /* If a signal was successfully delivered, the saved sigmask is in
+                  its frame, and we can clear the TIF_RESTORE_SIGMASK flag */
+               if (ret && test_thread_flag(TIF_RESTORE_SIGMASK))
+                       clear_thread_flag(TIF_RESTORE_SIGMASK);
+
+               return ret;
        }
 
        if (TRAP(regs) == 0x0C00) {     /* System Call! */
@@ -589,6 +564,11 @@ int do_signal(sigset_t *oldset, struct pt_regs *regs)
                        regs->result = 0;
                }
        }
+       /* No signal to deliver -- put the saved sigmask back */
+       if (test_thread_flag(TIF_RESTORE_SIGMASK)) {
+               clear_thread_flag(TIF_RESTORE_SIGMASK);
+               sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
+       }
 
        return 0;
 }
index 68013179a503ca5fe4c2a3bd1157e06a198d58db..007b15ee36d2f435a43dff536ea799c1a4f0e10f 100644 (file)
@@ -321,3 +321,5 @@ SYSCALL(inotify_add_watch)
 SYSCALL(inotify_rm_watch)
 SYSCALL(spu_run)
 SYSCALL(spu_create)
+COMPAT_SYS(pselect6)
+COMPAT_SYS(ppoll)
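
pselect6 and ppoll are wired into the powerpc syscall table here because the whole TIF_RESTORE_SIGMASK machinery exists for them: unblocking a signal with sigprocmask() and then calling poll() leaves a window where the signal can fire and be lost, whereas ppoll() swaps the mask and sleeps atomically. A userspace sketch of the race-free form (glibc wrapper, illustrative descriptor):

    #define _GNU_SOURCE
    #include <poll.h>
    #include <signal.h>

    /* Block everything except SIGUSR1, but only while we are parked in
     * ppoll(); the previous mask is restored atomically when it returns. */
    int wait_readable(int fd)
    {
            struct pollfd pfd = { .fd = fd, .events = POLLIN };
            sigset_t during;

            sigfillset(&during);
            sigdelset(&during, SIGUSR1);

            return ppoll(&pfd, 1, NULL, &during);   /* NULL = no timeout */
    }
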
index 558dd06920921f04bde22e4084d0347ba73c8209..2296f3d46ca829ac979e92a7af0393bb422d05cf 100644 (file)
@@ -910,16 +910,18 @@ core99_gmac_phy_reset(struct device_node *node, long param, long value)
            macio->type != macio_intrepid)
                return -ENODEV;
 
+       printk(KERN_DEBUG "Hard reset of PHY chip ...\n");
+
        LOCK(flags);
        MACIO_OUT8(KL_GPIO_ETH_PHY_RESET, KEYLARGO_GPIO_OUTPUT_ENABLE);
        (void)MACIO_IN8(KL_GPIO_ETH_PHY_RESET);
        UNLOCK(flags);
-       mdelay(10);
+       msleep(10);
        LOCK(flags);
        MACIO_OUT8(KL_GPIO_ETH_PHY_RESET, /*KEYLARGO_GPIO_OUTPUT_ENABLE | */
                KEYLARGO_GPIO_OUTOUT_DATA);
        UNLOCK(flags);
-       mdelay(10);
+       msleep(10);
 
        return 0;
 }
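
mdelay(10) busy-waits for ten milliseconds; msleep(10) gives the CPU back to the scheduler, which is strictly better here because the PHY reset runs in process context and the delays sit outside the spinlocked register accesses. (msleep() may oversleep by a jiffy or more, which does not matter for a reset settle time.) Sketch of the shape the hunk ends up with:

    #include <linux/delay.h>
    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(hw_lock);        /* stand-in for the feature lock */

    static void phy_hard_reset_sketch(void)
    {
            unsigned long flags;

            spin_lock_irqsave(&hw_lock, flags);
            /* ... write the GPIO reset register ... */
            spin_unlock_irqrestore(&hw_lock, flags);

            msleep(10);     /* lock dropped, process context: sleeping is fine;
                             * mdelay(10) would simply burn the CPU here */
    }
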
index 0ba4e70d50b6e8dbde692a328aebfd5723974221..41fd3938fa5cba3f74c7d3fa252be7f67e04a8db 100644 (file)
@@ -499,6 +499,7 @@ CONFIG_NATSEMI=y
 # CONFIG_DL2K is not set
 CONFIG_E1000=y
 # CONFIG_E1000_NAPI is not set
+# CONFIG_E1000_DISABLE_PACKET_SPLIT is not set
 # CONFIG_NS83820 is not set
 # CONFIG_HAMACHI is not set
 # CONFIG_YELLOWFIN is not set
index 0f3bb9af9c22bcf346a8559c7f11458cd22d9060..7311fe6b42decc55c810976594745f93d6d63edc 100644 (file)
@@ -488,6 +488,7 @@ CONFIG_E100=y
 # CONFIG_DL2K is not set
 CONFIG_E1000=y
 # CONFIG_E1000_NAPI is not set
+# CONFIG_E1000_DISABLE_PACKET_SPLIT is not set
 # CONFIG_NS83820 is not set
 # CONFIG_HAMACHI is not set
 # CONFIG_YELLOWFIN is not set
index 673dc64ebcb1df1804480245db281c7215a836c0..b96a6d6dad0e109c58ae5a2e5c987a52c35da39b 100644 (file)
@@ -402,6 +402,7 @@ CONFIG_E100=y
 # CONFIG_DL2K is not set
 CONFIG_E1000=y
 # CONFIG_E1000_NAPI is not set
+# CONFIG_E1000_DISABLE_PACKET_SPLIT is not set
 # CONFIG_NS83820 is not set
 # CONFIG_HAMACHI is not set
 # CONFIG_YELLOWFIN is not set
index 93da595a4738e5b311bba6f53745111707fd9667..a1ef929bca59c4b1362c7b03d23319a39cf04310 100644 (file)
@@ -442,6 +442,7 @@ CONFIG_E100=y
 # CONFIG_DL2K is not set
 CONFIG_E1000=y
 # CONFIG_E1000_NAPI is not set
+# CONFIG_E1000_DISABLE_PACKET_SPLIT is not set
 # CONFIG_NS83820 is not set
 # CONFIG_HAMACHI is not set
 # CONFIG_YELLOWFIN is not set
index 94ea346b7b4b8b7def5f003d5a4dfe8ce74623b4..1f01b7e2376b97a1f258eb2c0fdcb7abe955d91f 100644 (file)
@@ -313,7 +313,7 @@ static struct platform_device mpsc1_device = {
 };
 #endif
 
-#ifdef CONFIG_MV643XX_ETH
+#if defined(CONFIG_MV643XX_ETH) || defined(CONFIG_MV643XX_ETH_MODULE)
 static struct resource mv64x60_eth_shared_resources[] = {
        [0] = {
                .name   = "ethernet shared base",
@@ -456,7 +456,7 @@ static struct platform_device *mv64x60_pd_devs[] __initdata = {
        &mpsc0_device,
        &mpsc1_device,
 #endif
-#ifdef CONFIG_MV643XX_ETH
+#if defined(CONFIG_MV643XX_ETH) || defined(CONFIG_MV643XX_ETH_MODULE)
        &mv64x60_eth_shared_device,
 #endif
 #ifdef CONFIG_MV643XX_ETH_0
index 03ecb4e4614e01858a0cfecfdfa9071ce60c7e23..267ec8f6fb585a8d5763f1eea412102686c2bbc3 100644 (file)
@@ -38,7 +38,7 @@
 
 #define curptr      g6
 
-#define NR_SYSCALLS 284      /* Each OS is different... */
+#define NR_SYSCALLS 299      /* Each OS is different... */
 
 /* These are just handy. */
 #define _SV    save    %sp, -STACKFRAME_SZ, %sp
@@ -1276,62 +1276,6 @@ sys_sigstack:
        call    do_sys_sigstack
         mov    %l5, %o7
 
-       .align  4
-       .globl  sys_sigpause
-sys_sigpause:
-       /* Note: %o0 already has correct value... */
-       call    do_sigpause
-        add    %sp, STACKFRAME_SZ, %o1
-
-       ld      [%curptr + TI_FLAGS], %l5
-       andcc   %l5, _TIF_SYSCALL_TRACE, %g0
-       be      1f
-        nop
-
-       call    syscall_trace
-        nop
-
-1:
-       /* We are returning to a signal handler. */
-       RESTORE_ALL
-
-       .align  4
-       .globl  sys_sigsuspend
-sys_sigsuspend:
-       call    do_sigsuspend
-        add    %sp, STACKFRAME_SZ, %o0
-
-       ld      [%curptr + TI_FLAGS], %l5
-       andcc   %l5, _TIF_SYSCALL_TRACE, %g0
-       be      1f
-        nop
-
-       call    syscall_trace
-        nop
-
-1:
-       /* We are returning to a signal handler. */
-       RESTORE_ALL
-
-       .align  4
-       .globl  sys_rt_sigsuspend
-sys_rt_sigsuspend:
-       /* Note: %o0, %o1 already have correct value... */
-       call    do_rt_sigsuspend
-        add    %sp, STACKFRAME_SZ, %o2
-
-       ld      [%curptr + TI_FLAGS], %l5
-       andcc   %l5, _TIF_SYSCALL_TRACE, %g0
-       be      1f
-        nop
-
-       call    syscall_trace
-        nop
-
-1:
-       /* We are returning to a signal handler. */
-       RESTORE_ALL
-
        .align  4
        .globl  sys_sigreturn
 sys_sigreturn:
index f7460d897e791899a4f7a6d542c64044d3122dd9..77ca6fd812538b2c2dae9b2c40c29f2303b6bdde 100644 (file)
@@ -68,15 +68,14 @@ ret_trap_lockless_ipi:
 
        ld      [%curptr + TI_FLAGS], %g2
 signal_p:
-       andcc   %g2, (_TIF_NOTIFY_RESUME|_TIF_SIGPENDING), %g0
+       andcc   %g2, (_TIF_SIGPENDING|_TIF_RESTORE_SIGMASK), %g0
        bz,a    ret_trap_continue
         ld     [%sp + STACKFRAME_SZ + PT_PSR], %t_psr
 
-       clr     %o0
-       mov     %l5, %o2
-       mov     %l6, %o3
+       mov     %l5, %o1
+       mov     %l6, %o2
        call    do_signal
-        add    %sp, STACKFRAME_SZ, %o1 ! pt_regs ptr
+        add    %sp, STACKFRAME_SZ, %o0 ! pt_regs ptr
 
        /* Fall through. */
        ld      [%sp + STACKFRAME_SZ + PT_PSR], %t_psr
index 5f34d7dc2b898672a4a8bfccd40940a647af0170..0748d8147bbf670f2ff1bb7f027eb9034091f5f7 100644 (file)
@@ -35,9 +35,6 @@ extern void fpsave(unsigned long *fpregs, unsigned long *fsr,
                   void *fpqueue, unsigned long *fpqdepth);
 extern void fpload(unsigned long *fpregs, unsigned long *fsr);
 
-asmlinkage int do_signal(sigset_t *oldset, struct pt_regs * regs,
-                        unsigned long orig_o0, int restart_syscall);
-
 /* Signal frames: the original one (compatible with SunOS):
  *
  * Set up a signal frame... Make the stack look the way SunOS
@@ -95,98 +92,30 @@ struct rt_signal_frame {
 #define NF_ALIGNEDSZ  (((sizeof(struct new_signal_frame) + 7) & (~7)))
 #define RT_ALIGNEDSZ  (((sizeof(struct rt_signal_frame) + 7) & (~7)))
 
-/*
- * atomically swap in the new signal mask, and wait for a signal.
- * This is really tricky on the Sparc, watch out...
- */
-asmlinkage void _sigpause_common(old_sigset_t set, struct pt_regs *regs)
+static int _sigpause_common(old_sigset_t set)
 {
-       sigset_t saveset;
-
        set &= _BLOCKABLE;
        spin_lock_irq(&current->sighand->siglock);
-       saveset = current->blocked;
+       current->saved_sigmask = current->blocked;
        siginitset(&current->blocked, set);
        recalc_sigpending();
        spin_unlock_irq(&current->sighand->siglock);
 
-       regs->pc = regs->npc;
-       regs->npc += 4;
-
-       /* Condition codes and return value where set here for sigpause,
-        * and so got used by setup_frame, which again causes sigreturn()
-        * to return -EINTR.
-        */
-       while (1) {
-               current->state = TASK_INTERRUPTIBLE;
-               schedule();
-               /*
-                * Return -EINTR and set condition code here,
-                * so the interrupted system call actually returns
-                * these.
-                */
-               regs->psr |= PSR_C;
-               regs->u_regs[UREG_I0] = EINTR;
-               if (do_signal(&saveset, regs, 0, 0))
-                       return;
-       }
-}
+       current->state = TASK_INTERRUPTIBLE;
+       schedule();
+       set_thread_flag(TIF_RESTORE_SIGMASK);
 
-asmlinkage void do_sigpause(unsigned int set, struct pt_regs *regs)
-{
-       _sigpause_common(set, regs);
+       return -ERESTARTNOHAND;
 }
 
-asmlinkage void do_sigsuspend (struct pt_regs *regs)
+asmlinkage int sys_sigpause(unsigned int set)
 {
-       _sigpause_common(regs->u_regs[UREG_I0], regs);
+       return _sigpause_common(set);
 }
 
-asmlinkage void do_rt_sigsuspend(sigset_t __user *uset, size_t sigsetsize,
-                                struct pt_regs *regs)
+asmlinkage int sys_sigsuspend(old_sigset_t set)
 {
-       sigset_t oldset, set;
-
-       /* XXX: Don't preclude handling different sized sigset_t's.  */
-       if (sigsetsize != sizeof(sigset_t)) {
-               regs->psr |= PSR_C;
-               regs->u_regs[UREG_I0] = EINVAL;
-               return;
-       }
-
-       if (copy_from_user(&set, uset, sizeof(set))) {
-               regs->psr |= PSR_C;
-               regs->u_regs[UREG_I0] = EFAULT;
-               return;
-       }
-
-       sigdelsetmask(&set, ~_BLOCKABLE);
-       spin_lock_irq(&current->sighand->siglock);
-       oldset = current->blocked;
-       current->blocked = set;
-       recalc_sigpending();
-       spin_unlock_irq(&current->sighand->siglock);
-
-       regs->pc = regs->npc;
-       regs->npc += 4;
-
-       /* Condition codes and return value where set here for sigpause,
-        * and so got used by setup_frame, which again causes sigreturn()
-        * to return -EINTR.
-        */
-       while (1) {
-               current->state = TASK_INTERRUPTIBLE;
-               schedule();
-               /*
-                * Return -EINTR and set condition code here,
-                * so the interrupted system call actually returns
-                * these.
-                */
-               regs->psr |= PSR_C;
-               regs->u_regs[UREG_I0] = EINTR;
-               if (do_signal(&oldset, regs, 0, 0))
-                       return;
-       }
+       return _sigpause_common(set);
 }
 
 static inline int
@@ -1067,13 +996,13 @@ static inline void syscall_restart(unsigned long orig_i0, struct pt_regs *regs,
  * want to handle. Thus you cannot kill init even with a SIGKILL even by
  * mistake.
  */
-asmlinkage int do_signal(sigset_t *oldset, struct pt_regs * regs,
-                        unsigned long orig_i0, int restart_syscall)
+asmlinkage void do_signal(struct pt_regs * regs, unsigned long orig_i0, int restart_syscall)
 {
        siginfo_t info;
        struct sparc_deliver_cookie cookie;
        struct k_sigaction ka;
        int signr;
+       sigset_t *oldset;
 
        /*
         * XXX Disable svr4 signal handling until solaris emulation works.
@@ -1089,7 +1018,9 @@ asmlinkage int do_signal(sigset_t *oldset, struct pt_regs * regs,
        cookie.restart_syscall = restart_syscall;
        cookie.orig_i0 = orig_i0;
 
-       if (!oldset)
+       if (test_thread_flag(TIF_RESTORE_SIGMASK))
+               oldset = &current->saved_sigmask;
+       else
                oldset = &current->blocked;
 
        signr = get_signal_to_deliver(&info, &ka, regs, &cookie);
@@ -1098,7 +1029,14 @@ asmlinkage int do_signal(sigset_t *oldset, struct pt_regs * regs,
                        syscall_restart(cookie.orig_i0, regs, &ka.sa);
                handle_signal(signr, &ka, &info, oldset,
                              regs, svr4_signal);
-               return 1;
+               /* a signal was successfully delivered; the saved
+                * sigmask will have been stored in the signal frame,
+                * and will be restored by sigreturn, so we can simply
+                * clear the TIF_RESTORE_SIGMASK flag.
+                */
+               if (test_thread_flag(TIF_RESTORE_SIGMASK))
+                       clear_thread_flag(TIF_RESTORE_SIGMASK);
+               return;
        }
        if (cookie.restart_syscall &&
            (regs->u_regs[UREG_I0] == ERESTARTNOHAND ||
@@ -1115,7 +1053,14 @@ asmlinkage int do_signal(sigset_t *oldset, struct pt_regs * regs,
                regs->pc -= 4;
                regs->npc -= 4;
        }
-       return 0;
+
+       /* if there's no signal to deliver, we just put the saved sigmask
+        * back
+        */
+       if (test_thread_flag(TIF_RESTORE_SIGMASK)) {
+               clear_thread_flag(TIF_RESTORE_SIGMASK);
+               sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
+       }
 }
 
 asmlinkage int
index 0b0d492c953b0118114d925cf1672b11500f0926..19b25399d7e4219ad862863b0f941159ac195ba9 100644 (file)
@@ -66,7 +66,6 @@ struct poll {
 
 extern int svr4_getcontext (svr4_ucontext_t *, struct pt_regs *);
 extern int svr4_setcontext (svr4_ucontext_t *, struct pt_regs *);
-void _sigpause_common (unsigned int set, struct pt_regs *);
 extern void (*__copy_1page)(void *, const void *);
 extern void __memmove(void *, const void *, __kernel_size_t);
 extern void (*bzero_1page)(void *);
@@ -227,7 +226,6 @@ EXPORT_SYMBOL(kunmap_atomic);
 /* Solaris/SunOS binary compatibility */
 EXPORT_SYMBOL(svr4_setcontext);
 EXPORT_SYMBOL(svr4_getcontext);
-EXPORT_SYMBOL(_sigpause_common);
 
 EXPORT_SYMBOL(dump_thread);
 
index e457a40838fc240ffb92ff3b0d0543ca8aac6c6e..6877ae4cd1d9c75d58ba45072dd6e645fb658d4a 100644 (file)
@@ -75,7 +75,10 @@ sys_call_table:
 /*265*/        .long sys_timer_delete, sys_timer_create, sys_nis_syscall, sys_io_setup, sys_io_destroy
 /*270*/        .long sys_io_submit, sys_io_cancel, sys_io_getevents, sys_mq_open, sys_mq_unlink
 /*275*/        .long sys_mq_timedsend, sys_mq_timedreceive, sys_mq_notify, sys_mq_getsetattr, sys_waitid
-/*280*/        .long sys_ni_syscall, sys_add_key, sys_request_key, sys_keyctl
+/*280*/        .long sys_ni_syscall, sys_add_key, sys_request_key, sys_keyctl, sys_openat
+/*285*/        .long sys_mkdirat, sys_mknodat, sys_fchownat, sys_futimesat, sys_newfstatat
+/*290*/        .long sys_unlinkat, sys_renameat, sys_linkat, sys_symlinkat, sys_readlinkat
+/*295*/        .long sys_fchmodat, sys_faccessat, sys_pselect6, sys_ppoll
 
 #ifdef CONFIG_SUNOS_EMUL
        /* Now the SunOS syscall table. */
@@ -181,6 +184,11 @@ sunos_sys_table:
        .long sunos_nosys, sunos_nosys, sunos_nosys
        .long sunos_nosys
 /*280*/        .long sunos_nosys, sunos_nosys, sunos_nosys
+       .long sunos_nosys, sunos_nosys, sunos_nosys
+       .long sunos_nosys, sunos_nosys, sunos_nosys
        .long sunos_nosys
+/*290*/        .long sunos_nosys, sunos_nosys, sunos_nosys
+       .long sunos_nosys, sunos_nosys, sunos_nosys
+       .long sunos_nosys, sunos_nosys, sunos_nosys
 
 #endif
index be2c80932e2673c82f1d7ebbccc15090d5dc6240..8613b3eb877c738ed71adcec2281865bcef924c2 100644 (file)
@@ -323,11 +323,6 @@ static int do_one_mathemu(u32 insn, unsigned long *pfsr, unsigned long *fregs)
                case FMOVS:
                case FABSS:
                case FNEGS: TYPE(2,1,0,1,0,0,0); break;
-               default:
-#ifdef DEBUG_MATHEMU
-                       printk("unknown FPop1: %03lx\n",(insn>>5)&0x1ff);
-#endif
-                       break;
                }
        } else if ((insn & 0xc1f80000) == 0x81a80000)   /* FPOP2 */ {
                switch ((insn >> 5) & 0x1ff) {
@@ -337,11 +332,6 @@ static int do_one_mathemu(u32 insn, unsigned long *pfsr, unsigned long *fregs)
                case FCMPED: TYPE(3,0,0,2,1,2,1); break;
                case FCMPQ: TYPE(3,0,0,3,1,3,1); break;
                case FCMPEQ: TYPE(3,0,0,3,1,3,1); break;
-               default:
-#ifdef DEBUG_MATHEMU
-                       printk("unknown FPop2: %03lx\n",(insn>>5)&0x1ff);
-#endif
-                       break;
                }
        }
 
index 489bf68d5f05d49786b4f52de6356a951c9be4ad..77840c80478665058b778939b3219861d9744e5e 100644 (file)
@@ -295,8 +295,7 @@ static void iommu_release_one(u32 busa, int npages, struct sbus_bus *sbus)
        int ioptex;
        int i;
 
-       if (busa < iommu->start)
-               BUG();
+       BUG_ON(busa < iommu->start);
        ioptex = (busa - iommu->start) >> PAGE_SHIFT;
        for (i = 0; i < npages; i++) {
                iopte_val(iommu->page_table[ioptex + i]) = 0;
@@ -340,9 +339,9 @@ static int iommu_map_dma_area(dma_addr_t *pba, unsigned long va,
        iopte_t *first;
        int ioptex;
 
-       if ((va & ~PAGE_MASK) != 0) BUG();
-       if ((addr & ~PAGE_MASK) != 0) BUG();
-       if ((len & ~PAGE_MASK) != 0) BUG();
+       BUG_ON((va & ~PAGE_MASK) != 0);
+       BUG_ON((addr & ~PAGE_MASK) != 0);
+       BUG_ON((len & ~PAGE_MASK) != 0);
 
        /* page color = physical address */
        ioptex = bit_map_string_get(&iommu->usemap, len >> PAGE_SHIFT,
@@ -405,8 +404,8 @@ static void iommu_unmap_dma_area(unsigned long busa, int len)
        unsigned long end;
        int ioptex = (busa - iommu->start) >> PAGE_SHIFT;
 
-       if ((busa & ~PAGE_MASK) != 0) BUG();
-       if ((len & ~PAGE_MASK) != 0) BUG();
+       BUG_ON((busa & ~PAGE_MASK) != 0);
+       BUG_ON((len & ~PAGE_MASK) != 0);
 
        iopte += ioptex;
        end = busa + len;
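
The iommu.c changes are purely cosmetic: `if (cond) BUG();` becomes `BUG_ON(cond)`, which reads as an assertion, keeps the check on one line, and centralizes the unlikely() branch hint (the generic definition is `if (unlikely(cond)) BUG();`). Sketch:

    #include <linux/bug.h>          /* BUG_ON(); older trees get it via kernel.h */

    static void release_range_sketch(unsigned long busa, unsigned long start)
    {
            BUG_ON(busa < start);   /* assert the precondition in one line */
            /* ... free the translation entries covering the range ... */
    }
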
index a3fb3376ffa06c744e96338d17ecc519c237c24f..9ceddad0fb494ae4e6c82630ef8171e706115c51 100644 (file)
@@ -1,7 +1,7 @@
 #
 # Automatically generated make config: don't edit
-# Linux kernel version: 2.6.15
-# Mon Jan  9 14:36:29 2006
+# Linux kernel version: 2.6.16-rc1
+# Wed Jan 18 13:41:02 2006
 #
 CONFIG_SPARC=y
 CONFIG_SPARC64=y
@@ -233,6 +233,11 @@ CONFIG_VLAN_8021Q=m
 # CONFIG_ATALK is not set
 # CONFIG_X25 is not set
 # CONFIG_LAPB is not set
+
+#
+# TIPC Configuration (EXPERIMENTAL)
+#
+# CONFIG_TIPC is not set
 # CONFIG_NET_DIVERT is not set
 # CONFIG_ECONET is not set
 # CONFIG_WAN_ROUTER is not set
@@ -420,8 +425,7 @@ CONFIG_ISCSI_TCP=m
 # CONFIG_SCSI_QLOGIC_FC is not set
 # CONFIG_SCSI_QLOGIC_1280 is not set
 # CONFIG_SCSI_QLOGICPTI is not set
-CONFIG_SCSI_QLA2XXX=y
-# CONFIG_SCSI_QLA2XXX_EMBEDDED_FIRMWARE is not set
+# CONFIG_SCSI_QLA_FC is not set
 # CONFIG_SCSI_LPFC is not set
 # CONFIG_SCSI_DC395x is not set
 # CONFIG_SCSI_DC390T is not set
@@ -529,6 +533,7 @@ CONFIG_NET_PCI=y
 # CONFIG_DL2K is not set
 CONFIG_E1000=m
 CONFIG_E1000_NAPI=y
+# CONFIG_E1000_DISABLE_PACKET_SPLIT is not set
 # CONFIG_MYRI_SBUS is not set
 # CONFIG_NS83820 is not set
 # CONFIG_HAMACHI is not set
@@ -652,7 +657,6 @@ CONFIG_SERIAL_SUNSU_CONSOLE=y
 CONFIG_SERIAL_SUNSAB=m
 CONFIG_SERIAL_CORE=y
 CONFIG_SERIAL_CORE_CONSOLE=y
-# CONFIG_SERIAL_JSM is not set
 CONFIG_UNIX98_PTYS=y
 # CONFIG_LEGACY_PTYS is not set
 
@@ -737,6 +741,12 @@ CONFIG_I2C_ALGOBIT=y
 # CONFIG_I2C_DEBUG_BUS is not set
 # CONFIG_I2C_DEBUG_CHIP is not set
 
+#
+# SPI support
+#
+# CONFIG_SPI is not set
+# CONFIG_SPI_MASTER is not set
+
 #
 # Dallas's 1-wire bus
 #
@@ -1014,6 +1024,7 @@ CONFIG_USB_UHCI_HCD=m
 #
 CONFIG_USB_HID=y
 CONFIG_USB_HIDINPUT=y
+# CONFIG_USB_HIDINPUT_POWERBOOK is not set
 # CONFIG_HID_FF is not set
 CONFIG_USB_HIDDEV=y
 # CONFIG_USB_AIPTEK is not set
@@ -1268,12 +1279,13 @@ CONFIG_KPROBES=y
 # Kernel hacking
 #
 CONFIG_PRINTK_TIME=y
-CONFIG_DEBUG_KERNEL=y
 CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_KERNEL=y
 CONFIG_LOG_BUF_SHIFT=18
 CONFIG_DETECT_SOFTLOCKUP=y
 CONFIG_SCHEDSTATS=y
 # CONFIG_DEBUG_SLAB is not set
+# CONFIG_DEBUG_MUTEXES is not set
 # CONFIG_DEBUG_SPINLOCK is not set
 # CONFIG_DEBUG_SPINLOCK_SLEEP is not set
 # CONFIG_DEBUG_KOBJECT is not set
@@ -1281,6 +1293,7 @@ CONFIG_DEBUG_BUGVERBOSE=y
 # CONFIG_DEBUG_INFO is not set
 CONFIG_DEBUG_FS=y
 # CONFIG_DEBUG_VM is not set
+CONFIG_FORCED_INLINING=y
 # CONFIG_RCU_TORTURE_TEST is not set
 # CONFIG_DEBUG_STACK_USAGE is not set
 # CONFIG_DEBUG_DCFLUSH is not set
index 710002991888b0faf15438696e16e486adffa6c7..12911e7463f22c3b94830fc5d3d3577ff2b91670 100644 (file)
@@ -25,7 +25,7 @@
 
 #define curptr      g6
 
-#define NR_SYSCALLS 284      /* Each OS is different... */
+#define NR_SYSCALLS 299      /* Each OS is different... */
 
        .text
        .align          32
@@ -1416,7 +1416,6 @@ execve_merge:
         add            %sp, PTREGS_OFF, %o0
 
        .globl  sys_pipe, sys_sigpause, sys_nis_syscall
-       .globl  sys_sigsuspend, sys_rt_sigsuspend
        .globl  sys_rt_sigreturn
        .globl  sys_ptrace
        .globl  sys_sigaltstack
@@ -1440,28 +1439,6 @@ sys32_sigaltstack:
                 mov            %i6, %o2
 #endif
                .align          32
-sys_sigsuspend:        add             %sp, PTREGS_OFF, %o0
-               call            do_sigsuspend
-                add            %o7, 1f-.-4, %o7
-               nop
-sys_rt_sigsuspend: /* NOTE: %o0,%o1 have a correct value already */
-               add             %sp, PTREGS_OFF, %o2
-               call            do_rt_sigsuspend
-                add            %o7, 1f-.-4, %o7
-               nop
-#ifdef CONFIG_COMPAT
-       .globl  sys32_rt_sigsuspend
-sys32_rt_sigsuspend: /* NOTE: %o0,%o1 have a correct value already */
-               srl             %o0, 0, %o0
-               add             %sp, PTREGS_OFF, %o2
-               call            do_rt_sigsuspend32
-                add            %o7, 1f-.-4, %o7
-#endif
-               /* NOTE: %o0 has a correct value already */
-sys_sigpause:  add             %sp, PTREGS_OFF, %o1
-               call            do_sigpause
-                add            %o7, 1f-.-4, %o7
-               nop
 #ifdef CONFIG_COMPAT
        .globl  sys32_sigreturn
 sys32_sigreturn:
index 1dc3650c5caefaee73a69e9b68b1cf9705f6af03..059b0d0252245800bf415110644a659a9f6c5a8e 100644 (file)
@@ -164,6 +164,7 @@ void machine_restart(char * cmd)
        panic("Reboot failed!");
 }
 
+#ifdef CONFIG_COMPAT
 static void show_regwindow32(struct pt_regs *regs)
 {
        struct reg_window32 __user *rw;
@@ -189,6 +190,9 @@ static void show_regwindow32(struct pt_regs *regs)
               r_w.ins[0], r_w.ins[1], r_w.ins[2], r_w.ins[3],
               r_w.ins[4], r_w.ins[5], r_w.ins[6], r_w.ins[7]);
 }
+#else
+#define show_regwindow32(regs) do { } while (0)
+#endif
 
 static void show_regwindow(struct pt_regs *regs)
 {
index 090dcca00d2a1aa8de184c85b7187c79f9c35cd7..b80eba0081ca8d5b7b382adea960a0bf37239a44 100644 (file)
@@ -53,14 +53,13 @@ __handle_user_windows:
                wrpr                    %g0, RTRAP_PSTATE_IRQOFF, %pstate
                ldx                     [%g6 + TI_FLAGS], %l0
 
-1:             andcc                   %l0, (_TIF_NOTIFY_RESUME | _TIF_SIGPENDING), %g0
+1:             andcc                   %l0, (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK), %g0
                be,pt                   %xcc, __handle_user_windows_continue
                 nop
-               clr                     %o0
-               mov                     %l5, %o2
-               mov                     %l6, %o3
-               add                     %sp, PTREGS_OFF, %o1
-               mov                     %l0, %o4
+               mov                     %l5, %o1
+               mov                     %l6, %o2
+               add                     %sp, PTREGS_OFF, %o0
+               mov                     %l0, %o3
 
                call                    do_notify_resume
                 wrpr                   %g0, RTRAP_PSTATE, %pstate
@@ -96,15 +95,14 @@ __handle_perfctrs:
                 wrpr                   %g0, RTRAP_PSTATE, %pstate
                wrpr                    %g0, RTRAP_PSTATE_IRQOFF, %pstate
                ldx                     [%g6 + TI_FLAGS], %l0
-1:             andcc                   %l0, (_TIF_NOTIFY_RESUME | _TIF_SIGPENDING), %g0
+1:             andcc                   %l0, (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK), %g0
 
                be,pt                   %xcc, __handle_perfctrs_continue
                 sethi                  %hi(TSTATE_PEF), %o0
-               clr                     %o0
-               mov                     %l5, %o2
-               mov                     %l6, %o3
-               add                     %sp, PTREGS_OFF, %o1
-               mov                     %l0, %o4
+               mov                     %l5, %o1
+               mov                     %l6, %o2
+               add                     %sp, PTREGS_OFF, %o0
+               mov                     %l0, %o3
                call                    do_notify_resume
 
                 wrpr                   %g0, RTRAP_PSTATE, %pstate
@@ -129,11 +127,10 @@ __handle_userfpu:
                ba,a,pt                 %xcc, __handle_userfpu_continue
 
 __handle_signal:
-               clr                     %o0
-               mov                     %l5, %o2
-               mov                     %l6, %o3
-               add                     %sp, PTREGS_OFF, %o1
-               mov                     %l0, %o4
+               mov                     %l5, %o1
+               mov                     %l6, %o2
+               add                     %sp, PTREGS_OFF, %o0
+               mov                     %l0, %o3
                call                    do_notify_resume
                 wrpr                   %g0, RTRAP_PSTATE, %pstate
                wrpr                    %g0, RTRAP_PSTATE_IRQOFF, %pstate
@@ -200,7 +197,7 @@ __handle_preemption_continue:
                 andcc                  %l1, %o0, %g0
                andcc                   %l0, _TIF_NEED_RESCHED, %g0
                bne,pn                  %xcc, __handle_preemption
-                andcc                  %l0, (_TIF_NOTIFY_RESUME | _TIF_SIGPENDING), %g0
+                andcc                  %l0, (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK), %g0
                bne,pn                  %xcc, __handle_signal
 __handle_signal_continue:
                 ldub                   [%g6 + TI_WSAVED], %o2
index 250745896aeec4959087f544e8e46914bd6d2fdd..054461e6946d3334bfb0a3d56f878edfdae5c2b9 100644 (file)
@@ -561,6 +561,8 @@ static int __init set_preferred_console(void)
                serial_console = 1;
        } else if (idev == PROMDEV_ITTYB && odev == PROMDEV_OTTYB) {
                serial_console = 2;
+       } else if (idev == PROMDEV_IRSC && odev == PROMDEV_ORSC) {
+               serial_console = 3;
        } else {
                prom_printf("Inconsistent console: "
                            "input %d, output %d\n",
index 60f5dfabb1e173bcc08064c54cd64addf16c5030..ca11a4c457d4244d0279770ffe08343693cf726d 100644 (file)
@@ -36,9 +36,6 @@
 
 #define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
 
-static int do_signal(sigset_t *oldset, struct pt_regs * regs,
-                    unsigned long orig_o0, int ret_from_syscall);
-
 /* {set, get}context() needed for 64-bit SparcLinux userland. */
 asmlinkage void sparc64_set_context(struct pt_regs *regs)
 {
@@ -242,114 +239,29 @@ struct rt_signal_frame {
 /* Align macros */
 #define RT_ALIGNEDSZ  (((sizeof(struct rt_signal_frame) + 7) & (~7)))
 
-/*
- * atomically swap in the new signal mask, and wait for a signal.
- * This is really tricky on the Sparc, watch out...
- */
-asmlinkage void _sigpause_common(old_sigset_t set, struct pt_regs *regs)
+static long _sigpause_common(old_sigset_t set)
 {
-       sigset_t saveset;
-
-#ifdef CONFIG_SPARC32_COMPAT
-       if (test_thread_flag(TIF_32BIT)) {
-               extern asmlinkage void _sigpause32_common(compat_old_sigset_t,
-                                                         struct pt_regs *);
-               _sigpause32_common(set, regs);
-               return;
-       }
-#endif
        set &= _BLOCKABLE;
        spin_lock_irq(&current->sighand->siglock);
-       saveset = current->blocked;
+       current->saved_sigmask = current->blocked;
        siginitset(&current->blocked, set);
        recalc_sigpending();
        spin_unlock_irq(&current->sighand->siglock);
-       
-       if (test_thread_flag(TIF_32BIT)) {
-               regs->tpc = (regs->tnpc & 0xffffffff);
-               regs->tnpc = (regs->tnpc + 4) & 0xffffffff;
-       } else {
-               regs->tpc = regs->tnpc;
-               regs->tnpc += 4;
-       }
 
-       /* Condition codes and return value where set here for sigpause,
-        * and so got used by setup_frame, which again causes sigreturn()
-        * to return -EINTR.
-        */
-       while (1) {
-               current->state = TASK_INTERRUPTIBLE;
-               schedule();
-               /*
-                * Return -EINTR and set condition code here,
-                * so the interrupted system call actually returns
-                * these.
-                */
-               regs->tstate |= (TSTATE_ICARRY|TSTATE_XCARRY);
-               regs->u_regs[UREG_I0] = EINTR;
-               if (do_signal(&saveset, regs, 0, 0))
-                       return;
-       }
+       current->state = TASK_INTERRUPTIBLE;
+       schedule();
+       set_thread_flag(TIF_RESTORE_SIGMASK);
+       return -ERESTARTNOHAND;
 }
 
-asmlinkage void do_sigpause(unsigned int set, struct pt_regs *regs)
+asmlinkage long sys_sigpause(unsigned int set)
 {
-       _sigpause_common(set, regs);
+       return _sigpause_common(set);
 }
 
-asmlinkage void do_sigsuspend(struct pt_regs *regs)
+asmlinkage long sys_sigsuspend(old_sigset_t set)
 {
-       _sigpause_common(regs->u_regs[UREG_I0], regs);
-}
-
-asmlinkage void do_rt_sigsuspend(sigset_t __user *uset, size_t sigsetsize, struct pt_regs *regs)
-{
-       sigset_t oldset, set;
-        
-       /* XXX: Don't preclude handling different sized sigset_t's.  */
-       if (sigsetsize != sizeof(sigset_t)) {
-               regs->tstate |= (TSTATE_ICARRY|TSTATE_XCARRY);
-               regs->u_regs[UREG_I0] = EINVAL;
-               return;
-       }
-       if (copy_from_user(&set, uset, sizeof(set))) {
-               regs->tstate |= (TSTATE_ICARRY|TSTATE_XCARRY);
-               regs->u_regs[UREG_I0] = EFAULT;
-               return;
-       }
-                                                                
-       sigdelsetmask(&set, ~_BLOCKABLE);
-       spin_lock_irq(&current->sighand->siglock);
-       oldset = current->blocked;
-       current->blocked = set;
-       recalc_sigpending();
-       spin_unlock_irq(&current->sighand->siglock);
-       
-       if (test_thread_flag(TIF_32BIT)) {
-               regs->tpc = (regs->tnpc & 0xffffffff);
-               regs->tnpc = (regs->tnpc + 4) & 0xffffffff;
-       } else {
-               regs->tpc = regs->tnpc;
-               regs->tnpc += 4;
-       }
-
-       /* Condition codes and return value where set here for sigpause,
-        * and so got used by setup_frame, which again causes sigreturn()
-        * to return -EINTR.
-        */
-       while (1) {
-               current->state = TASK_INTERRUPTIBLE;
-               schedule();
-               /*
-                * Return -EINTR and set condition code here,
-                * so the interrupted system call actually returns
-                * these.
-                */
-               regs->tstate |= (TSTATE_ICARRY|TSTATE_XCARRY);
-               regs->u_regs[UREG_I0] = EINTR;
-               if (do_signal(&oldset, regs, 0, 0))
-                       return;
-       }
+       return _sigpause_common(set);
 }
 
 static inline int
@@ -607,26 +519,29 @@ static inline void syscall_restart(unsigned long orig_i0, struct pt_regs *regs,
  * want to handle. Thus you cannot kill init even with a SIGKILL even by
  * mistake.
  */
-static int do_signal(sigset_t *oldset, struct pt_regs * regs,
-                    unsigned long orig_i0, int restart_syscall)
+static void do_signal(struct pt_regs *regs, unsigned long orig_i0, int restart_syscall)
 {
        siginfo_t info;
        struct signal_deliver_cookie cookie;
        struct k_sigaction ka;
        int signr;
+       sigset_t *oldset;
        
        cookie.restart_syscall = restart_syscall;
        cookie.orig_i0 = orig_i0;
 
-       if (!oldset)
+       if (test_thread_flag(TIF_RESTORE_SIGMASK))
+               oldset = &current->saved_sigmask;
+       else
                oldset = &current->blocked;
 
 #ifdef CONFIG_SPARC32_COMPAT
        if (test_thread_flag(TIF_32BIT)) {
-               extern int do_signal32(sigset_t *, struct pt_regs *,
-                                      unsigned long, int);
-               return do_signal32(oldset, regs, orig_i0,
-                                  cookie.restart_syscall);
+               extern void do_signal32(sigset_t *, struct pt_regs *,
+                                       unsigned long, int);
+               do_signal32(oldset, regs, orig_i0,
+                           cookie.restart_syscall);
+               return;
        }
 #endif 
 
@@ -635,7 +550,15 @@ static int do_signal(sigset_t *oldset, struct pt_regs * regs,
                if (cookie.restart_syscall)
                        syscall_restart(orig_i0, regs, &ka.sa);
                handle_signal(signr, &ka, &info, oldset, regs);
-               return 1;
+
+               /* a signal was successfully delivered; the saved
+                * sigmask will have been stored in the signal frame,
+                * and will be restored by sigreturn, so we can simply
+                * clear the TIF_RESTORE_SIGMASK flag.
+                */
+               if (test_thread_flag(TIF_RESTORE_SIGMASK))
+                       clear_thread_flag(TIF_RESTORE_SIGMASK);
+               return;
        }
        if (cookie.restart_syscall &&
            (regs->u_regs[UREG_I0] == ERESTARTNOHAND ||
@@ -652,15 +575,21 @@ static int do_signal(sigset_t *oldset, struct pt_regs * regs,
                regs->tpc -= 4;
                regs->tnpc -= 4;
        }
-       return 0;
+
+       /* if there's no signal to deliver, we just put the saved sigmask
+        * back
+        */
+       if (test_thread_flag(TIF_RESTORE_SIGMASK)) {
+               clear_thread_flag(TIF_RESTORE_SIGMASK);
+               sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
+       }
 }
 
-void do_notify_resume(sigset_t *oldset, struct pt_regs *regs,
-                     unsigned long orig_i0, int restart_syscall,
+void do_notify_resume(struct pt_regs *regs, unsigned long orig_i0, int restart_syscall,
                      unsigned long thread_info_flags)
 {
-       if (thread_info_flags & _TIF_SIGPENDING)
-               do_signal(oldset, regs, orig_i0, restart_syscall);
+       if (thread_info_flags & (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK))
+               do_signal(regs, orig_i0, restart_syscall);
 }
 
 void ptrace_signal_deliver(struct pt_regs *regs, void *cookie)
index 009a86e5ded48702cecd4ed4e18b8d67ea8cab70..708ba9b42cda123a4522326495e2f9118053e166 100644 (file)
@@ -32,9 +32,6 @@
 
 #define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
 
-int do_signal32(sigset_t *oldset, struct pt_regs *regs,
-               unsigned long orig_o0, int ret_from_syscall);
-
 /* Signal frames: the original one (compatible with SunOS):
  *
  * Set up a signal frame... Make the stack look the way SunOS
@@ -226,102 +223,6 @@ int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from)
        return 0;
 }
 
-/*
- * atomically swap in the new signal mask, and wait for a signal.
- * This is really tricky on the Sparc, watch out...
- */
-asmlinkage void _sigpause32_common(compat_old_sigset_t set, struct pt_regs *regs)
-{
-       sigset_t saveset;
-
-       set &= _BLOCKABLE;
-       spin_lock_irq(&current->sighand->siglock);
-       saveset = current->blocked;
-       siginitset(&current->blocked, set);
-       recalc_sigpending();
-       spin_unlock_irq(&current->sighand->siglock);
-       
-       regs->tpc = regs->tnpc;
-       regs->tnpc += 4;
-       if (test_thread_flag(TIF_32BIT)) {
-               regs->tpc &= 0xffffffff;
-               regs->tnpc &= 0xffffffff;
-       }
-
-       /* Condition codes and return value where set here for sigpause,
-        * and so got used by setup_frame, which again causes sigreturn()
-        * to return -EINTR.
-        */
-       while (1) {
-               current->state = TASK_INTERRUPTIBLE;
-               schedule();
-               /*
-                * Return -EINTR and set condition code here,
-                * so the interrupted system call actually returns
-                * these.
-                */
-               regs->tstate |= TSTATE_ICARRY;
-               regs->u_regs[UREG_I0] = EINTR;
-               if (do_signal32(&saveset, regs, 0, 0))
-                       return;
-       }
-}
-
-asmlinkage void do_rt_sigsuspend32(u32 uset, size_t sigsetsize, struct pt_regs *regs)
-{
-       sigset_t oldset, set;
-       compat_sigset_t set32;
-        
-       /* XXX: Don't preclude handling different sized sigset_t's.  */
-       if (((compat_size_t)sigsetsize) != sizeof(sigset_t)) {
-               regs->tstate |= TSTATE_ICARRY;
-               regs->u_regs[UREG_I0] = EINVAL;
-               return;
-       }
-       if (copy_from_user(&set32, compat_ptr(uset), sizeof(set32))) {
-               regs->tstate |= TSTATE_ICARRY;
-               regs->u_regs[UREG_I0] = EFAULT;
-               return;
-       }
-       switch (_NSIG_WORDS) {
-       case 4: set.sig[3] = set32.sig[6] + (((long)set32.sig[7]) << 32);
-       case 3: set.sig[2] = set32.sig[4] + (((long)set32.sig[5]) << 32);
-       case 2: set.sig[1] = set32.sig[2] + (((long)set32.sig[3]) << 32);
-       case 1: set.sig[0] = set32.sig[0] + (((long)set32.sig[1]) << 32);
-       }
-       sigdelsetmask(&set, ~_BLOCKABLE);
-       spin_lock_irq(&current->sighand->siglock);
-       oldset = current->blocked;
-       current->blocked = set;
-       recalc_sigpending();
-       spin_unlock_irq(&current->sighand->siglock);
-       
-       regs->tpc = regs->tnpc;
-       regs->tnpc += 4;
-       if (test_thread_flag(TIF_32BIT)) {
-               regs->tpc &= 0xffffffff;
-               regs->tnpc &= 0xffffffff;
-       }
-
-       /* Condition codes and return value where set here for sigpause,
-        * and so got used by setup_frame, which again causes sigreturn()
-        * to return -EINTR.
-        */
-       while (1) {
-               current->state = TASK_INTERRUPTIBLE;
-               schedule();
-               /*
-                * Return -EINTR and set condition code here,
-                * so the interrupted system call actually returns
-                * these.
-                */
-               regs->tstate |= TSTATE_ICARRY;
-               regs->u_regs[UREG_I0] = EINTR;
-               if (do_signal32(&oldset, regs, 0, 0))
-                       return;
-       }
-}
-
 static int restore_fpu_state32(struct pt_regs *regs, __siginfo_fpu_t __user *fpu)
 {
        unsigned long *fpregs = current_thread_info()->fpregs;
@@ -1362,8 +1263,8 @@ static inline void syscall_restart32(unsigned long orig_i0, struct pt_regs *regs
  * want to handle. Thus you cannot kill init even with a SIGKILL even by
  * mistake.
  */
-int do_signal32(sigset_t *oldset, struct pt_regs * regs,
-               unsigned long orig_i0, int restart_syscall)
+void do_signal32(sigset_t *oldset, struct pt_regs * regs,
+                unsigned long orig_i0, int restart_syscall)
 {
        siginfo_t info;
        struct signal_deliver_cookie cookie;
@@ -1380,7 +1281,15 @@ int do_signal32(sigset_t *oldset, struct pt_regs * regs,
                        syscall_restart32(orig_i0, regs, &ka.sa);
                handle_signal32(signr, &ka, &info, oldset,
                                regs, svr4_signal);
-               return 1;
+
+               /* a signal was successfully delivered; the saved
+                * sigmask will have been stored in the signal frame,
+                * and will be restored by sigreturn, so we can simply
+                * clear the TIF_RESTORE_SIGMASK flag.
+                */
+               if (test_thread_flag(TIF_RESTORE_SIGMASK))
+                       clear_thread_flag(TIF_RESTORE_SIGMASK);
+               return;
        }
        if (cookie.restart_syscall &&
            (regs->u_regs[UREG_I0] == ERESTARTNOHAND ||
@@ -1397,7 +1306,14 @@ int do_signal32(sigset_t *oldset, struct pt_regs * regs,
                regs->tpc -= 4;
                regs->tnpc -= 4;
        }
-       return 0;
+
+       /* if there's no signal to deliver, we just put the saved sigmask
+        * back
+        */
+       if (test_thread_flag(TIF_RESTORE_SIGMASK)) {
+               clear_thread_flag(TIF_RESTORE_SIGMASK);
+               sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
+       }
 }
 
 struct sigstack32 {
index d177d7e5c9d30b26cf5f456e0ba7da88445fbf04..3c06bfb92a8c81b40236eb8fb678474875d1e012 100644 (file)
@@ -69,7 +69,6 @@ struct poll {
 
 extern void die_if_kernel(char *str, struct pt_regs *regs);
 extern pid_t kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);
-void _sigpause_common (unsigned int set, struct pt_regs *);
 extern void *__bzero(void *, size_t);
 extern void *__memscan_zero(void *, size_t);
 extern void *__memscan_generic(void *, int, size_t);
@@ -236,9 +235,10 @@ EXPORT_SYMBOL(pci_dma_supported);
 /* I/O device mmaping on Sparc64. */
 EXPORT_SYMBOL(io_remap_pfn_range);
 
+#ifdef CONFIG_COMPAT
 /* Solaris/SunOS binary compatibility */
-EXPORT_SYMBOL(_sigpause_common);
 EXPORT_SYMBOL(verify_compat_iovec);
+#endif
 
 EXPORT_SYMBOL(dump_fpu);
 EXPORT_SYMBOL(pte_alloc_one_kernel);
index 9cd272ac3ac10342de0e8ff4f5e0e4c45a860013..60b59375aa78d31f78b832f76928fc6d4447ce72 100644 (file)
@@ -84,7 +84,6 @@ SIGN2(sys32_fadvise64_64, compat_sys_fadvise64_64, %o0, %o5)
 SIGN2(sys32_bdflush, sys_bdflush, %o0, %o1)
 SIGN1(sys32_mlockall, sys_mlockall, %o0)
 SIGN1(sys32_nfsservctl, compat_sys_nfsservctl, %o0)
-SIGN1(sys32_clock_settime, compat_sys_clock_settime, %o1)
 SIGN1(sys32_clock_nanosleep, compat_sys_clock_nanosleep, %o1)
 SIGN1(sys32_timer_settime, compat_sys_timer_settime, %o1)
 SIGN1(sys32_io_submit, compat_sys_io_submit, %o1)
index d4b7a100cb8ac5cf61f59b8522cf90f7e116fc57..9264ccbaaafad7ec436f3a8b68221731dac0c143 100644 (file)
@@ -821,7 +821,7 @@ asmlinkage long sys32_utimes(char __user *filename,
                        return -EFAULT;
        }
 
-       return do_utimes(filename, (tvs ? &ktvs[0] : NULL));
+       return do_utimes(AT_FDCWD, filename, (tvs ? &ktvs[0] : NULL));
 }
 
 /* These are here just in case some old sparc32 binary calls it. */
@@ -1003,7 +1003,7 @@ asmlinkage long sys32_adjtimex(struct timex32 __user *utp)
 asmlinkage long sparc32_open(const char __user *filename,
                             int flags, int mode)
 {
-       return do_sys_open(filename, flags, mode);
+       return do_sys_open(AT_FDCWD, filename, flags, mode);
 }
 
 extern unsigned long do_mremap(unsigned long addr,
index 98d24bc0004441ea0e57f3ef4cc5feb003d0dff7..2881faf36635c522d247e8bc6953c7efce8a3248 100644 (file)
@@ -41,7 +41,7 @@ sys_call_table32:
 /*90*/ .word sys_dup2, sys_setfsuid, compat_sys_fcntl, sys32_select, sys_setfsgid
        .word sys_fsync, sys32_setpriority, sys_nis_syscall, sys_nis_syscall, sys_nis_syscall
 /*100*/ .word sys32_getpriority, sys32_rt_sigreturn, sys32_rt_sigaction, sys32_rt_sigprocmask, sys32_rt_sigpending
-       .word compat_sys_rt_sigtimedwait, sys32_rt_sigqueueinfo, sys32_rt_sigsuspend, sys_setresuid, sys_getresuid
+       .word compat_sys_rt_sigtimedwait, sys32_rt_sigqueueinfo, compat_sys_rt_sigsuspend, sys_setresuid, sys_getresuid
 /*110*/        .word sys_setresgid, sys_getresgid, sys_setregid, sys_nis_syscall, sys_nis_syscall
        .word sys32_getgroups, sys32_gettimeofday, sys32_getrusage, sys_nis_syscall, sys_getcwd
 /*120*/        .word compat_sys_readv, compat_sys_writev, sys32_settimeofday, sys32_fchown16, sys_fchmod
@@ -71,12 +71,15 @@ sys_call_table32:
 /*240*/        .word sys_munlockall, sys32_sched_setparam, sys32_sched_getparam, sys32_sched_setscheduler, sys32_sched_getscheduler
        .word sys_sched_yield, sys32_sched_get_priority_max, sys32_sched_get_priority_min, sys32_sched_rr_get_interval, compat_sys_nanosleep
 /*250*/        .word sys32_mremap, sys32_sysctl, sys32_getsid, sys_fdatasync, sys32_nfsservctl
-       .word sys_ni_syscall, sys32_clock_settime, compat_sys_clock_gettime, compat_sys_clock_getres, sys32_clock_nanosleep
+       .word sys_ni_syscall, compat_sys_clock_settime, compat_sys_clock_gettime, compat_sys_clock_getres, sys32_clock_nanosleep
 /*260*/        .word compat_sys_sched_getaffinity, compat_sys_sched_setaffinity, sys32_timer_settime, compat_sys_timer_gettime, sys_timer_getoverrun
        .word sys_timer_delete, compat_sys_timer_create, sys_ni_syscall, compat_sys_io_setup, sys_io_destroy
 /*270*/        .word sys32_io_submit, sys_io_cancel, compat_sys_io_getevents, sys32_mq_open, sys_mq_unlink
        .word compat_sys_mq_timedsend, compat_sys_mq_timedreceive, compat_sys_mq_notify, compat_sys_mq_getsetattr, compat_sys_waitid
-/*280*/        .word sys_ni_syscall, sys_add_key, sys_request_key, sys_keyctl
+/*280*/        .word sys_ni_syscall, sys_add_key, sys_request_key, sys_keyctl, compat_sys_openat
+       .word sys_mkdirat, sys_mknodat, sys_fchownat, compat_sys_futimesat, compat_sys_newfstatat
+/*285*/        .word sys_unlinkat, sys_renameat, sys_linkat, sys_symlinkat, sys_readlinkat
+       .word sys_fchmodat, sys_faccessat, compat_sys_pselect6, compat_sys_ppoll
 
 #endif /* CONFIG_COMPAT */
 
@@ -142,7 +145,10 @@ sys_call_table:
        .word sys_timer_delete, sys_timer_create, sys_ni_syscall, sys_io_setup, sys_io_destroy
 /*270*/        .word sys_io_submit, sys_io_cancel, sys_io_getevents, sys_mq_open, sys_mq_unlink
        .word sys_mq_timedsend, sys_mq_timedreceive, sys_mq_notify, sys_mq_getsetattr, sys_waitid
-/*280*/        .word sys_nis_syscall, sys_add_key, sys_request_key, sys_keyctl
+/*280*/        .word sys_nis_syscall, sys_add_key, sys_request_key, sys_keyctl, sys_openat
+       .word sys_mkdirat, sys_mknodat, sys_fchownat, sys_futimesat, compat_sys_newfstatat
+/*285*/        .word sys_unlinkat, sys_renameat, sys_linkat, sys_symlinkat, sys_readlinkat
+       .word sys_fchmodat, sys_faccessat, sys_pselect6, sys_ppoll
 
 #if defined(CONFIG_SUNOS_EMUL) || defined(CONFIG_SOLARIS_EMUL) || \
     defined(CONFIG_SOLARIS_EMUL_MODULE)
@@ -239,13 +245,20 @@ sunos_sys_table:
 /*250*/        .word sunos_nosys, sunos_nosys, sunos_nosys
        .word sunos_nosys, sunos_nosys, sunos_nosys
        .word sunos_nosys, sunos_nosys, sunos_nosys
+       .word sunos_nosys
+/*260*/        .word sunos_nosys, sunos_nosys, sunos_nosys
        .word sunos_nosys, sunos_nosys, sunos_nosys
        .word sunos_nosys, sunos_nosys, sunos_nosys
+       .word sunos_nosys
+/*270*/        .word sunos_nosys, sunos_nosys, sunos_nosys
        .word sunos_nosys, sunos_nosys, sunos_nosys
        .word sunos_nosys, sunos_nosys, sunos_nosys
+       .word sunos_nosys
+/*280*/        .word sunos_nosys, sunos_nosys, sunos_nosys
        .word sunos_nosys, sunos_nosys, sunos_nosys
        .word sunos_nosys, sunos_nosys, sunos_nosys
+       .word sunos_nosys
+/*290*/        .word sunos_nosys, sunos_nosys, sunos_nosys
        .word sunos_nosys, sunos_nosys, sunos_nosys
        .word sunos_nosys, sunos_nosys, sunos_nosys
-       .word sunos_nosys
 #endif
index 459c8fbe02b4df734af56e0d16cde8d0b7859ea9..a22930d62adf0de96b7d365a8de9df0e141993b3 100644 (file)
@@ -280,9 +280,9 @@ static struct sparc64_tick_ops stick_operations __read_mostly = {
  * Since STICK is constantly updating, we have to access it carefully.
  *
  * The sequence we use to read is:
- * 1) read low
- * 2) read high
- * 3) read low again, if it rolled over increment high by 1
+ * 1) read high
+ * 2) read low
+ * 3) read high again, if it rolled over, re-read both low and high again.
  *
  * Writing STICK safely is also tricky:
  * 1) write low to zero
@@ -295,18 +295,18 @@ static struct sparc64_tick_ops stick_operations __read_mostly = {
 static unsigned long __hbird_read_stick(void)
 {
        unsigned long ret, tmp1, tmp2, tmp3;
-       unsigned long addr = HBIRD_STICK_ADDR;
+       unsigned long addr = HBIRD_STICK_ADDR+8;
 
-       __asm__ __volatile__("ldxa      [%1] %5, %2\n\t"
-                            "add       %1, 0x8, %1\n\t"
-                            "ldxa      [%1] %5, %3\n\t"
+       __asm__ __volatile__("ldxa      [%1] %5, %2\n"
+                            "1:\n\t"
                             "sub       %1, 0x8, %1\n\t"
+                            "ldxa      [%1] %5, %3\n\t"
+                            "add       %1, 0x8, %1\n\t"
                             "ldxa      [%1] %5, %4\n\t"
                             "cmp       %4, %2\n\t"
-                            "blu,a,pn  %%xcc, 1f\n\t"
-                            " add      %3, 1, %3\n"
-                            "1:\n\t"
-                            "sllx      %3, 32, %3\n\t"
+                            "bne,a,pn  %%xcc, 1b\n\t"
+                            " mov      %4, %2\n\t"
+                            "sllx      %4, 32, %4\n\t"
                             "or        %3, %4, %0\n\t"
                             : "=&r" (ret), "=&r" (addr),
                               "=&r" (tmp1), "=&r" (tmp2), "=&r" (tmp3)
index eae5db8dda56468ecaee4db7dfcf8fd01e60f6df..ac6d035dd150e6c8cbe2f7747c2e6873dc886fe6 100644 (file)
@@ -99,8 +99,12 @@ prom_query_input_device(void)
        if (!strncmp(propb, "keyboard", 8))
                return PROMDEV_ITTYA;
 
+       if (!strncmp (propb, "rsc", 3))
+               return PROMDEV_IRSC;
+
        if (strncmp (propb, "tty", 3) || !propb[3])
                return PROMDEV_I_UNK;
+
        switch (propb[3]) {
                case 'a': return PROMDEV_ITTYA;
                case 'b': return PROMDEV_ITTYB;
@@ -136,8 +140,12 @@ prom_query_output_device(void)
        if (!strncmp(propb, "screen", 6))
                return PROMDEV_OTTYA;
 
+       if (!strncmp (propb, "rsc", 3))
+               return PROMDEV_ORSC;
+
        if (strncmp (propb, "tty", 3) || !propb[3])
                return PROMDEV_O_UNK;
+
        switch (propb[3]) {
                case 'a': return PROMDEV_OTTYA;
                case 'b': return PROMDEV_OTTYB;
index 4b6ae583c0a3ecdcd5ca8298c202501f14d764e0..eb314ed23cdbecb693ebef1ff3c797ca51e24696 100644 (file)
@@ -180,6 +180,8 @@ solaris_sigsuspend:
         nop
        call            sys_sigsuspend
         stx            %o0, [%sp + PTREGS_OFF + PT_V9_I0]
+       b,pt            %xcc, ret_from_solaris
+        nop
 
        .globl          solaris_getpid
 solaris_getpid:
index 8ff3bcbce5fcb5172dd75a6469b14ba02056e283..5982fe2753e0286382aa07ba35ba831b98cb2285 100644 (file)
@@ -143,6 +143,7 @@ config HOSTFS
 
 config HPPFS
        tristate "HoneyPot ProcFS (EXPERIMENTAL)"
+       depends on EXPERIMENTAL
        help
        hppfs (HoneyPot ProcFS) is a filesystem which allows UML /proc
        entries to be overridden, removed, or fabricated from the host.
@@ -155,10 +156,6 @@ config HPPFS
        You only need this if you are setting up a UML honeypot.  Otherwise,
        it is safe to say 'N' here.
 
-       If you are actively using it, please report any problems, since it's
-       getting fixed. In this moment, it is experimental on 2.6 (it works on
-       2.4).
-
 config MCONSOLE
        bool "Management console"
        default y
@@ -243,8 +240,16 @@ config NEST_LEVEL
         Only change this if you are running nested UMLs.
 
 config HIGHMEM
-       bool "Highmem support"
-       depends on !64BIT
+       bool "Highmem support (EXPERIMENTAL)"
+       depends on !64BIT && EXPERIMENTAL
+       default n
+       help
+       This was used to allow UML to run with large amounts of memory.
+       Currently it is unstable, so if unsure say N.
+
+       To use large amounts of memory, it is recommended to disable TT mode (i.e.
+       CONFIG_MODE_TT) and enable static linking (i.e. CONFIG_STATIC_LINK) -
+       this should allow the guest to use up to 2.75G of memory.
 
 config KERNEL_STACK_ORDER
        int "Kernel stack size order"
@@ -269,17 +274,13 @@ endmenu
 
 source "init/Kconfig"
 
-source "net/Kconfig"
-
-source "drivers/base/Kconfig"
+source "drivers/block/Kconfig"
 
 source "arch/um/Kconfig.char"
 
-source "drivers/block/Kconfig"
+source "drivers/base/Kconfig"
 
-config NETDEVICES
-       bool
-       default NET
+source "net/Kconfig"
 
 source "arch/um/Kconfig.net"
 
index c71b39a677aa49b41bab93ffaf7374800948abb0..ef79ed25aecd5fd64125a7777990e14d4e4846f0 100644 (file)
@@ -22,13 +22,17 @@ config TOP_ADDR
        default 0x80000000 if HOST_2G_2G
 
 config 3_LEVEL_PGTABLES
-       bool "Three-level pagetables"
+       bool "Three-level pagetables (EXPERIMENTAL)"
        default n
+       depends on EXPERIMENTAL
        help
        Three-level pagetables will let UML have more than 4G of physical
        memory.  All the memory that can't be mapped directly will be treated
        as high memory.
 
+       However, this is experimental on 32-bit architectures, so if unsure say
+       N (on x86-64 it is enabled automatically instead, as it's safe there).
+
 config STUB_CODE
        hex
        default 0xbfffe000
index 45435ff589c17e8252008431e3ee659dedc08ed8..6430a6383853494fecb56d562ada5918d9df424c 100644 (file)
@@ -32,7 +32,7 @@ um-modes-$(CONFIG_MODE_TT) += tt
 um-modes-$(CONFIG_MODE_SKAS) += skas
 
 MODE_INCLUDE   += $(foreach mode,$(um-modes-y),\
-                  -I$(srctree)/$(ARCH_DIR)/kernel/$(mode)/include)
+                  -I$(srctree)/$(ARCH_DIR)/include/$(mode))
 
 MAKEFILES-INCL += $(foreach mode,$(um-modes-y),\
                   $(srctree)/$(ARCH_DIR)/Makefile-$(mode))
index 30d285b266af5cea7736d98c9c690d41f50f7de7..507e3cbac9d38a281d357f4c40c86309ce585f9e 100644 (file)
@@ -31,6 +31,10 @@ void daemon_init(struct net_device *dev, void *data)
        dpri->fd = -1;
        dpri->control = -1;
        dpri->dev = dev;
+       /* We will free this pointer. If it contains crap we're burned. */
+       dpri->ctl_addr = NULL;
+       dpri->data_addr = NULL;
+       dpri->local_addr = NULL;
 
        printk("daemon backend (uml_switch version %d) - %s:%s", 
               SWITCH_VERSION, dpri->sock_type, dpri->ctl_sock);
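
The rule behind the NULL initialization above, as a small illustrative sketch (the struct
and field names are invented): any pointer that will later be handed to kfree() must start
out NULL, because kfree(NULL) is a no-op while freeing an uninitialized value is not.

	struct example_priv {
		void *ctl_addr;			/* will be kfree()d on remove */
	};

	static void example_init(struct example_priv *p)
	{
		p->ctl_addr = NULL;		/* safe to kfree() even if never allocated */
	}

	static void example_remove(struct example_priv *p)
	{
		kfree(p->ctl_addr);
		p->ctl_addr = NULL;		/* guard against a repeated remove */
	}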
index 1bb085b2824d19e25e88ce4e931bcfba25f07a76..c944265955e203f9a11892f2283e3900aebcef11 100644 (file)
@@ -158,10 +158,16 @@ static void daemon_remove(void *data)
        struct daemon_data *pri = data;
 
        os_close_file(pri->fd);
+       pri->fd = -1;
        os_close_file(pri->control);
+       pri->control = -1;
+
        kfree(pri->data_addr);
+       pri->data_addr = NULL;
        kfree(pri->ctl_addr);
+       pri->ctl_addr = NULL;
        kfree(pri->local_addr);
+       pri->local_addr = NULL;
 }
 
 int daemon_user_write(int fd, void *buf, int len, struct daemon_data *pri)
index 3296e86a03a5cd3a197007c1593e127fee3c1d7c..c41f75e4acb5a64d733af6534837026421f52037 100644 (file)
@@ -11,6 +11,7 @@
 #include "user.h"
 #include "user_util.h"
 #include "chan_user.h"
+#include "os.h"
 
 struct fd_chan {
        int fd;
index fb1f9fb9b8717ce74fe95104d711642e4c152a02..8ebb2241ad4263ef2fd1cac2e472d66671862cd7 100644 (file)
@@ -68,6 +68,11 @@ static int uml_net_rx(struct net_device *dev)
        return pkt_len;
 }
 
+static void uml_dev_close(void* dev)
+{
+       dev_close( (struct net_device *) dev);
+}
+
 irqreturn_t uml_net_interrupt(int irq, void *dev_id, struct pt_regs *regs)
 {
        struct net_device *dev = dev_id;
@@ -80,15 +85,21 @@ irqreturn_t uml_net_interrupt(int irq, void *dev_id, struct pt_regs *regs)
        spin_lock(&lp->lock);
        while((err = uml_net_rx(dev)) > 0) ;
        if(err < 0) {
+               DECLARE_WORK(close_work, uml_dev_close, dev);
                printk(KERN_ERR 
                       "Device '%s' read returned %d, shutting it down\n", 
                       dev->name, err);
-               dev_close(dev);
+               /* dev_close can't be called in interrupt context, and it takes
+                * lp->lock again.
+                * And dev_close() can be safely called multiple times on the
+                * same device, since it tests for (dev->flags & IFF_UP). So
+                * there's no harm in delaying the device shutdown. */
+               schedule_work(&close_work);
                goto out;
        }
        reactivate_fd(lp->fd, UM_ETH_IRQ);
 
- out:
+out:
        spin_unlock(&lp->lock);
        return(IRQ_HANDLED);
 }
@@ -317,6 +328,11 @@ static int eth_configure(int n, void *init, char *mac,
                return 1;
        }
 
+       lp = dev->priv;
+       /* This points to the transport private data. It's still clear, but we
+        * must memset it to 0 *now*. Let's help the drivers. */
+       memset(lp, 0, size);
+
        /* sysfs register */
        if (!driver_registered) {
                platform_driver_register(&uml_net_driver);
@@ -358,7 +374,6 @@ static int eth_configure(int n, void *init, char *mac,
                free_netdev(dev);
                return 1;
        }
-       lp = dev->priv;
 
        /* lp.user is the first four bytes of the transport data, which
         * has already been initialized.  This structure assignment will
index 7696f8d2d89c75cb21df2fb7727366d167c73730..101efd26d46799a3eeda011409d181dc971502d7 100644 (file)
@@ -1103,31 +1103,33 @@ static int ubd_ioctl(struct inode * inode, struct file * file,
        return(-EINVAL);
 }
 
-static int same_backing_files(char *from_cmdline, char *from_cow, char *cow)
+static int path_requires_switch(char *from_cmdline, char *from_cow, char *cow)
 {
        struct uml_stat buf1, buf2;
        int err;
 
-       if(from_cmdline == NULL) return(1);
-       if(!strcmp(from_cmdline, from_cow)) return(1);
+       if(from_cmdline == NULL)
+               return 0;
+       if(!strcmp(from_cmdline, from_cow))
+               return 0;
 
        err = os_stat_file(from_cmdline, &buf1);
        if(err < 0){
                printk("Couldn't stat '%s', err = %d\n", from_cmdline, -err);
-               return(1);
+               return 0;
        }
        err = os_stat_file(from_cow, &buf2);
        if(err < 0){
                printk("Couldn't stat '%s', err = %d\n", from_cow, -err);
-               return(1);
+               return 1;
        }
        if((buf1.ust_dev == buf2.ust_dev) && (buf1.ust_ino == buf2.ust_ino))
-               return(1);
+               return 0;
 
        printk("Backing file mismatch - \"%s\" requested,\n"
               "\"%s\" specified in COW header of \"%s\"\n",
               from_cmdline, from_cow, cow);
-       return(0);
+       return 1;
 }
 
 static int backing_file_mismatch(char *file, __u64 size, time_t mtime)
@@ -1189,18 +1191,19 @@ int open_ubd_file(char *file, struct openflags *openflags,
        unsigned long long size;
        __u32 version, align;
        char *backing_file;
-       int fd, err, sectorsize, same, mode = 0644;
+       int fd, err, sectorsize, asked_switch, mode = 0644;
 
        fd = os_open_file(file, *openflags, mode);
-       if(fd < 0){
-               if((fd == -ENOENT) && (create_cow_out != NULL))
+       if (fd < 0) {
+               if ((fd == -ENOENT) && (create_cow_out != NULL))
                        *create_cow_out = 1;
-                if(!openflags->w ||
-                   ((fd != -EROFS) && (fd != -EACCES))) return(fd);
+                if (!openflags->w ||
+                   ((fd != -EROFS) && (fd != -EACCES)))
+                       return fd;
                openflags->w = 0;
                fd = os_open_file(file, *openflags, mode);
-               if(fd < 0)
-                       return(fd);
+               if (fd < 0)
+                       return fd;
         }
 
        err = os_lock_file(fd, openflags->w);
@@ -1209,7 +1212,9 @@ int open_ubd_file(char *file, struct openflags *openflags,
                goto out_close;
        }
 
-       if(backing_file_out == NULL) return(fd);
+       /* Successful return case! */
+       if(backing_file_out == NULL)
+               return(fd);
 
        err = read_cow_header(file_reader, &fd, &version, &backing_file, &mtime,
                              &size, &sectorsize, &align, bitmap_offset_out);
@@ -1218,34 +1223,34 @@ int open_ubd_file(char *file, struct openflags *openflags,
                       "errno = %d\n", file, -err);
                goto out_close;
        }
-       if(err) return(fd);
-
-       if(backing_file_out == NULL) return(fd);
+       if(err)
+               return(fd);
 
-       same = same_backing_files(*backing_file_out, backing_file, file);
+       asked_switch = path_requires_switch(*backing_file_out, backing_file, file);
 
-       if(!same && !backing_file_mismatch(*backing_file_out, size, mtime)){
+       /* Allow switching only if no mismatch. */
+       if (asked_switch && !backing_file_mismatch(*backing_file_out, size, mtime)) {
                printk("Switching backing file to '%s'\n", *backing_file_out);
                err = write_cow_header(file, fd, *backing_file_out,
                                       sectorsize, align, &size);
-               if(err){
+               if (err) {
                        printk("Switch failed, errno = %d\n", -err);
-                       return(err);
+                       goto out_close;
                }
-       }
-       else {
+       } else {
                *backing_file_out = backing_file;
                err = backing_file_mismatch(*backing_file_out, size, mtime);
-               if(err) goto out_close;
+               if (err)
+                       goto out_close;
        }
 
        cow_sizes(version, size, sectorsize, align, *bitmap_offset_out,
                  bitmap_len_out, data_offset_out);
 
-        return(fd);
+        return fd;
  out_close:
        os_close_file(fd);
-       return(err);
+       return err;
 }
 
 int create_cow_file(char *cow_file, char *backing_file, struct openflags flags,
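
The error handling above moves toward the usual kernel goto-cleanup shape; a generic
sketch of that shape follows (open_helper(), check_helper() and close_helper() are
placeholders, not functions from this patch):

	static int open_and_check(char *file)
	{
		int fd, err;

		fd = open_helper(file);
		if (fd < 0)
			return fd;

		err = check_helper(fd);
		if (err)
			goto out_close;

		return fd;			/* success: the caller owns fd */

	 out_close:
		close_helper(fd);
		return err;
	}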
index 8f4e46d677ab08cb4305b85cc384a9e173d36bc0..c649108a9e9f298c4266c568ce88627bd8e2eab9 100644 (file)
@@ -120,8 +120,10 @@ extern void machine_halt(void);
 extern int is_syscall(unsigned long addr);
 extern void arch_switch(void);
 extern void free_irq(unsigned int, void *);
-extern int um_in_interrupt(void);
 extern int cpu(void);
+
+/* Are we disallowed to sleep? Used to choose between GFP_KERNEL and GFP_ATOMIC. */
+extern int __cant_sleep(void);
 extern void segv_handler(int sig, union uml_pt_regs *regs);
 extern void sigio_handler(int sig, union uml_pt_regs *regs);
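
A hedged sketch of how a caller would use __cant_sleep() for the purpose the comment
describes (alloc_buffer() is illustrative, not part of the patch):

	static void *alloc_buffer(int size)
	{
		/* GFP_ATOMIC when sleeping is not allowed, GFP_KERNEL otherwise */
		if (__cant_sleep())
			return kmalloc(size, GFP_ATOMIC);
		return kmalloc(size, GFP_KERNEL);
	}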
 
diff --git a/arch/um/include/longjmp.h b/arch/um/include/longjmp.h
new file mode 100644 (file)
index 0000000..018b381
--- /dev/null
@@ -0,0 +1,19 @@
+#ifndef __UML_LONGJMP_H
+#define __UML_LONGJMP_H
+
+#include <setjmp.h>
+#include "os.h"
+
+#define UML_SIGLONGJMP(buf, val) do { \
+       longjmp(*buf, val);     \
+} while(0)
+
+#define UML_SIGSETJMP(buf, enable) ({ \
+       int n; \
+       enable = get_signals(); \
+       n = setjmp(*buf); \
+       if(n != 0) \
+               set_signals(enable); \
+       n; })
+
+#endif
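
A usage sketch of the two macros (the buffer and helpers below are illustrative, not from
the patch): UML_SIGSETJMP records the current signal-enable state and returns 0 on the
initial call; a later UML_SIGLONGJMP comes back through the same setjmp with a non-zero
value, at which point the macro restores the saved signal state itself.

	static jmp_buf resume_buf;			/* illustrative */

	static void do_some_work(void) { /* placeholder */ }

	static void run_then_yield(void)
	{
		int enabled;

		if (UML_SIGSETJMP(&resume_buf, enabled) == 0) {
			/* first pass: 'enabled' holds get_signals(), keep working */
			do_some_work();
			return;
		}
		/* second pass: reached via UML_SIGLONGJMP; the macro has already
		 * restored the signal state saved in 'enabled'. */
	}

	static void resume(void)
	{
		UML_SIGLONGJMP(&resume_buf, 1);
	}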
index 2d88afd0cf167e2477c27b650e37ac40f8e16967..e7539a8451efece50aa56d8ee1b37159b054f496 100644 (file)
@@ -9,22 +9,11 @@
 #include "linux/config.h"
 
 #ifdef CONFIG_MODE_TT
-#include "mode_kern-tt.h"
+#include "mode_kern_tt.h"
 #endif
 
 #ifdef CONFIG_MODE_SKAS
-#include "mode_kern-skas.h"
+#include "mode_kern_skas.h"
 #endif
 
 #endif
-
-/*
- * Overrides for Emacs so that we follow Linus's tabbing style.
- * Emacs will notice this stuff at the end of the file and automatically
- * adjust the settings for this buffer only.  This must remain at the end
- * of the file.
- * ---------------------------------------------------------------------------
- * Local variables:
- * c-file-style: "linux"
- * End:
- */
index dd72d66cf0ed18457361f0b004d02c2d1136f5c4..eb1710b81255504bfdcb63f052bde2f77981a1fa 100644 (file)
@@ -11,6 +11,7 @@
 #include "../os/include/file.h"
 #include "sysdep/ptrace.h"
 #include "kern_util.h"
+#include "skas/mm_id.h"
 
 #define OS_TYPE_FILE 1 
 #define OS_TYPE_DIR 2 
@@ -190,11 +191,12 @@ extern int os_protect_memory(void *addr, unsigned long len,
                             int r, int w, int x);
 extern int os_unmap_memory(void *addr, int len);
 extern void os_flush_stdout(void);
-extern unsigned long long os_usecs(void);
 
 /* tt.c
  * for tt mode only (will be deleted in future...)
  */
+extern void stop(void);
+extern int wait_for_stop(int pid, int sig, int cont_type, void *relay);
 extern int protect_memory(unsigned long addr, unsigned long len,
                          int r, int w, int x, int must_succeed);
 extern void forward_pending_sigio(int target);
@@ -230,9 +232,63 @@ extern void block_signals(void);
 extern void unblock_signals(void);
 extern int get_signals(void);
 extern int set_signals(int enable);
+extern void os_usr1_signal(int on);
 
 /* trap.c */
 extern void os_fill_handlinfo(struct kern_handlers h);
 extern void do_longjmp(void *p, int val);
 
+/* util.c */
+extern void stack_protections(unsigned long address);
+extern void task_protections(unsigned long address);
+extern int raw(int fd);
+extern void setup_machinename(char *machine_out);
+extern void setup_hostinfo(void);
+extern int setjmp_wrapper(void (*proc)(void *, void *), ...);
+
+/* time.c */
+#define BILLION (1000 * 1000 * 1000)
+
+extern void switch_timers(int to_real);
+extern void idle_sleep(int secs);
+extern void enable_timer(void);
+extern void disable_timer(void);
+extern void user_time_init(void);
+extern void uml_idle_timer(void);
+extern unsigned long long os_nsecs(void);
+
+/* skas/mem.c */
+extern long run_syscall_stub(struct mm_id * mm_idp,
+                            int syscall, unsigned long *args, long expected,
+                            void **addr, int done);
+extern long syscall_stub_data(struct mm_id * mm_idp,
+                             unsigned long *data, int data_count,
+                             void **addr, void **stub_addr);
+extern int map(struct mm_id * mm_idp, unsigned long virt,
+              unsigned long len, int r, int w, int x, int phys_fd,
+              unsigned long long offset, int done, void **data);
+extern int unmap(struct mm_id * mm_idp, void *addr, unsigned long len,
+                int done, void **data);
+extern int protect(struct mm_id * mm_idp, unsigned long addr,
+                  unsigned long len, int r, int w, int x, int done,
+                  void **data);
+
+/* skas/process.c */
+extern int is_skas_winch(int pid, int fd, void *data);
+extern int start_userspace(unsigned long stub_stack);
+extern int copy_context_skas0(unsigned long stack, int pid);
+extern void userspace(union uml_pt_regs *regs);
+extern void map_stub_pages(int fd, unsigned long code,
+                          unsigned long data, unsigned long stack);
+extern void new_thread(void *stack, void **switch_buf_ptr,
+                        void **fork_buf_ptr, void (*handler)(int));
+extern void thread_wait(void *sw, void *fb);
+extern void switch_threads(void *me, void *next);
+extern int start_idle_thread(void *stack, void *switch_buf_ptr,
+                            void **fork_buf_ptr);
+extern void initial_thread_cb_skas(void (*proc)(void *),
+                                void *arg);
+extern void halt_skas(void);
+extern void reboot_skas(void);
+
 #endif
diff --git a/arch/um/include/skas/mmu-skas.h b/arch/um/include/skas/mmu-skas.h
new file mode 100644 (file)
index 0000000..d8869a6
--- /dev/null
@@ -0,0 +1,24 @@
+/*
+ * Copyright (C) 2002 Jeff Dike (jdike@karaya.com)
+ * Licensed under the GPL
+ */
+
+#ifndef __SKAS_MMU_H
+#define __SKAS_MMU_H
+
+#include "linux/config.h"
+#include "mm_id.h"
+#include "asm/ldt.h"
+
+struct mmu_context_skas {
+       struct mm_id id;
+       unsigned long last_page_table;
+#ifdef CONFIG_3_LEVEL_PGTABLES
+       unsigned long last_pmd;
+#endif
+       uml_ldt_t ldt;
+};
+
+extern void switch_mm_skas(struct mm_id * mm_idp);
+
+#endif
diff --git a/arch/um/include/skas/mode-skas.h b/arch/um/include/skas/mode-skas.h
new file mode 100644 (file)
index 0000000..260065c
--- /dev/null
@@ -0,0 +1,19 @@
+/*
+ * Copyright (C) 2002 Jeff Dike (jdike@karaya.com)
+ * Licensed under the GPL
+ */
+
+#ifndef __MODE_SKAS_H__
+#define __MODE_SKAS_H__
+
+#include <sysdep/ptrace.h>
+
+extern unsigned long exec_regs[];
+extern unsigned long exec_fp_regs[];
+extern unsigned long exec_fpx_regs[];
+extern int have_fpx_regs;
+
+extern void sig_handler_common_skas(int sig, void *sc_ptr);
+extern void kill_off_processes_skas(void);
+
+#endif
similarity index 77%
rename from arch/um/kernel/skas/include/mode_kern-skas.h
rename to arch/um/include/skas/mode_kern_skas.h
index c97a80dfe37052bb5bf8f51378ae0f809d29b15e..63c58739bde027baec6e1d6e68bd03d1564da22c 100644 (file)
@@ -18,7 +18,6 @@ extern int copy_thread_skas(int nr, unsigned long clone_flags,
                            unsigned long sp, unsigned long stack_top,
                            struct task_struct *p, struct pt_regs *regs);
 extern void release_thread_skas(struct task_struct *task);
-extern void initial_thread_cb_skas(void (*proc)(void *), void *arg);
 extern void init_idle_skas(void);
 extern void flush_tlb_kernel_range_skas(unsigned long start,
                                        unsigned long end);
@@ -39,14 +38,3 @@ extern int thread_pid_skas(struct task_struct *task);
 #define kmem_end_skas (host_task_size - 1024 * 1024)
 
 #endif
-
-/*
- * Overrides for Emacs so that we follow Linus's tabbing style.
- * Emacs will notice this stuff at the end of the file and automatically
- * adjust the settings for this buffer only.  This must remain at the end
- * of the file.
- * ---------------------------------------------------------------------------
- * Local variables:
- * c-file-style: "linux"
- * End:
- */
similarity index 60%
rename from arch/um/kernel/skas/include/proc_mm.h
rename to arch/um/include/skas/proc_mm.h
index cce61a679052cc42174245136665d715f6a87b9c..9028092096033d0fdcdc4aff1dece32b239e64e2 100644 (file)
@@ -1,4 +1,4 @@
-/* 
+/*
  * Copyright (C) 2002 Jeff Dike (jdike@karaya.com)
  * Licensed under the GPL
  */
@@ -22,13 +22,13 @@ struct mm_mmap {
 
 struct mm_munmap {
        unsigned long addr;
-       unsigned long len;      
+       unsigned long len;
 };
 
 struct mm_mprotect {
        unsigned long addr;
        unsigned long len;
-        unsigned int prot;
+       unsigned int prot;
 };
 
 struct proc_mm_op {
@@ -42,14 +42,3 @@ struct proc_mm_op {
 };
 
 #endif
-
-/*
- * Overrides for Emacs so that we follow Linus's tabbing style.
- * Emacs will notice this stuff at the end of the file and automatically
- * adjust the settings for this buffer only.  This must remain at the end
- * of the file.
- * ---------------------------------------------------------------------------
- * Local variables:
- * c-file-style: "linux"
- * End:
- */
diff --git a/arch/um/include/skas/skas.h b/arch/um/include/skas/skas.h
new file mode 100644 (file)
index 0000000..8635728
--- /dev/null
@@ -0,0 +1,26 @@
+/*
+ * Copyright (C) 2002 Jeff Dike (jdike@karaya.com)
+ * Licensed under the GPL
+ */
+
+#ifndef __SKAS_H
+#define __SKAS_H
+
+#include "mm_id.h"
+#include "sysdep/ptrace.h"
+
+extern int userspace_pid[];
+extern int proc_mm, ptrace_faultinfo, ptrace_ldt;
+extern int skas_needs_stub;
+
+extern int user_thread(unsigned long stack, int flags);
+extern void new_thread_proc(void *stack, void (*handler)(int sig));
+extern void new_thread_handler(int sig);
+extern void handle_syscall(union uml_pt_regs *regs);
+extern void user_signal(int sig, union uml_pt_regs *regs, int pid);
+extern int new_mm(unsigned long stack);
+extern void get_skas_faultinfo(int pid, struct faultinfo * fi);
+extern long execute_syscall_skas(void *r);
+extern unsigned long current_stub_stack(void);
+
+#endif
similarity index 63%
rename from arch/um/kernel/skas/include/uaccess-skas.h
rename to arch/um/include/skas/uaccess-skas.h
index 64516c556cdf98a425b14d4d7c961babc224125d..224a75f4c02599a3cf314dd4146b6d1a92448858 100644 (file)
@@ -19,14 +19,3 @@ extern int clear_user_skas(void __user *mem, int len);
 extern int strnlen_user_skas(const void __user *str, int len);
 
 #endif
-
-/*
- * Overrides for Emacs so that we follow Linus's tabbing style.
- * Emacs will notice this stuff at the end of the file and automatically
- * adjust the settings for this buffer only.  This must remain at the end
- * of the file.
- * ---------------------------------------------------------------------------
- * Local variables:
- * c-file-style: "linux"
- * End:
- */
diff --git a/arch/um/include/time_user.h b/arch/um/include/time_user.h
deleted file mode 100644 (file)
index 17d7ef2..0000000
+++ /dev/null
@@ -1,19 +0,0 @@
-/* 
- * Copyright (C) 2002 Jeff Dike (jdike@karaya.com)
- * Licensed under the GPL
- */
-
-#ifndef __TIME_USER_H__
-#define __TIME_USER_H__
-
-extern void timer(void);
-extern void switch_timers(int to_real);
-extern void idle_sleep(int secs);
-extern void enable_timer(void);
-extern void prepare_timer(void * ptr);
-extern void disable_timer(void);
-extern unsigned long time_lock(void);
-extern void time_unlock(unsigned long);
-extern void user_time_init(void);
-
-#endif
similarity index 99%
rename from arch/um/kernel/tt/include/debug.h
rename to arch/um/include/tt/debug.h
index 738435461e137ddf51ec54ca395677b5965ef037..9778fa8382965d26bf1b96cff96638b1b2180ee0 100644 (file)
@@ -1,4 +1,4 @@
-/* 
+/*
  * Copyright (C) 2000, 2001, 2002  Jeff Dike (jdike@karaya.com) and
  * Lars Brinkhoff.
  * Licensed under the GPL
diff --git a/arch/um/include/tt/mmu-tt.h b/arch/um/include/tt/mmu-tt.h
new file mode 100644 (file)
index 0000000..572a78b
--- /dev/null
@@ -0,0 +1,12 @@
+/*
+ * Copyright (C) 2002 Jeff Dike (jdike@karaya.com)
+ * Licensed under the GPL
+ */
+
+#ifndef __TT_MMU_H
+#define __TT_MMU_H
+
+struct mmu_context_tt {
+};
+
+#endif
diff --git a/arch/um/include/tt/mode-tt.h b/arch/um/include/tt/mode-tt.h
new file mode 100644 (file)
index 0000000..2823cd5
--- /dev/null
@@ -0,0 +1,23 @@
+/*
+ * Copyright (C) 2002 Jeff Dike (jdike@karaya.com)
+ * Licensed under the GPL
+ */
+
+#ifndef __MODE_TT_H__
+#define __MODE_TT_H__
+
+#include "sysdep/ptrace.h"
+
+enum { OP_NONE, OP_EXEC, OP_FORK, OP_TRACE_ON, OP_REBOOT, OP_HALT, OP_CB };
+
+extern int tracing_pid;
+
+extern int tracer(int (*init_proc)(void *), void *sp);
+extern void sig_handler_common_tt(int sig, void *sc);
+extern void syscall_handler_tt(int sig, union uml_pt_regs *regs);
+extern void reboot_tt(void);
+extern void halt_tt(void);
+extern int is_tracer_winch(int pid, int fd, void *data);
+extern void kill_off_processes_tt(void);
+
+#endif
diff --git a/arch/um/include/tt/mode_kern_tt.h b/arch/um/include/tt/mode_kern_tt.h
new file mode 100644 (file)
index 0000000..efa0012
--- /dev/null
@@ -0,0 +1,41 @@
+/*
+ * Copyright (C) 2002 Jeff Dike (jdike@karaya.com)
+ * Licensed under the GPL
+ */
+
+#ifndef __TT_MODE_KERN_H__
+#define __TT_MODE_KERN_H__
+
+#include "linux/sched.h"
+#include "asm/page.h"
+#include "asm/ptrace.h"
+#include "asm/uaccess.h"
+
+extern void switch_to_tt(void *prev, void *next);
+extern void flush_thread_tt(void);
+extern void start_thread_tt(struct pt_regs *regs, unsigned long eip,
+                          unsigned long esp);
+extern int copy_thread_tt(int nr, unsigned long clone_flags, unsigned long sp,
+                         unsigned long stack_top, struct task_struct *p,
+                         struct pt_regs *regs);
+extern void release_thread_tt(struct task_struct *task);
+extern void initial_thread_cb_tt(void (*proc)(void *), void *arg);
+extern void init_idle_tt(void);
+extern void flush_tlb_kernel_range_tt(unsigned long start, unsigned long end);
+extern void flush_tlb_kernel_vm_tt(void);
+extern void __flush_tlb_one_tt(unsigned long addr);
+extern void flush_tlb_range_tt(struct vm_area_struct *vma,
+                              unsigned long start, unsigned long end);
+extern void flush_tlb_mm_tt(struct mm_struct *mm);
+extern void force_flush_all_tt(void);
+extern long execute_syscall_tt(void *r);
+extern void before_mem_tt(unsigned long brk_start);
+extern unsigned long set_task_sizes_tt(int arg, unsigned long *host_size_out,
+                                      unsigned long *task_size_out);
+extern int start_uml_tt(void);
+extern int external_pid_tt(struct task_struct *task);
+extern int thread_pid_tt(struct task_struct *task);
+
+#define kmem_end_tt (host_task_size - ABOVE_KMEM)
+
+#endif
similarity index 73%
rename from arch/um/kernel/tt/include/tt.h
rename to arch/um/include/tt/tt.h
index c667b67af405fc948396e9370e9c247f4d2e8a56..8085219801861938cae75cc5c25a7edaf53cead3 100644 (file)
@@ -1,4 +1,4 @@
-/* 
+/*
  * Copyright (C) 2002 Jeff Dike (jdike@karaya.com)
  * Licensed under the GPL
  */
@@ -34,13 +34,3 @@ extern long execute_syscall_tt(void *r);
 
 #endif
 
-/*
- * Overrides for Emacs so that we follow Linus's tabbing style.
- * Emacs will notice this stuff at the end of the file and automatically
- * adjust the settings for this buffer only.  This must remain at the end
- * of the file.
- * ---------------------------------------------------------------------------
- * Local variables:
- * c-file-style: "linux"
- * End:
- */
similarity index 80%
rename from arch/um/kernel/tt/include/uaccess-tt.h
rename to arch/um/include/tt/uaccess-tt.h
index b9bfe9c481c4eadd31601623bbe2e9de89dec1ea..b19645f32f24d9e5853b11c65afc200e9fd7c42b 100644 (file)
@@ -46,14 +46,3 @@ extern int clear_user_tt(void __user *mem, int len);
 extern int strnlen_user_tt(const void __user *str, int len);
 
 #endif
-
-/*
- * Overrides for Emacs so that we follow Linus's tabbing style.
- * Emacs will notice this stuff at the end of the file and automatically
- * adjust the settings for this buffer only.  This must remain at the end
- * of the file.
- * ---------------------------------------------------------------------------
- * Local variables:
- * c-file-style: "linux"
- * End:
- */
index 0f865ef46918190d528f556506ca9439d75da175..91b0ac4ad88cceb8b9961c6ba667f4d16133a197 100644 (file)
@@ -18,6 +18,7 @@ extern int open_gdb_chan(void);
 extern unsigned long strlcpy(char *, const char *, unsigned long);
 extern unsigned long strlcat(char *, const char *, unsigned long);
 extern void *um_vmalloc(int size);
+extern void *um_vmalloc_atomic(int size);
 extern void vfree(void *ptr);
 
 #endif
index c1dbd77b073f322ef3ee6369deeefb4235b04a73..a6f1f176cf84ad6e95ced622be62d20879f72a37 100644 (file)
@@ -44,10 +44,6 @@ extern unsigned long brk_start;
 extern int pty_output_sigio;
 extern int pty_close_sigio;
 
-extern void stop(void);
-extern void stack_protections(unsigned long address);
-extern void task_protections(unsigned long address);
-extern int wait_for_stop(int pid, int sig, int cont_type, void *relay);
 extern void *add_signal_handler(int sig, void (*handler)(int));
 extern int linux_main(int argc, char **argv);
 extern void set_cmdline(char *cmd);
@@ -55,8 +51,6 @@ extern void input_cb(void (*proc)(void *), void *arg, int arg_len);
 extern int get_pty(void);
 extern void *um_kmalloc(int size);
 extern int switcheroo(int fd, int prot, void *from, void *to, int size);
-extern void setup_machinename(char *machine_out);
-extern void setup_hostinfo(void);
 extern void do_exec(int old_pid, int new_pid);
 extern void tracer_panic(char *msg, ...);
 extern int detach(int pid, int sig);
@@ -70,18 +64,6 @@ extern int cpu_feature(char *what, char *buf, int len);
 extern int arch_handle_signal(int sig, union uml_pt_regs *regs);
 extern int arch_fixup(unsigned long address, void *sc_ptr);
 extern void arch_init_thread(void);
-extern int setjmp_wrapper(void (*proc)(void *, void *), ...);
 extern int raw(int fd);
 
 #endif
-
-/*
- * Overrides for Emacs so that we follow Linus's tabbing style.
- * Emacs will notice this stuff at the end of the file and automatically
- * adjust the settings for this buffer only.  This must remain at the end
- * of the file.
- * ---------------------------------------------------------------------------
- * Local variables:
- * c-file-style: "linux"
- * End:
- */
index 193cc2b7448d7e6c08b47f1828870a61a96d414b..693018ba80f1f956d8c7d13fd863847d433514e3 100644 (file)
@@ -9,9 +9,8 @@ clean-files :=
 obj-y = config.o exec_kern.o exitcode.o \
        init_task.o irq.o irq_user.o ksyms.o mem.o physmem.o \
        process_kern.o ptrace.o reboot.o resource.o sigio_user.o sigio_kern.o \
-       signal_kern.o smp.o syscall_kern.o sysrq.o time.o \
-       time_kern.o tlb.o trap_kern.o uaccess.o um_arch.o umid.o \
-       user_util.o
+       signal_kern.o smp.o syscall_kern.o sysrq.o \
+       time_kern.o tlb.o trap_kern.o uaccess.o um_arch.o umid.o
 
 obj-$(CONFIG_BLK_DEV_INITRD) += initrd.o
 obj-$(CONFIG_GPROF)    += gprof_syms.o
@@ -24,7 +23,7 @@ obj-$(CONFIG_MODE_SKAS) += skas/
 
 user-objs-$(CONFIG_TTY_LOG) += tty_log.o
 
-USER_OBJS := $(user-objs-y) config.o time.o tty_log.o user_util.o
+USER_OBJS := $(user-objs-y) config.o tty_log.o
 
 include arch/um/scripts/Makefile.rules
 
index efd222ffe20e1df22fa2e04a4c61cdd5d6a3a419..569fe8b9b0535d362d20e3b14d215f46aaae67cb 100644 (file)
@@ -17,7 +17,6 @@
 #include "irq_user.h"
 #include "tlb.h"
 #include "os.h"
-#include "time_user.h"
 #include "choose-mode.h"
 #include "mode_kern.h"
 
index 7f13b85d26564609d2f1c2fca954024a3bf28a4f..3113cab8675e6358c6909201ebbb3ae90beacf60 100644 (file)
@@ -39,7 +39,6 @@
 #include "init.h"
 #include "irq_user.h"
 #include "mem_user.h"
-#include "time_user.h"
 #include "tlb.h"
 #include "frame_kern.h"
 #include "sigcontext.h"
@@ -288,17 +287,27 @@ EXPORT_SYMBOL(disable_hlt);
 
 void *um_kmalloc(int size)
 {
-       return(kmalloc(size, GFP_KERNEL));
+       return kmalloc(size, GFP_KERNEL);
 }
 
 void *um_kmalloc_atomic(int size)
 {
-       return(kmalloc(size, GFP_ATOMIC));
+       return kmalloc(size, GFP_ATOMIC);
 }
 
 void *um_vmalloc(int size)
 {
-       return(vmalloc(size));
+       return vmalloc(size);
+}
+
+void *um_vmalloc_atomic(int size)
+{
+       return __vmalloc(size, GFP_ATOMIC | __GFP_HIGHMEM, PAGE_KERNEL);
+}
+
+int __cant_sleep(void) {
+       return in_atomic() || irqs_disabled() || in_interrupt();
+       /* Is in_interrupt() really needed? */
 }
 
 unsigned long get_fault_addr(void)
@@ -370,11 +379,6 @@ int smp_sigio_handler(void)
        return(0);
 }
 
-int um_in_interrupt(void)
-{
-       return(in_interrupt());
-}
-
 int cpu(void)
 {
        return(current_thread->cpu);
index 62e5cfdf21881ca1d8bad038e497a7026afa2f6e..f7b18e157d3509d9b4302b1dc8dfd39c02756630 100644 (file)
@@ -337,70 +337,103 @@ int ignore_sigio_fd(int fd)
        return(err);
 }
 
-static int setup_initial_poll(int fd)
+static struct pollfd* setup_initial_poll(int fd)
 {
        struct pollfd *p;
 
-       p = um_kmalloc_atomic(sizeof(struct pollfd));
-       if(p == NULL){
+       p = um_kmalloc(sizeof(struct pollfd));
+       if (p == NULL) {
                printk("setup_initial_poll : failed to allocate poll\n");
-               return(-1);
+               return NULL;
        }
        *p = ((struct pollfd) { .fd     = fd,
                                .events         = POLLIN,
                                .revents        = 0 });
-       current_poll = ((struct pollfds) { .poll        = p,
-                                          .used        = 1,
-                                          .size        = 1 });
-       return(0);
+       return p;
 }
 
 void write_sigio_workaround(void)
 {
        unsigned long stack;
+       struct pollfd *p;
        int err;
+       int l_write_sigio_fds[2];
+       int l_sigio_private[2];
+       int l_write_sigio_pid;
 
+       /* We call this *tons* of times - and most of the time we must just bail out early. */
        sigio_lock();
-       if(write_sigio_pid != -1)
-               goto out;
+       l_write_sigio_pid = write_sigio_pid;
+       sigio_unlock();
 
-       err = os_pipe(write_sigio_fds, 1, 1);
+       if (l_write_sigio_pid != -1)
+               return;
+
+       err = os_pipe(l_write_sigio_fds, 1, 1);
        if(err < 0){
                printk("write_sigio_workaround - os_pipe 1 failed, "
                       "err = %d\n", -err);
-               goto out;
+               return;
        }
-       err = os_pipe(sigio_private, 1, 1);
+       err = os_pipe(l_sigio_private, 1, 1);
        if(err < 0){
-               printk("write_sigio_workaround - os_pipe 2 failed, "
+               printk("write_sigio_workaround - os_pipe 1 failed, "
                       "err = %d\n", -err);
                goto out_close1;
        }
-       if(setup_initial_poll(sigio_private[1]))
+
+       p = setup_initial_poll(l_sigio_private[1]);
+       if(!p)
                goto out_close2;
 
-       write_sigio_pid = run_helper_thread(write_sigio_thread, NULL, 
+       sigio_lock();
+
+       /* Did we race? Don't try to optimize this, please - it's unlikely
+        * to happen, and happens at most once per boot. */
+       if(write_sigio_pid != -1)
+               goto out_unlock;
+
+       write_sigio_pid = run_helper_thread(write_sigio_thread, NULL,
                                            CLONE_FILES | CLONE_VM, &stack, 0);
 
-       if(write_sigio_pid < 0) goto out_close2;
+       if (write_sigio_pid < 0)
+               goto out_clear;
 
-       if(write_sigio_irq(write_sigio_fds[0])) 
+       if (write_sigio_irq(l_write_sigio_fds[0]))
                goto out_kill;
 
- out:
+       /* Success, finally. */
+       memcpy(write_sigio_fds, l_write_sigio_fds, sizeof(l_write_sigio_fds));
+       memcpy(sigio_private, l_sigio_private, sizeof(l_sigio_private));
+
+       current_poll = ((struct pollfds) { .poll        = p,
+                                          .used        = 1,
+                                          .size        = 1 });
+
        sigio_unlock();
        return;
 
  out_kill:
-       os_kill_process(write_sigio_pid, 1);
+       l_write_sigio_pid = write_sigio_pid;
        write_sigio_pid = -1;
+       sigio_unlock();
+       /* Going to call waitpid, avoid holding the lock. */
+       os_kill_process(l_write_sigio_pid, 1);
+       goto out_free;
+
+ out_clear:
+       write_sigio_pid = -1;
+ out_unlock:
+       sigio_unlock();
+ out_free:
+       kfree(p);
  out_close2:
-       os_close_file(sigio_private[0]);
-       os_close_file(sigio_private[1]);
+       os_close_file(l_sigio_private[0]);
+       os_close_file(l_sigio_private[1]);
  out_close1:
-       os_close_file(write_sigio_fds[0]);
-       os_close_file(write_sigio_fds[1]);
-       sigio_unlock();
+       os_close_file(l_write_sigio_fds[0]);
+       os_close_file(l_write_sigio_fds[1]);
+       return;
 }
 
 int read_sigio_fd(int fd)
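
The reworked write_sigio_workaround() above does all of its pipe and helper-thread setup into locals with sigio_lock dropped, then retakes the lock only to re-check write_sigio_pid and publish the results, so nothing blocking (os_pipe(), waitpid() via os_kill_process()) runs under the lock. A minimal pthreads sketch of that check / set up unlocked / re-check / publish pattern follows; the names (helper_pid, setup_helper) are hypothetical.

/* Sketch of the double-checked publication pattern.  Build: cc -pthread demo.c */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int helper_pid = -1;             /* published state, guarded by lock */

static int setup_helper(void)
{
        /* Stands in for os_pipe() + run_helper_thread(); may block. */
        return (int) getpid();
}

void start_helper_once(void)
{
        int l_pid;

        /* Cheap locked check - this is called very often. */
        pthread_mutex_lock(&lock);
        l_pid = helper_pid;
        pthread_mutex_unlock(&lock);
        if (l_pid != -1)
                return;

        l_pid = setup_helper();         /* no lock held while blocking */

        pthread_mutex_lock(&lock);
        if (helper_pid != -1) {         /* did we race with another caller? */
                pthread_mutex_unlock(&lock);
                /* the loser of the race would tear down l_pid here */
                return;
        }
        helper_pid = l_pid;             /* publish */
        pthread_mutex_unlock(&lock);
        printf("helper started, pid %d\n", helper_pid);
}

int main(void)
{
        start_helper_once();
        start_helper_once();            /* second call sees helper_pid set */
        return 0;
}
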
index 7b0e0e81c16196d7244819316a2b0cf725f1e62e..da17b7541e08dd4c2526159f6a2cfaab94ee7486 100644 (file)
@@ -99,31 +99,46 @@ static int handle_signal(struct pt_regs *regs, unsigned long signr,
        return err;
 }
 
-static int kern_do_signal(struct pt_regs *regs, sigset_t *oldset)
+static int kern_do_signal(struct pt_regs *regs)
 {
        struct k_sigaction ka_copy;
        siginfo_t info;
+       sigset_t *oldset;
        int sig, handled_sig = 0;
 
+       if (test_thread_flag(TIF_RESTORE_SIGMASK))
+               oldset = &current->saved_sigmask;
+       else
+               oldset = &current->blocked;
+
        while((sig = get_signal_to_deliver(&info, &ka_copy, regs, NULL)) > 0){
                handled_sig = 1;
                /* Whee!  Actually deliver the signal.  */
-               if(!handle_signal(regs, sig, &ka_copy, &info, oldset))
+               if(!handle_signal(regs, sig, &ka_copy, &info, oldset)){
+                       /* a signal was successfully delivered; the saved
+                        * sigmask will have been stored in the signal frame,
+                        * and will be restored by sigreturn, so we can simply
+                        * clear the TIF_RESTORE_SIGMASK flag */
+                       if (test_thread_flag(TIF_RESTORE_SIGMASK))
+                               clear_thread_flag(TIF_RESTORE_SIGMASK);
                        break;
+               }
        }
 
        /* Did we come from a system call? */
        if(!handled_sig && (PT_REGS_SYSCALL_NR(regs) >= 0)){
                /* Restart the system call - no handlers present */
-               if(PT_REGS_SYSCALL_RET(regs) == -ERESTARTNOHAND ||
-                  PT_REGS_SYSCALL_RET(regs) == -ERESTARTSYS ||
-                  PT_REGS_SYSCALL_RET(regs) == -ERESTARTNOINTR){
+               switch(PT_REGS_SYSCALL_RET(regs)){
+               case -ERESTARTNOHAND:
+               case -ERESTARTSYS:
+               case -ERESTARTNOINTR:
                        PT_REGS_ORIG_SYSCALL(regs) = PT_REGS_SYSCALL_NR(regs);
                        PT_REGS_RESTART_SYSCALL(regs);
-               }
-               else if(PT_REGS_SYSCALL_RET(regs) == -ERESTART_RESTARTBLOCK){
+                       break;
+               case -ERESTART_RESTARTBLOCK:
                        PT_REGS_SYSCALL_RET(regs) = __NR_restart_syscall;
                        PT_REGS_RESTART_SYSCALL(regs);
+                       break;
                }
        }
 
@@ -137,12 +152,19 @@ static int kern_do_signal(struct pt_regs *regs, sigset_t *oldset)
        if(current->ptrace & PT_DTRACE)
                current->thread.singlestep_syscall =
                        is_syscall(PT_REGS_IP(&current->thread.regs));
+
+       /* if there's no signal to deliver, we just put the saved sigmask
+        * back */
+       if (!handled_sig && test_thread_flag(TIF_RESTORE_SIGMASK)) {
+               clear_thread_flag(TIF_RESTORE_SIGMASK);
+               sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
+       }
        return(handled_sig);
 }
 
 int do_signal(void)
 {
-       return(kern_do_signal(&current->thread.regs, &current->blocked));
+       return(kern_do_signal(&current->thread.regs));
 }
 
 /*
@@ -150,63 +172,20 @@ int do_signal(void)
  */
 long sys_sigsuspend(int history0, int history1, old_sigset_t mask)
 {
-       sigset_t saveset;
-
        mask &= _BLOCKABLE;
        spin_lock_irq(&current->sighand->siglock);
-       saveset = current->blocked;
+       current->saved_sigmask = current->blocked;
        siginitset(&current->blocked, mask);
        recalc_sigpending();
        spin_unlock_irq(&current->sighand->siglock);
 
-       PT_REGS_SYSCALL_RET(&current->thread.regs) = -EINTR;
-       while (1) {
-               current->state = TASK_INTERRUPTIBLE;
-               schedule();
-               if(kern_do_signal(&current->thread.regs, &saveset))
-                       return(-EINTR);
-       }
-}
-
-long sys_rt_sigsuspend(sigset_t __user *unewset, size_t sigsetsize)
-{
-       sigset_t saveset, newset;
-
-       /* XXX: Don't preclude handling different sized sigset_t's.  */
-       if (sigsetsize != sizeof(sigset_t))
-               return -EINVAL;
-
-       if (copy_from_user(&newset, unewset, sizeof(newset)))
-               return -EFAULT;
-       sigdelsetmask(&newset, ~_BLOCKABLE);
-
-       spin_lock_irq(&current->sighand->siglock);
-       saveset = current->blocked;
-       current->blocked = newset;
-       recalc_sigpending();
-       spin_unlock_irq(&current->sighand->siglock);
-
-       PT_REGS_SYSCALL_RET(&current->thread.regs) = -EINTR;
-       while (1) {
-               current->state = TASK_INTERRUPTIBLE;
-               schedule();
-               if (kern_do_signal(&current->thread.regs, &saveset))
-                       return(-EINTR);
-       }
+       current->state = TASK_INTERRUPTIBLE;
+       schedule();
+       set_thread_flag(TIF_RESTORE_SIGMASK);
+       return -ERESTARTNOHAND;
 }
 
 long sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss)
 {
        return(do_sigaltstack(uss, uoss, PT_REGS_SP(&current->thread.regs)));
 }
-
-/*
- * Overrides for Emacs so that we follow Linus's tabbing style.
- * Emacs will notice this stuff at the end of the file and automatically
- * adjust the settings for this buffer only.  This must remain at the end
- * of the file.
- * ---------------------------------------------------------------------------
- * Local variables:
- * c-file-style: "linux"
- * End:
- */
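
sys_sigsuspend() above now saves the caller's mask in current->saved_sigmask, returns -ERESTARTNOHAND, and lets TIF_RESTORE_SIGMASK put the mask back in kern_do_signal(), instead of looping over schedule(). The userspace program below only illustrates the contract being implemented, using libc sigprocmask()/sigsuspend(); it is an analogy, not the kernel path.

/* Sketch: temporarily swap the signal mask, sleep until a handled signal
 * arrives, then restore the saved mask. */
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static volatile sig_atomic_t got_usr1;

static void on_usr1(int sig)
{
        got_usr1 = 1;
}

int main(void)
{
        sigset_t block_usr1, wait_mask, saved;
        struct sigaction sa = { .sa_handler = on_usr1 };

        sigaction(SIGUSR1, &sa, NULL);

        /* Block SIGUSR1 and remember the previous mask. */
        sigemptyset(&block_usr1);
        sigaddset(&block_usr1, SIGUSR1);
        sigprocmask(SIG_BLOCK, &block_usr1, &saved);

        kill(getpid(), SIGUSR1);        /* becomes pending, not delivered */

        /* Atomically install a mask that lets SIGUSR1 through and sleep;
         * the handler runs, then sigsuspend() puts the old mask back. */
        sigemptyset(&wait_mask);
        sigsuspend(&wait_mask);

        /* Undo our explicit block as well, back to the start-up mask. */
        sigprocmask(SIG_SETMASK, &saved, NULL);
        printf("got_usr1 = %d\n", (int) got_usr1);
        return 0;
}
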
index 7a9fc16d71d4805df78eb6505516e3e88537aa78..57181a920d48c9658157b8b05ff1586bafa82f58 100644 (file)
@@ -1,12 +1,12 @@
-# 
+#
 # Copyright (C) 2002 - 2004 Jeff Dike (jdike@addtoit.com)
 # Licensed under the GPL
 #
 
-obj-y := clone.o exec_kern.o mem.o mem_user.o mmu.o process.o process_kern.o \
+obj-y := clone.o exec_kern.o mem.o mmu.o process_kern.o \
        syscall.o tlb.o uaccess.o
 
-USER_OBJS := process.o clone.o
+USER_OBJS := clone.o
 
 include arch/um/scripts/Makefile.rules
 
diff --git a/arch/um/kernel/skas/include/mmu-skas.h b/arch/um/kernel/skas/include/mmu-skas.h
deleted file mode 100644 (file)
index 44110c5..0000000
+++ /dev/null
@@ -1,35 +0,0 @@
-/*
- * Copyright (C) 2002 Jeff Dike (jdike@karaya.com)
- * Licensed under the GPL
- */
-
-#ifndef __SKAS_MMU_H
-#define __SKAS_MMU_H
-
-#include "linux/config.h"
-#include "mm_id.h"
-#include "asm/ldt.h"
-
-struct mmu_context_skas {
-       struct mm_id id;
-        unsigned long last_page_table;
-#ifdef CONFIG_3_LEVEL_PGTABLES
-        unsigned long last_pmd;
-#endif
-       uml_ldt_t ldt;
-};
-
-extern void switch_mm_skas(struct mm_id * mm_idp);
-
-#endif
-
-/*
- * Overrides for Emacs so that we follow Linus's tabbing style.
- * Emacs will notice this stuff at the end of the file and automatically
- * adjust the settings for this buffer only.  This must remain at the end
- * of the file.
- * ---------------------------------------------------------------------------
- * Local variables:
- * c-file-style: "linux"
- * End:
- */
diff --git a/arch/um/kernel/skas/include/mode-skas.h b/arch/um/kernel/skas/include/mode-skas.h
deleted file mode 100644 (file)
index bcd26a6..0000000
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- * Copyright (C) 2002 Jeff Dike (jdike@karaya.com)
- * Licensed under the GPL
- */
-
-#ifndef __MODE_SKAS_H__
-#define __MODE_SKAS_H__
-
-#include <sysdep/ptrace.h>
-
-extern unsigned long exec_regs[];
-extern unsigned long exec_fp_regs[];
-extern unsigned long exec_fpx_regs[];
-extern int have_fpx_regs;
-
-extern void sig_handler_common_skas(int sig, void *sc_ptr);
-extern void halt_skas(void);
-extern void reboot_skas(void);
-extern void kill_off_processes_skas(void);
-extern int is_skas_winch(int pid, int fd, void *data);
-
-#endif
-
-/*
- * Overrides for Emacs so that we follow Linus's tabbing style.
- * Emacs will notice this stuff at the end of the file and automatically
- * adjust the settings for this buffer only.  This must remain at the end
- * of the file.
- * ---------------------------------------------------------------------------
- * Local variables:
- * c-file-style: "linux"
- * End:
- */
diff --git a/arch/um/kernel/skas/include/skas.h b/arch/um/kernel/skas/include/skas.h
deleted file mode 100644 (file)
index 01d489d..0000000
+++ /dev/null
@@ -1,49 +0,0 @@
-/* 
- * Copyright (C) 2002 Jeff Dike (jdike@karaya.com)
- * Licensed under the GPL
- */
-
-#ifndef __SKAS_H
-#define __SKAS_H
-
-#include "mm_id.h"
-#include "sysdep/ptrace.h"
-
-extern int userspace_pid[];
-extern int proc_mm, ptrace_faultinfo, ptrace_ldt;
-extern int skas_needs_stub;
-
-extern void switch_threads(void *me, void *next);
-extern void thread_wait(void *sw, void *fb);
-extern void new_thread(void *stack, void **switch_buf_ptr, void **fork_buf_ptr,
-                       void (*handler)(int));
-extern int start_idle_thread(void *stack, void *switch_buf_ptr, 
-                            void **fork_buf_ptr);
-extern int user_thread(unsigned long stack, int flags);
-extern void userspace(union uml_pt_regs *regs);
-extern void new_thread_proc(void *stack, void (*handler)(int sig));
-extern void new_thread_handler(int sig);
-extern void handle_syscall(union uml_pt_regs *regs);
-extern int map(struct mm_id * mm_idp, unsigned long virt,
-              unsigned long len, int r, int w, int x, int phys_fd,
-              unsigned long long offset, int done, void **data);
-extern int unmap(struct mm_id * mm_idp, void *addr, unsigned long len,
-                int done, void **data);
-extern int protect(struct mm_id * mm_idp, unsigned long addr,
-                  unsigned long len, int r, int w, int x, int done,
-                  void **data);
-extern void user_signal(int sig, union uml_pt_regs *regs, int pid);
-extern int new_mm(int from, unsigned long stack);
-extern int start_userspace(unsigned long stub_stack);
-extern int copy_context_skas0(unsigned long stack, int pid);
-extern void get_skas_faultinfo(int pid, struct faultinfo * fi);
-extern long execute_syscall_skas(void *r);
-extern unsigned long current_stub_stack(void);
-extern long run_syscall_stub(struct mm_id * mm_idp,
-                             int syscall, unsigned long *args, long expected,
-                             void **addr, int done);
-extern long syscall_stub_data(struct mm_id * mm_idp,
-                              unsigned long *data, int data_count,
-                              void **addr, void **stub_addr);
-
-#endif
index 677871f1b37c65d2379cd4b4b3f58e2fc9aec067..c5c9885a82979f70c2a66a1753ef683a0114b932 100644 (file)
@@ -78,7 +78,7 @@ int init_new_context_skas(struct task_struct *task, struct mm_struct *mm)
        struct mmu_context_skas *from_mm = NULL;
        struct mmu_context_skas *to_mm = &mm->context.skas;
        unsigned long stack = 0;
-       int from_fd, ret = -ENOMEM;
+       int ret = -ENOMEM;
 
        if(skas_needs_stub){
                stack = get_zeroed_page(GFP_KERNEL);
@@ -108,11 +108,7 @@ int init_new_context_skas(struct task_struct *task, struct mm_struct *mm)
                from_mm = &current->mm->context.skas;
 
        if(proc_mm){
-               if(from_mm)
-                       from_fd = from_mm->id.u.mm_fd;
-               else from_fd = -1;
-
-               ret = new_mm(from_fd, stack);
+               ret = new_mm(stack);
                if(ret < 0){
                        printk("init_new_context_skas - new_mm failed, "
                               "errno = %d\n", ret);
index 3b3955d844070f781f7d3b1ffe69ef969ae395ff..eea1c9c4bb0fcd531803dbd6f9bfb468ed87055f 100644 (file)
@@ -18,7 +18,6 @@
 #include <asm/types.h>
 #include "user.h"
 #include "ptrace_user.h"
-#include "time_user.h"
 #include "sysdep/ptrace.h"
 #include "user_util.h"
 #include "kern_util.h"
index dc41c6dc2f343cce1fb28723a76534221be30e3b..3f70a2e12f067504f48243a204190ff2eff44027 100644 (file)
@@ -1,4 +1,4 @@
-/* 
+/*
  * Copyright (C) 2002 Jeff Dike (jdike@karaya.com)
  * Licensed under the GPL
  */
 #include "asm/uaccess.h"
 #include "asm/atomic.h"
 #include "kern_util.h"
-#include "time_user.h"
 #include "skas.h"
 #include "os.h"
 #include "user_util.h"
 #include "tlb.h"
 #include "kern.h"
 #include "mode.h"
-#include "proc_mm.h"
 #include "registers.h"
 
 void switch_to_skas(void *prev, void *next)
@@ -34,7 +32,7 @@ void switch_to_skas(void *prev, void *next)
        if(current->pid == 0)
                switch_timers(0);
 
-       switch_threads(&from->thread.mode.skas.switch_buf, 
+       switch_threads(&from->thread.mode.skas.switch_buf,
                       to->thread.mode.skas.switch_buf);
 
        if(current->pid == 0)
@@ -50,8 +48,8 @@ void new_thread_handler(int sig)
 
        fn = current->thread.request.u.thread.proc;
        arg = current->thread.request.u.thread.arg;
-       change_sig(SIGUSR1, 1);
-       thread_wait(&current->thread.mode.skas.switch_buf, 
+       os_usr1_signal(1);
+       thread_wait(&current->thread.mode.skas.switch_buf,
                    current->thread.mode.skas.fork_buf);
 
        if(current->thread.prev_sched != NULL)
@@ -82,8 +80,8 @@ void release_thread_skas(struct task_struct *task)
 
 void fork_handler(int sig)
 {
-        change_sig(SIGUSR1, 1);
-       thread_wait(&current->thread.mode.skas.switch_buf, 
+       os_usr1_signal(1);
+       thread_wait(&current->thread.mode.skas.switch_buf,
                    current->thread.mode.skas.fork_buf);
        
        force_flush_all();
@@ -93,13 +91,13 @@ void fork_handler(int sig)
        schedule_tail(current->thread.prev_sched);
        current->thread.prev_sched = NULL;
 
-       /* Handle any immediate reschedules or signals */
+/* Handle any immediate reschedules or signals */
        interrupt_end();
        userspace(&current->thread.regs.regs);
 }
 
 int copy_thread_skas(int nr, unsigned long clone_flags, unsigned long sp,
-                    unsigned long stack_top, struct task_struct * p, 
+                    unsigned long stack_top, struct task_struct * p,
                     struct pt_regs *regs)
 {
        void (*handler)(int);
@@ -123,27 +121,14 @@ int copy_thread_skas(int nr, unsigned long clone_flags, unsigned long sp,
        return(0);
 }
 
-extern void map_stub_pages(int fd, unsigned long code,
-                          unsigned long data, unsigned long stack);
-int new_mm(int from, unsigned long stack)
+int new_mm(unsigned long stack)
 {
-       struct proc_mm_op copy;
-       int n, fd;
+       int fd;
 
        fd = os_open_file("/proc/mm", of_cloexec(of_write(OPENFLAGS())), 0);
        if(fd < 0)
                return(fd);
 
-       if(from != -1){
-               copy = ((struct proc_mm_op) { .op       = MM_COPY_SEGMENTS,
-                                             .u        =
-                                             { .copy_segments  = from } } );
-               n = os_write_file(fd, &copy, sizeof(copy));
-               if(n != sizeof(copy))
-                       printk("new_mm : /proc/mm copy_segments failed, "
-                              "err = %d\n", -n);
-       }
-
        if(skas_needs_stub)
                map_stub_pages(fd, CONFIG_STUB_CODE, CONFIG_STUB_DATA, stack);
 
index a5a47528dec7a68cbab0c9e290461a66eea6af2c..5992c3257167443a0ec57d2be3093a02d9af412e 100644 (file)
@@ -13,7 +13,7 @@
 #include "asm/pgtable.h"
 #include "asm/uaccess.h"
 #include "kern_util.h"
-#include "user_util.h"
+#include "os.h"
 
 extern void *um_virt_to_phys(struct task_struct *task, unsigned long addr,
                             pte_t *pte_out);
index 1429c131879d2c0f4a7ea548713e6078d313960b..1731d90e68507e1316cfa4a3c6c3d15f510f316b 100644 (file)
@@ -25,12 +25,12 @@ int record_syscall_start(int syscall)
        syscall_record[index].syscall = syscall;
        syscall_record[index].pid = current_pid();
        syscall_record[index].result = 0xdeadbeef;
-       syscall_record[index].start = os_usecs();
+       syscall_record[index].start = os_nsecs();
        return(index);
 }
 
 void record_syscall_end(int index, long result)
 {
        syscall_record[index].result = result;
-       syscall_record[index].end = os_usecs();
+       syscall_record[index].end = os_nsecs();
 }
index 020ca79b8d33200b3c6f4c29c582e4528cf5a00b..3c7626cdba4be3c2a1364d7c429f317d232f20b8 100644 (file)
@@ -1,4 +1,4 @@
-/* 
+/*
  * Copyright (C) 2000 Jeff Dike (jdike@karaya.com)
  * Licensed under the GPL
  */
 #include "linux/interrupt.h"
 #include "linux/init.h"
 #include "linux/delay.h"
+#include "linux/hrtimer.h"
 #include "asm/irq.h"
 #include "asm/param.h"
 #include "asm/current.h"
 #include "kern_util.h"
 #include "user_util.h"
-#include "time_user.h"
 #include "mode.h"
 #include "os.h"
 
@@ -39,7 +39,7 @@ unsigned long long sched_clock(void)
 int timer_irq_inited = 0;
 
 static int first_tick;
-static unsigned long long prev_usecs;
+static unsigned long long prev_nsecs;
 #ifdef CONFIG_UML_REAL_TIME_CLOCK
 static long long delta;                /* Deviation per interval */
 #endif
@@ -58,23 +58,23 @@ void timer_irq(union uml_pt_regs *regs)
        if(first_tick){
 #ifdef CONFIG_UML_REAL_TIME_CLOCK
                /* We've had 1 tick */
-               unsigned long long usecs = os_usecs();
+               unsigned long long nsecs = os_nsecs();
 
-               delta += usecs - prev_usecs;
-               prev_usecs = usecs;
+               delta += nsecs - prev_nsecs;
+               prev_nsecs = nsecs;
 
                /* Protect against the host clock being set backwards */
                if(delta < 0)
                        delta = 0;
 
-               ticks += (delta * HZ) / MILLION;
-               delta -= (ticks * MILLION) / HZ;
+               ticks += (delta * HZ) / BILLION;
+               delta -= (ticks * BILLION) / HZ;
 #else
                ticks = 1;
 #endif
        }
        else {
-               prev_usecs = os_usecs();
+               prev_nsecs = os_nsecs();
                first_tick = 1;
        }
 
@@ -84,49 +84,102 @@ void timer_irq(union uml_pt_regs *regs)
        }
 }
 
-void boot_timer_handler(int sig)
+void do_boot_timer_handler(struct sigcontext * sc)
 {
        struct pt_regs regs;
 
-       CHOOSE_MODE((void) 
-                   (UPT_SC(&regs.regs) = (struct sigcontext *) (&sig + 1)),
+       CHOOSE_MODE((void) (UPT_SC(&regs.regs) = sc),
                    (void) (regs.regs.skas.is_user = 0));
        do_timer(&regs);
 }
 
+static DEFINE_SPINLOCK(timer_spinlock);
+
+static unsigned long long local_offset = 0;
+
+static inline unsigned long long get_time(void)
+{
+       unsigned long long nsecs;
+       unsigned long flags;
+
+       spin_lock_irqsave(&timer_spinlock, flags);
+       nsecs = os_nsecs();
+       nsecs += local_offset;
+       spin_unlock_irqrestore(&timer_spinlock, flags);
+
+       return nsecs;
+}
+
 irqreturn_t um_timer(int irq, void *dev, struct pt_regs *regs)
 {
+       unsigned long long nsecs;
        unsigned long flags;
 
        do_timer(regs);
+
        write_seqlock_irqsave(&xtime_lock, flags);
-       timer();
+       nsecs = get_time();
+       xtime.tv_sec = nsecs / NSEC_PER_SEC;
+       xtime.tv_nsec = nsecs - xtime.tv_sec * NSEC_PER_SEC;
        write_sequnlock_irqrestore(&xtime_lock, flags);
+
        return(IRQ_HANDLED);
 }
 
 long um_time(int __user *tloc)
 {
-       struct timeval now;
+       long ret = get_time() / NSEC_PER_SEC;
 
-       do_gettimeofday(&now);
-       if (tloc) {
-               if (put_user(now.tv_sec, tloc))
-                       now.tv_sec = -EFAULT;
-       }
-       return now.tv_sec;
+       if((tloc != NULL) && put_user(ret, tloc))
+               return -EFAULT;
+
+       return ret;
+}
+
+void do_gettimeofday(struct timeval *tv)
+{
+       unsigned long long nsecs = get_time();
+
+       tv->tv_sec = nsecs / NSEC_PER_SEC;
+       /* Careful about calculations here - this was originally done as
+        * (nsecs - tv->tv_sec * NSEC_PER_SEC) / NSEC_PER_USEC
+        * which gave bogus (> 1000000) values.  Dunno why, suspect gcc
+        * (4.0.0) miscompiled it, or there's a subtle 64/32-bit conversion
+        * problem that I missed.
+        */
+       nsecs -= tv->tv_sec * NSEC_PER_SEC;
+       tv->tv_usec = (unsigned long) nsecs / NSEC_PER_USEC;
+}
+
+static inline void set_time(unsigned long long nsecs)
+{
+       unsigned long long now;
+       unsigned long flags;
+
+       spin_lock_irqsave(&timer_spinlock, flags);
+       now = os_nsecs();
+       local_offset = nsecs - now;
+       spin_unlock_irqrestore(&timer_spinlock, flags);
+
+       clock_was_set();
 }
 
 long um_stime(int __user *tptr)
 {
        int value;
-       struct timespec new;
 
        if (get_user(value, tptr))
                 return -EFAULT;
-       new.tv_sec = value;
-       new.tv_nsec = 0;
-       do_settimeofday(&new);
+
+       set_time((unsigned long long) value * NSEC_PER_SEC);
+
+       return 0;
+}
+
+int do_settimeofday(struct timespec *tv)
+{
+       set_time((unsigned long long) tv->tv_sec * NSEC_PER_SEC + tv->tv_nsec);
+
        return 0;
 }
 
@@ -134,29 +187,15 @@ void timer_handler(int sig, union uml_pt_regs *regs)
 {
        local_irq_disable();
        irq_enter();
-       update_process_times(CHOOSE_MODE(user_context(UPT_SP(regs)),
-                                        (regs)->skas.is_user));
+       update_process_times(CHOOSE_MODE(
+                            (UPT_SC(regs) && user_context(UPT_SP(regs))),
+                            (regs)->skas.is_user));
        irq_exit();
        local_irq_enable();
        if(current_thread->cpu == 0)
                timer_irq(regs);
 }
 
-static DEFINE_SPINLOCK(timer_spinlock);
-
-unsigned long time_lock(void)
-{
-       unsigned long flags;
-
-       spin_lock_irqsave(&timer_spinlock, flags);
-       return(flags);
-}
-
-void time_unlock(unsigned long flags)
-{
-       spin_unlock_irqrestore(&timer_spinlock, flags);
-}
-
 int __init timer_init(void)
 {
        int err;
@@ -171,14 +210,3 @@ int __init timer_init(void)
 }
 
 __initcall(timer_init);
-
-/*
- * Overrides for Emacs so that we follow Linus's tabbing style.
- * Emacs will notice this stuff at the end of the file and automatically
- * adjust the settings for this buffer only.  This must remain at the end
- * of the file.
- * ---------------------------------------------------------------------------
- * Local variables:
- * c-file-style: "linux"
- * End:
- */
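
The rewritten time code above keeps UML time as host nanoseconds plus a local_offset, so um_stime()/do_settimeofday() only adjust the offset and do_gettimeofday() reduces to a single get_time() call. A small userspace sketch of the same offset clock follows, using clock_gettime() where the patch uses os_nsecs(); names are hypothetical. Build: cc demo.c (add -lrt on old glibc).

/* Sketch: "guest" time = host nanoseconds + settable offset. */
#include <stdio.h>
#include <time.h>

#define NSEC_PER_SEC 1000000000ULL

static unsigned long long local_offset;

static unsigned long long host_nsecs(void)
{
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        return (unsigned long long) ts.tv_sec * NSEC_PER_SEC + ts.tv_nsec;
}

static unsigned long long get_time(void)
{
        return host_nsecs() + local_offset;
}

static void set_time(unsigned long long nsecs)
{
        /* Setting the clock only changes the offset, never the host clock. */
        local_offset = nsecs - host_nsecs();
}

int main(void)
{
        set_time(1000ULL * NSEC_PER_SEC);       /* pretend it is t = 1000s */
        printf("now: %llu ns\n", get_time());
        printf("one call later: %llu ns\n", get_time());
        return 0;
}
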
index 8f40e4838736988645c1960b8e302d7ca52f48db..5c1e4cc1c0493aa85bd86e7e1bc41dff1ea2c086 100644 (file)
@@ -13,7 +13,6 @@
 #include "user_util.h"
 #include "kern_util.h"
 #include "irq_user.h"
-#include "time_user.h"
 #include "mem_user.h"
 #include "os.h"
 #include "tlb.h"
index 37e22d71a0d9d5c546326e5a19276ec54524ac14..786e4edd86c51bb9f3a03766d22ec8eb7c8dc55f 100644 (file)
@@ -20,6 +20,7 @@
 #include "user_util.h"
 #include "tt.h"
 #include "sysdep/thread.h"
+#include "os.h"
 
 extern int debugger_pid;
 extern int debugger_fd;
diff --git a/arch/um/kernel/tt/include/mmu-tt.h b/arch/um/kernel/tt/include/mmu-tt.h
deleted file mode 100644 (file)
index 0440510..0000000
+++ /dev/null
@@ -1,23 +0,0 @@
-/*
- * Copyright (C) 2002 Jeff Dike (jdike@karaya.com)
- * Licensed under the GPL
- */
-
-#ifndef __TT_MMU_H
-#define __TT_MMU_H
-
-struct mmu_context_tt {
-};
-
-#endif
-
-/*
- * Overrides for Emacs so that we follow Linus's tabbing style.
- * Emacs will notice this stuff at the end of the file and automatically
- * adjust the settings for this buffer only.  This must remain at the end
- * of the file.
- * ---------------------------------------------------------------------------
- * Local variables:
- * c-file-style: "linux"
- * End:
- */
index 62535303aa277233241650a8c0a8686d1a6fb9c8..295c1ac817b34cc6f8d04aeab783539b5004060a 100644 (file)
@@ -18,7 +18,6 @@
 #include "os.h"
 #include "kern.h"
 #include "sigcontext.h"
-#include "time_user.h"
 #include "mem_user.h"
 #include "tlb.h"
 #include "mode.h"
index 528a5fc8d8879e85eb641bd5f0e84ea7aaca45f2..03774427d46865b37d3dffce4dd6d695d4ebb6bc 100644 (file)
@@ -20,6 +20,7 @@ Jeff Dike (jdike@karaya.com) : Modified for integration into uml
 #include "kern_util.h"
 #include "ptrace_user.h"
 #include "tt.h"
+#include "os.h"
 
 long proxy_ptrace(struct debugger *debugger, int arg1, pid_t arg2,
                  long arg3, long arg4, pid_t child, int *ret)
index a5f0e01e214e5ac4607dd6edee761baac2799859..99f178319d0397f4c23a3aaa2f5932c767d9e84e 100644 (file)
@@ -15,6 +15,7 @@ terms and conditions.
 #include "ptrace_user.h"
 #include "user_util.h"
 #include "user.h"
+#include "os.h"
 
 int get_syscall(pid_t pid, long *arg1, long *arg2, long *arg3, long *arg4, 
                long *arg5)
index a414c529fbcd78b91350db486527a1bf37742c2a..b5d9d64d91e403b82b8d7a6fe525091122b6d9a7 100644 (file)
@@ -18,7 +18,7 @@ void sig_handler_common_tt(int sig, void *sc_ptr)
 {
        struct sigcontext *sc = sc_ptr;
        struct tt_regs save_regs, *r;
-       int save_errno = errno, is_user;
+       int save_errno = errno, is_user = 0;
        void (*handler)(int, union uml_pt_regs *);
 
        /* This is done because to allow SIGSEGV to be delivered inside a SEGV
@@ -35,7 +35,8 @@ void sig_handler_common_tt(int sig, void *sc_ptr)
                 GET_FAULTINFO_FROM_SC(r->faultinfo, sc);
         }
        save_regs = *r;
-       is_user = user_context(SC_SP(sc));
+       if (sc)
+               is_user = user_context(SC_SP(sc));
        r->sc = sc;
        if(sig != SIGUSR2) 
                r->syscall = -1;
index 40c7d6b1df6804e01ae6576d19a59e73e52e9cc1..08a4e628b24cd6e42f6ba4fec3e360ead3dd7314 100644 (file)
@@ -5,12 +5,12 @@
 
 obj-y = aio.o elf_aux.o file.o helper.o main.o mem.o process.o signal.o \
        start_up.o time.o trap.o tt.o tty.o uaccess.o umid.o user_syms.o \
-       drivers/ sys-$(SUBARCH)/
+       util.o drivers/ sys-$(SUBARCH)/
 
 obj-$(CONFIG_MODE_SKAS) += skas/
 
 USER_OBJS := aio.o elf_aux.o file.o helper.o main.o mem.o process.o signal.o \
-       start_up.o time.o trap.o tt.o tty.o uaccess.o umid.o
+       start_up.o time.o trap.o tt.o tty.o uaccess.o umid.o util.o
 
 elf_aux.o: $(ARCH_DIR)/kernel-offsets.h
 CFLAGS_elf_aux.o += -I$(objtree)/arch/um
index 36cc8475bcdacc76aba1d932a7d2a41eff6392d8..6490a4ff40ac458cdd96ee61128aeefb81303a43 100644 (file)
@@ -60,7 +60,7 @@ int run_helper(void (*pre_exec)(void *), void *pre_data, char **argv,
 
        if((stack_out != NULL) && (*stack_out != 0))
                stack = *stack_out;
-       else stack = alloc_stack(0, um_in_interrupt());
+       else stack = alloc_stack(0, __cant_sleep());
        if(stack == 0)
                return(-ENOMEM);
 
@@ -124,7 +124,7 @@ int run_helper_thread(int (*proc)(void *), void *arg, unsigned int flags,
        unsigned long stack, sp;
        int pid, status, err;
 
-       stack = alloc_stack(stack_order, um_in_interrupt());
+       stack = alloc_stack(stack_order, __cant_sleep());
        if(stack == 0) return(-ENOMEM);
 
        sp = stack + (page_size() << stack_order) - sizeof(void *);
index 172c8474453c9c4baa229d66c0d619ba243d40f3..2878e89a674f0190108f4c527feb637de85ca6dc 100644 (file)
@@ -16,7 +16,6 @@
 #include "user_util.h"
 #include "kern_util.h"
 #include "mem_user.h"
-#include "time_user.h"
 #include "irq_user.h"
 #include "user.h"
 #include "init.h"
@@ -82,20 +81,8 @@ extern void scan_elf_aux( char **envp);
 int main(int argc, char **argv, char **envp)
 {
        char **new_argv;
-       sigset_t mask;
        int ret, i, err;
 
-       /* Enable all signals except SIGIO - in some environments, we can
-        * enter with some signals blocked
-        */
-
-       sigemptyset(&mask);
-       sigaddset(&mask, SIGIO);
-       if(sigprocmask(SIG_SETMASK, &mask, NULL) < 0){
-               perror("sigprocmask");
-               exit(1);
-       }
-
 #ifdef UML_CONFIG_CMDLINE_ON_HOST
        /* Allocate memory for thread command lines */
        if(argc < 2 || strlen(argv[1]) < THREAD_NAME_LEN - 1){
index 39815c6b5e4510ecd0fdaef22c73fc87a82c06ef..7f5e2dac2a35ddc3a14eee73613a17d2e9911269 100644 (file)
@@ -18,6 +18,7 @@
 #include "process.h"
 #include "irq_user.h"
 #include "kern_util.h"
+#include "longjmp.h"
 
 #define ARBITRARY_ADDR -1
 #define FAILURE_PID    -1
@@ -205,24 +206,13 @@ void init_new_thread_signals(int altstack)
 
 int run_kernel_thread(int (*fn)(void *), void *arg, void **jmp_ptr)
 {
-       sigjmp_buf buf;
-       int n;
-
-       *jmp_ptr = &buf;
-       n = sigsetjmp(buf, 1);
-       if(n != 0)
-               return(n);
-       (*fn)(arg);
-       return(0);
+       sigjmp_buf buf;
+       int n, enable;
+
+       *jmp_ptr = &buf;
+       n = UML_SIGSETJMP(&buf, enable);
+       if(n != 0)
+               return(n);
+       (*fn)(arg);
+       return(0);
 }
-
-/*
- * Overrides for Emacs so that we follow Linus's tabbing style.
- * Emacs will notice this stuff at the end of the file and automatically
- * adjust the settings for this buffer only.  This must remain at the end
- * of the file.
- * ---------------------------------------------------------------------------
- * Local variables:
- * c-file-style: "linux"
- * End:
- */
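
run_kernel_thread() above switches from a bare sigsetjmp() to UML_SIGSETJMP(), which additionally snapshots the software signal-enable flag introduced elsewhere in this series. The sketch below shows the underlying trampoline pattern with plain sigsetjmp()/siglongjmp(); thread_exit() and demo_fn() are invented for the example.

/* Sketch: publish a jump buffer, run fn(), and let anything "exit" by
 * longjmping back through it. */
#include <setjmp.h>
#include <stdio.h>

static sigjmp_buf *current_jmp;

static void thread_exit(int code)
{
        siglongjmp(*current_jmp, code);
}

static int run_kernel_thread(int (*fn)(void *), void *arg)
{
        sigjmp_buf buf;
        int n;

        current_jmp = &buf;
        n = sigsetjmp(buf, 1);          /* 1: save and restore the signal mask */
        if (n != 0)
                return n;               /* came back via thread_exit() */
        (*fn)(arg);
        return 0;
}

static int demo_fn(void *arg)
{
        printf("thread body running: %s\n", (const char *) arg);
        thread_exit(42);
        return 0;                       /* not reached */
}

int main(void)
{
        int ret = run_kernel_thread(demo_fn, "hello");

        printf("run_kernel_thread returned %d\n", ret);
        return 0;
}
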
index c1f46a0fef13d14c8740d80091fb18a2cf0690d7..f11b3124a0c86cd22850e178e7c234fe20f9ad7e 100644 (file)
 #include <string.h>
 #include <sys/mman.h>
 #include "user_util.h"
-#include "kern_util.h"
 #include "user.h"
 #include "signal_kern.h"
 #include "sysdep/sigcontext.h"
 #include "sysdep/signal.h"
 #include "sigcontext.h"
-#include "time_user.h"
 #include "mode.h"
+#include "os.h"
+
+/* These are the asynchronous signals.  SIGVTALRM and SIGALRM are handled
+ * together under SIGVTALRM_BIT.  SIGPROF is excluded because we want to
+ * be able to profile all of UML, not just the non-critical sections.  If
+ * profiling is not thread-safe, then that is not my problem.  We can disable
+ * profiling when SMP is enabled in that case.
+ */
+#define SIGIO_BIT 0
+#define SIGIO_MASK (1 << SIGIO_BIT)
+
+#define SIGVTALRM_BIT 1
+#define SIGVTALRM_MASK (1 << SIGVTALRM_BIT)
+
+#define SIGALRM_BIT 2
+#define SIGALRM_MASK (1 << SIGALRM_BIT)
+
+static int signals_enabled = 1;
+static int pending = 0;
 
 void sig_handler(ARCH_SIGHDLR_PARAM)
 {
        struct sigcontext *sc;
+       int enabled;
+
+       /* Must be the first thing that this handler does - x86_64 stores
+        * the sigcontext in %rdx, and we need to save it before it has a
+        * chance to get trashed.
+        */
 
        ARCH_GET_SIGCONTEXT(sc, sig);
+
+       enabled = signals_enabled;
+       if(!enabled && (sig == SIGIO)){
+               pending |= SIGIO_MASK;
+               return;
+       }
+
+       block_signals();
+
        CHOOSE_MODE_PROC(sig_handler_common_tt, sig_handler_common_skas,
                         sig, sc);
+
+       set_signals(enabled);
 }
 
 extern int timer_irq_inited;
 
-void alarm_handler(ARCH_SIGHDLR_PARAM)
+static void real_alarm_handler(int sig, struct sigcontext *sc)
 {
-       struct sigcontext *sc;
-
-       ARCH_GET_SIGCONTEXT(sc, sig);
-       if(!timer_irq_inited) return;
+       if(!timer_irq_inited){
+               signals_enabled = 1;
+               return;
+       }
 
        if(sig == SIGALRM)
                switch_timers(0);
@@ -47,6 +81,52 @@ void alarm_handler(ARCH_SIGHDLR_PARAM)
 
        if(sig == SIGALRM)
                switch_timers(1);
+
+}
+
+void alarm_handler(ARCH_SIGHDLR_PARAM)
+{
+       struct sigcontext *sc;
+       int enabled;
+
+       ARCH_GET_SIGCONTEXT(sc, sig);
+
+       enabled = signals_enabled;
+       if(!signals_enabled){
+               if(sig == SIGVTALRM)
+                       pending |= SIGVTALRM_MASK;
+               else pending |= SIGALRM_MASK;
+
+               return;
+       }
+
+       block_signals();
+
+       real_alarm_handler(sig, sc);
+       set_signals(enabled);
+}
+
+extern void do_boot_timer_handler(struct sigcontext * sc);
+
+void boot_timer_handler(ARCH_SIGHDLR_PARAM)
+{
+       struct sigcontext *sc;
+       int enabled;
+
+       ARCH_GET_SIGCONTEXT(sc, sig);
+
+       enabled = signals_enabled;
+       if(!enabled){
+               if(sig == SIGVTALRM)
+                       pending |= SIGVTALRM_MASK;
+               else pending |= SIGALRM_MASK;
+               return;
+       }
+
+       block_signals();
+
+       do_boot_timer_handler(sc);
+       set_signals(enabled);
 }
 
 void set_sigstack(void *sig_stack, int size)
@@ -73,6 +153,7 @@ void set_handler(int sig, void (*handler)(int), int flags, ...)
 {
        struct sigaction action;
        va_list ap;
+       sigset_t sig_mask;
        int mask;
 
        va_start(ap, flags);
@@ -85,7 +166,12 @@ void set_handler(int sig, void (*handler)(int), int flags, ...)
        action.sa_flags = flags;
        action.sa_restorer = NULL;
        if(sigaction(sig, &action, NULL) < 0)
-               panic("sigaction failed");
+               panic("sigaction failed - errno = %d\n", errno);
+
+       sigemptyset(&sig_mask);
+       sigaddset(&sig_mask, sig);
+       if(sigprocmask(SIG_UNBLOCK, &sig_mask, NULL) < 0)
+               panic("sigprocmask failed - errno = %d\n", errno);
 }
 
 int change_sig(int signal, int on)
@@ -98,89 +184,77 @@ int change_sig(int signal, int on)
        return(!sigismember(&old, signal));
 }
 
-/* Both here and in set/get_signal we don't touch SIGPROF, because we must not
- * disable profiling; it's safe because the profiling code does not interact
- * with the kernel code at all.*/
-
-static void change_signals(int type)
-{
-       sigset_t mask;
-
-       sigemptyset(&mask);
-       sigaddset(&mask, SIGVTALRM);
-       sigaddset(&mask, SIGALRM);
-       sigaddset(&mask, SIGIO);
-       if(sigprocmask(type, &mask, NULL) < 0)
-               panic("Failed to change signal mask - errno = %d", errno);
-}
-
 void block_signals(void)
 {
-       change_signals(SIG_BLOCK);
+       signals_enabled = 0;
 }
 
 void unblock_signals(void)
 {
-       change_signals(SIG_UNBLOCK);
-}
+       int save_pending;
 
-/* These are the asynchronous signals.  SIGVTALRM and SIGARLM are handled
- * together under SIGVTALRM_BIT.  SIGPROF is excluded because we want to
- * be able to profile all of UML, not just the non-critical sections.  If
- * profiling is not thread-safe, then that is not my problem.  We can disable
- * profiling when SMP is enabled in that case.
- */
-#define SIGIO_BIT 0
-#define SIGVTALRM_BIT 1
+       if(signals_enabled == 1)
+               return;
 
-static int enable_mask(sigset_t *mask)
-{
-       int sigs;
+       /* We loop because the IRQ handler returns with interrupts off.  So,
+        * interrupts may have arrived and we need to re-enable them and
+        * recheck pending.
+        */
+       while(1){
+               /* Save and reset pending after enabling signals.  This
+                * way, pending won't be changed while we're reading it.
+                */
+               signals_enabled = 1;
+
+               save_pending = pending;
+               if(save_pending == 0)
+                       return;
+
+               pending = 0;
+
+               /* We have pending interrupts, so disable signals, as the
+                * handlers expect them off when they are called.  They will
+                * be enabled again above.
+                */
+
+               signals_enabled = 0;
 
-       sigs = sigismember(mask, SIGIO) ? 0 : 1 << SIGIO_BIT;
-       sigs |= sigismember(mask, SIGVTALRM) ? 0 : 1 << SIGVTALRM_BIT;
-       sigs |= sigismember(mask, SIGALRM) ? 0 : 1 << SIGVTALRM_BIT;
-       return(sigs);
+               /* Deal with SIGIO first because the alarm handler might
+                * schedule, leaving the pending SIGIO stranded until we come
+                * back here.
+                */
+               if(save_pending & SIGIO_MASK)
+                       CHOOSE_MODE_PROC(sig_handler_common_tt,
+                                        sig_handler_common_skas, SIGIO, NULL);
+
+               if(save_pending & SIGALRM_MASK)
+                       real_alarm_handler(SIGALRM, NULL);
+
+               if(save_pending & SIGVTALRM_MASK)
+                       real_alarm_handler(SIGVTALRM, NULL);
+       }
 }
 
 int get_signals(void)
 {
-       sigset_t mask;
-
-       if(sigprocmask(SIG_SETMASK, NULL, &mask) < 0)
-               panic("Failed to get signal mask");
-       return(enable_mask(&mask));
+       return signals_enabled;
 }
 
 int set_signals(int enable)
 {
-       sigset_t mask;
        int ret;
+       if(signals_enabled == enable)
+               return enable;
 
-       sigemptyset(&mask);
-       if(enable & (1 << SIGIO_BIT))
-               sigaddset(&mask, SIGIO);
-       if(enable & (1 << SIGVTALRM_BIT)){
-               sigaddset(&mask, SIGVTALRM);
-               sigaddset(&mask, SIGALRM);
-       }
+       ret = signals_enabled;
+       if(enable)
+               unblock_signals();
+       else block_signals();
 
-       /* This is safe - sigprocmask is guaranteed to copy locally the
-        * value of new_set, do his work and then, at the end, write to
-        * old_set.
-        */
-       if(sigprocmask(SIG_UNBLOCK, &mask, &mask) < 0)
-               panic("Failed to enable signals");
-       ret = enable_mask(&mask);
-       sigemptyset(&mask);
-       if((enable & (1 << SIGIO_BIT)) == 0)
-               sigaddset(&mask, SIGIO);
-       if((enable & (1 << SIGVTALRM_BIT)) == 0){
-               sigaddset(&mask, SIGVTALRM);
-               sigaddset(&mask, SIGALRM);
-       }
-       if(sigprocmask(SIG_BLOCK, &mask, NULL) < 0)
-               panic("Failed to block signals");
+       return ret;
+}
 
-       return(ret);
+void os_usr1_signal(int on)
+{
+       change_sig(SIGUSR1, on);
 }
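
The signal.c rework above stops using sigprocmask() for IRQ-style masking: host signals stay unblocked, a signals_enabled flag tells the handlers whether to run, and handlers that fire while disabled just set a bit in pending for unblock_signals() to replay. A single-signal userspace sketch of that scheme follows (SIGUSR1 only, hypothetical names; the real code multiplexes SIGIO/SIGALRM/SIGVTALRM).

/* Sketch: software signal masking with a flag and a pending bitmask. */
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

#define SIGUSR1_MASK 1

static volatile sig_atomic_t signals_enabled = 1;
static volatile sig_atomic_t pending;

static void real_handler(int sig)
{
        /* Only ever called from normal context in this demo. */
        printf("handling signal %d\n", sig);
}

static void handler(int sig)
{
        if (!signals_enabled) {
                pending |= SIGUSR1_MASK;        /* defer */
                return;
        }
        signals_enabled = 0;                    /* handlers run "disabled" */
        real_handler(sig);
        signals_enabled = 1;
}

static void block_signals(void)
{
        signals_enabled = 0;
}

static void unblock_signals(void)
{
        while (1) {
                int save_pending;

                signals_enabled = 1;            /* reopen the gate first */
                save_pending = pending;
                if (save_pending == 0)
                        return;
                pending = 0;

                signals_enabled = 0;            /* replay with signals "off" */
                if (save_pending & SIGUSR1_MASK)
                        real_handler(SIGUSR1);
        }
}

int main(void)
{
        struct sigaction sa = { .sa_handler = handler };

        sigaction(SIGUSR1, &sa, NULL);

        block_signals();
        kill(getpid(), SIGUSR1);        /* handler only sets the pending bit */
        printf("signal deferred, pending = %d\n", (int) pending);
        unblock_signals();              /* replays the deferred SIGUSR1 */
        return 0;
}
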
index eab5386d60a7e9fea06bc092dbf77ccff47807e0..5fd8d4dad66a123501d9aff333d9bef809bba881 100644 (file)
@@ -3,8 +3,8 @@
 # Licensed under the GPL
 #
 
-obj-y := trap.o
+obj-y := mem.o process.o trap.o
 
-USER_OBJS := trap.o
+USER_OBJS := mem.o process.o trap.o
 
 include arch/um/scripts/Makefile.rules
similarity index 52%
rename from arch/um/kernel/skas/mem_user.c
rename to arch/um/os-Linux/skas/mem.c
index 1d89640bd5028852c5cf180534a37237ebaed8c3..9890e9090f584dc125d82d0a65889690ad257567 100644 (file)
@@ -1,4 +1,4 @@
-/* 
+/*
  * Copyright (C) 2002 Jeff Dike (jdike@karaya.com)
  * Licensed under the GPL
  */
@@ -32,7 +32,7 @@ extern void wait_stub_done(int pid, int sig, char * fname);
 static inline unsigned long *check_init_stack(struct mm_id * mm_idp,
                                              unsigned long *stack)
 {
-       if(stack == NULL){
+       if(stack == NULL) {
                stack = (unsigned long *) mm_idp->stack + 2;
                *stack = 0;
        }
@@ -45,13 +45,14 @@ int single_count = 0;
 int multi_count = 0;
 int multi_op_count = 0;
 
-static long do_syscall_stub(struct mm_id *mm_idp, void **addr)
+static inline long do_syscall_stub(struct mm_id * mm_idp, void **addr)
 {
        unsigned long regs[MAX_REG_NR];
-       unsigned long *data;
-       unsigned long *syscall;
+       int n;
        long ret, offset;
-        int n, pid = mm_idp->u.pid;
+       unsigned long * data;
+       unsigned long * syscall;
+       int pid = mm_idp->u.pid;
 
        if(proc_mm)
 #warning Need to look up userspace_pid by cpu
@@ -59,10 +60,11 @@ static long do_syscall_stub(struct mm_id *mm_idp, void **addr)
 
        multi_count++;
 
-        get_safe_registers(regs);
-        regs[REGS_IP_INDEX] = UML_CONFIG_STUB_CODE +
+       get_safe_registers(regs);
+       regs[REGS_IP_INDEX] = UML_CONFIG_STUB_CODE +
                ((unsigned long) &batch_syscall_stub -
-                 (unsigned long) &__syscall_stub_start);
+                (unsigned long) &__syscall_stub_start);
+
        n = ptrace_setregs(pid, regs);
        if(n < 0)
                panic("do_syscall_stub : PTRACE_SETREGS failed, errno = %d\n",
@@ -80,6 +82,8 @@ static long do_syscall_stub(struct mm_id *mm_idp, void **addr)
        if (offset) {
                data = (unsigned long *)(mm_idp->stack +
                                         offset - UML_CONFIG_STUB_DATA);
+               printk("do_syscall_stub : ret = %d, offset = %d, "
+                      "data = 0x%x\n", ret, offset, data);
                syscall = (unsigned long *)((unsigned long)data + data[0]);
                printk("do_syscall_stub: syscall %ld failed, return value = "
                       "0x%lx, expected return value = 0x%lx\n",
@@ -107,32 +111,32 @@ static long do_syscall_stub(struct mm_id *mm_idp, void **addr)
 
 long run_syscall_stub(struct mm_id * mm_idp, int syscall,
                      unsigned long *args, long expected, void **addr,
-                     int done)
+                     int done)
 {
-       unsigned long *stack = check_init_stack(mm_idp, *addr);
+       unsigned long *stack = check_init_stack(mm_idp, *addr);
 
        if(done && *addr == NULL)
                single_count++;
 
-       *stack += sizeof(long);
+       *stack += sizeof(long);
        stack += *stack / sizeof(long);
 
-        *stack++ = syscall;
-        *stack++ = args[0];
-        *stack++ = args[1];
-        *stack++ = args[2];
-        *stack++ = args[3];
-        *stack++ = args[4];
-        *stack++ = args[5];
+       *stack++ = syscall;
+       *stack++ = args[0];
+       *stack++ = args[1];
+       *stack++ = args[2];
+       *stack++ = args[3];
+       *stack++ = args[4];
+       *stack++ = args[5];
        *stack++ = expected;
-        *stack = 0;
-        multi_op_count++;
+       *stack = 0;
+       multi_op_count++;
 
-        if(!done && ((((unsigned long) stack) & ~PAGE_MASK) <
+       if(!done && ((((unsigned long) stack) & ~PAGE_MASK) <
                     PAGE_SIZE - 10 * sizeof(long))){
                *addr = stack;
-                return 0;
-        }
+               return 0;
+       }
 
        return do_syscall_stub(mm_idp, addr);
 }
@@ -150,7 +154,7 @@ long syscall_stub_data(struct mm_id * mm_idp,
        if((((unsigned long) *addr) & ~PAGE_MASK) >=
           PAGE_SIZE - (10 + data_count) * sizeof(long)) {
                ret = do_syscall_stub(mm_idp, addr);
-               /* in case of error, don't overwrite data on stack */
+               /* in case of error, don't overwrite data on stack */
                if(ret)
                        return ret;
        }
@@ -172,39 +176,39 @@ int map(struct mm_id * mm_idp, unsigned long virt, unsigned long len,
        int r, int w, int x, int phys_fd, unsigned long long offset,
        int done, void **data)
 {
-        int prot, ret;
-
-        prot = (r ? PROT_READ : 0) | (w ? PROT_WRITE : 0) |
-                (x ? PROT_EXEC : 0);
-
-        if(proc_mm){
-                struct proc_mm_op map;
-                int fd = mm_idp->u.mm_fd;
-
-                map = ((struct proc_mm_op) { .op       = MM_MMAP,
-                                             .u                =
-                                             { .mmap   =
-                                               { .addr = virt,
-                                                 .len  = len,
-                                                 .prot = prot,
-                                                 .flags        = MAP_SHARED |
-                                                 MAP_FIXED,
-                                                 .fd   = phys_fd,
-                                                 .offset= offset
-                                               } } } );
+       int prot, ret;
+
+       prot = (r ? PROT_READ : 0) | (w ? PROT_WRITE : 0) |
+               (x ? PROT_EXEC : 0);
+
+       if(proc_mm){
+               struct proc_mm_op map;
+               int fd = mm_idp->u.mm_fd;
+
+               map = ((struct proc_mm_op) { .op        = MM_MMAP,
+                                      .u               =
+                                      { .mmap  =
+                                        { .addr        = virt,
+                                          .len = len,
+                                          .prot        = prot,
+                                          .flags       = MAP_SHARED |
+                                          MAP_FIXED,
+                                          .fd  = phys_fd,
+                                          .offset= offset
+                                        } } } );
                ret = os_write_file(fd, &map, sizeof(map));
                if(ret != sizeof(map))
                        printk("map : /proc/mm map failed, err = %d\n", -ret);
                else ret = 0;
-        }
-        else {
-                unsigned long args[] = { virt, len, prot,
-                                         MAP_SHARED | MAP_FIXED, phys_fd,
-                                         MMAP_OFFSET(offset) };
+       }
+       else {
+               unsigned long args[] = { virt, len, prot,
+                                        MAP_SHARED | MAP_FIXED, phys_fd,
+                                        MMAP_OFFSET(offset) };
 
                ret = run_syscall_stub(mm_idp, STUB_MMAP_NR, args, virt,
                                       data, done);
-        }
+       }
 
        return ret;
 }
@@ -212,68 +216,66 @@ int map(struct mm_id * mm_idp, unsigned long virt, unsigned long len,
 int unmap(struct mm_id * mm_idp, void *addr, unsigned long len, int done,
          void **data)
 {
-        int ret;
-
-        if(proc_mm){
-                struct proc_mm_op unmap;
-                int fd = mm_idp->u.mm_fd;
-
-                unmap = ((struct proc_mm_op) { .op     = MM_MUNMAP,
-                                               .u      =
-                                               { .munmap       =
-                                                 { .addr       =
-                                                   (unsigned long) addr,
-                                                   .len                = len } } } );
+       int ret;
+
+       if(proc_mm){
+               struct proc_mm_op unmap;
+               int fd = mm_idp->u.mm_fd;
+
+               unmap = ((struct proc_mm_op) { .op      = MM_MUNMAP,
+                                        .u     =
+                                        { .munmap      =
+                                          { .addr      =
+                                            (unsigned long) addr,
+                                            .len               = len } } } );
                ret = os_write_file(fd, &unmap, sizeof(unmap));
                if(ret != sizeof(unmap))
                        printk("unmap - proc_mm write returned %d\n", ret);
                else ret = 0;
-        }
-        else {
-                unsigned long args[] = { (unsigned long) addr, len, 0, 0, 0,
-                                         0 };
+       }
+       else {
+               unsigned long args[] = { (unsigned long) addr, len, 0, 0, 0,
+                                        0 };
 
                ret = run_syscall_stub(mm_idp, __NR_munmap, args, 0,
                                       data, done);
-                if(ret < 0)
-                        printk("munmap stub failed, errno = %d\n", ret);
-        }
+       }
 
-        return ret;
+       return ret;
 }
 
 int protect(struct mm_id * mm_idp, unsigned long addr, unsigned long len,
            int r, int w, int x, int done, void **data)
 {
-        struct proc_mm_op protect;
-        int prot, ret;
-
-        prot = (r ? PROT_READ : 0) | (w ? PROT_WRITE : 0) |
-                (x ? PROT_EXEC : 0);
-
-        if(proc_mm){
-                int fd = mm_idp->u.mm_fd;
-                protect = ((struct proc_mm_op) { .op   = MM_MPROTECT,
-                                                 .u    =
-                                                 { .mprotect   =
-                                                   { .addr     =
-                                                     (unsigned long) addr,
-                                                     .len      = len,
-                                                     .prot     = prot } } } );
-
-                ret = os_write_file(fd, &protect, sizeof(protect));
-                if(ret != sizeof(protect))
-                        printk("protect failed, err = %d", -ret);
-                else ret = 0;
-        }
-        else {
-                unsigned long args[] = { addr, len, prot, 0, 0, 0 };
-
-                ret = run_syscall_stub(mm_idp, __NR_mprotect, args, 0,
-                                       data, done);
-        }
-
-        return ret;
+       struct proc_mm_op protect;
+       int prot, ret;
+
+       prot = (r ? PROT_READ : 0) | (w ? PROT_WRITE : 0) |
+               (x ? PROT_EXEC : 0);
+       if(proc_mm){
+               int fd = mm_idp->u.mm_fd;
+
+               protect = ((struct proc_mm_op) { .op    = MM_MPROTECT,
+                                          .u   =
+                                          { .mprotect  =
+                                            { .addr    =
+                                              (unsigned long) addr,
+                                              .len     = len,
+                                              .prot    = prot } } } );
+
+               ret = os_write_file(fd, &protect, sizeof(protect));
+               if(ret != sizeof(protect))
+                       printk("protect failed, err = %d", -ret);
+               else ret = 0;
+       }
+       else {
+               unsigned long args[] = { addr, len, prot, 0, 0, 0 };
+
+               ret = run_syscall_stub(mm_idp, __NR_mprotect, args, 0,
+                                      data, done);
+       }
+
+       return ret;
 }
 
 void before_mem_skas(unsigned long unused)
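
run_syscall_stub() above appends each request (syscall number, six arguments, expected result) to the stub data page and only pays for a do_syscall_stub() round trip when the page is nearly full or the caller passes done. The program below sketches that batch-until-full bookkeeping with a printf standing in for the ptrace round trip; all names and sizes are illustrative.

/* Sketch: queue fixed-size records into a page, flush on "done" or overflow. */
#include <stdio.h>
#include <string.h>

#define PAGE_SIZE     4096
#define SLOTS_PER_OP  8                 /* syscall nr + 6 args + expected */

static long batch_page[PAGE_SIZE / sizeof(long)];
static size_t batch_used;               /* longs currently queued */

static void flush_batch(void)
{
        if (batch_used == 0)
                return;
        printf("flushing %zu queued longs in one stub call\n", batch_used);
        batch_used = 0;
        memset(batch_page, 0, sizeof(batch_page));
}

static void queue_syscall(long nr, const long args[6], long expected, int done)
{
        long *slot = &batch_page[batch_used];
        int i;

        slot[0] = nr;
        for (i = 0; i < 6; i++)
                slot[1 + i] = args[i];
        slot[7] = expected;
        batch_used += SLOTS_PER_OP;

        /* Flush when the caller is finished or the page is nearly full. */
        if (done || batch_used + SLOTS_PER_OP > PAGE_SIZE / sizeof(long))
                flush_batch();
}

int main(void)
{
        long args[6] = { 0, 0, 0, 0, 0, 0 };
        int i;

        for (i = 0; i < 600; i++)
                queue_syscall(1, args, 0, 0);   /* queued, flushed on overflow */
        queue_syscall(2, args, 0, 1);           /* done - forces a flush */
        return 0;
}
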
diff --git a/arch/um/os-Linux/skas/process.c b/arch/um/os-Linux/skas/process.c
new file mode 100644 (file)
index 0000000..120a21c
--- /dev/null
@@ -0,0 +1,566 @@
+/*
+ * Copyright (C) 2002- 2004 Jeff Dike (jdike@addtoit.com)
+ * Licensed under the GPL
+ */
+
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <errno.h>
+#include <signal.h>
+#include <setjmp.h>
+#include <sched.h>
+#include "ptrace_user.h"
+#include <sys/wait.h>
+#include <sys/mman.h>
+#include <sys/user.h>
+#include <sys/time.h>
+#include <asm/unistd.h>
+#include <asm/types.h>
+#include "user.h"
+#include "sysdep/ptrace.h"
+#include "user_util.h"
+#include "kern_util.h"
+#include "skas.h"
+#include "stub-data.h"
+#include "mm_id.h"
+#include "sysdep/sigcontext.h"
+#include "sysdep/stub.h"
+#include "os.h"
+#include "proc_mm.h"
+#include "skas_ptrace.h"
+#include "chan_user.h"
+#include "registers.h"
+#include "mem.h"
+#include "uml-config.h"
+#include "process.h"
+#include "longjmp.h"
+
+int is_skas_winch(int pid, int fd, void *data)
+{
+       if(pid != os_getpgrp())
+               return(0);
+
+       register_winch_irq(-1, fd, -1, data);
+       return(1);
+}
+
+void wait_stub_done(int pid, int sig, char * fname)
+{
+       int n, status, err;
+
+       do {
+               if ( sig != -1 ) {
+                       err = ptrace(PTRACE_CONT, pid, 0, sig);
+                       if(err)
+                               panic("%s : continue failed, errno = %d\n",
+                                     fname, errno);
+               }
+               sig = 0;
+
+               CATCH_EINTR(n = waitpid(pid, &status, WUNTRACED));
+       } while((n >= 0) && WIFSTOPPED(status) &&
+               ((WSTOPSIG(status) == SIGVTALRM) ||
+                /* running UML inside a detached screen can cause
+                 * SIGWINCHes
+                 */
+                (WSTOPSIG(status) == SIGWINCH)));
+
+       if((n < 0) || !WIFSTOPPED(status) ||
+          (WSTOPSIG(status) != SIGUSR1 && WSTOPSIG(status) != SIGTRAP)){
+               unsigned long regs[HOST_FRAME_SIZE];
+
+               if(ptrace(PTRACE_GETREGS, pid, 0, regs) < 0)
+                       printk("Failed to get registers from stub, "
+                              "errno = %d\n", errno);
+               else {
+                       int i;
+
+                       printk("Stub registers -\n");
+                       for(i = 0; i < HOST_FRAME_SIZE; i++)
+                               printk("\t%d - %lx\n", i, regs[i]);
+               }
+               panic("%s : failed to wait for SIGUSR1/SIGTRAP, "
+                     "pid = %d, n = %d, errno = %d, status = 0x%x\n",
+                     fname, pid, n, errno, status);
+       }
+}
+
+extern unsigned long current_stub_stack(void);
+
+void get_skas_faultinfo(int pid, struct faultinfo * fi)
+{
+       int err;
+
+       if(ptrace_faultinfo){
+               err = ptrace(PTRACE_FAULTINFO, pid, 0, fi);
+               if(err)
+                       panic("get_skas_faultinfo - PTRACE_FAULTINFO failed, "
+                             "errno = %d\n", errno);
+
+               /* Special handling for i386, which has different structs */
+               if (sizeof(struct ptrace_faultinfo) < sizeof(struct faultinfo))
+                       memset((char *)fi + sizeof(struct ptrace_faultinfo), 0,
+                              sizeof(struct faultinfo) -
+                              sizeof(struct ptrace_faultinfo));
+       }
+       else {
+               wait_stub_done(pid, SIGSEGV, "get_skas_faultinfo");
+
+               /* faultinfo is prepared by the stub-segv-handler at start of
+                * the stub stack page. We just have to copy it.
+                */
+               memcpy(fi, (void *)current_stub_stack(), sizeof(*fi));
+       }
+}
+
+static void handle_segv(int pid, union uml_pt_regs * regs)
+{
+       get_skas_faultinfo(pid, &regs->skas.faultinfo);
+       segv(regs->skas.faultinfo, 0, 1, NULL);
+}
+
+/* To use the same value of using_sysemu as the caller, it is passed in as local_using_sysemu */
+static void handle_trap(int pid, union uml_pt_regs *regs, int local_using_sysemu)
+{
+       int err, status;
+
+       /* Mark this as a syscall */
+       UPT_SYSCALL_NR(regs) = PT_SYSCALL_NR(regs->skas.regs);
+
+       if (!local_using_sysemu)
+       {
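+               /* Without sysemu the host kernel would execute the intercepted
+                * syscall itself, so neutralize it by rewriting the syscall
+                * number to getpid and then letting it run to the syscall exit.
+                */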
+               err = ptrace(PTRACE_POKEUSR, pid, PT_SYSCALL_NR_OFFSET,
+                            __NR_getpid);
+               if(err < 0)
+                       panic("handle_trap - nullifying syscall failed, errno = %d\n",
+                             errno);
+
+               err = ptrace(PTRACE_SYSCALL, pid, 0, 0);
+               if(err < 0)
+                       panic("handle_trap - continuing to end of syscall failed, "
+                             "errno = %d\n", errno);
+
+               CATCH_EINTR(err = waitpid(pid, &status, WUNTRACED));
+               if((err < 0) || !WIFSTOPPED(status) ||
+                  (WSTOPSIG(status) != SIGTRAP + 0x80))
+                       panic("handle_trap - failed to wait at end of syscall, "
+                             "errno = %d, status = %d\n", errno, status);
+       }
+
+       handle_syscall(regs);
+}
+
+extern int __syscall_stub_start;
+
+static int userspace_tramp(void *stack)
+{
+       void *addr;
+
+       ptrace(PTRACE_TRACEME, 0, 0, 0);
+
+       init_new_thread_signals(1);
+       enable_timer();
+
+       if(!proc_mm){
+               /* This has a pte, but it can't be mapped in with the usual
+                * tlb_flush mechanism because this is part of that mechanism
+                */
+               int fd;
+               __u64 offset;
+               fd = phys_mapping(to_phys(&__syscall_stub_start), &offset);
+               addr = mmap64((void *) UML_CONFIG_STUB_CODE, page_size(),
+                             PROT_EXEC, MAP_FIXED | MAP_PRIVATE, fd, offset);
+               if(addr == MAP_FAILED){
+                       printk("mapping mmap stub failed, errno = %d\n",
+                              errno);
+                       exit(1);
+               }
+
+               if(stack != NULL){
+                       fd = phys_mapping(to_phys(stack), &offset);
+                       addr = mmap((void *) UML_CONFIG_STUB_DATA, page_size(),
+                                   PROT_READ | PROT_WRITE,
+                                   MAP_FIXED | MAP_SHARED, fd, offset);
+                       if(addr == MAP_FAILED){
+                               printk("mapping segfault stack failed, "
+                                      "errno = %d\n", errno);
+                               exit(1);
+                       }
+               }
+       }
+       if(!ptrace_faultinfo && (stack != NULL)){
+               unsigned long v = UML_CONFIG_STUB_CODE +
+                                 (unsigned long) stub_segv_handler -
+                                 (unsigned long) &__syscall_stub_start;
+
+               set_sigstack((void *) UML_CONFIG_STUB_DATA, page_size());
+               set_handler(SIGSEGV, (void *) v, SA_ONSTACK,
+                           SIGIO, SIGWINCH, SIGALRM, SIGVTALRM,
+                           SIGUSR1, -1);
+       }
+
+       os_stop_process(os_getpid());
+       return(0);
+}
+
+/* Each element set once, and only accessed by a single processor anyway */
+#undef NR_CPUS
+#define NR_CPUS 1
+int userspace_pid[NR_CPUS];
+
+int start_userspace(unsigned long stub_stack)
+{
+       void *stack;
+       unsigned long sp;
+       int pid, status, n, flags;
+
+       stack = mmap(NULL, PAGE_SIZE, PROT_READ | PROT_WRITE | PROT_EXEC,
+                    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+       if(stack == MAP_FAILED)
+               panic("start_userspace : mmap failed, errno = %d", errno);
+       sp = (unsigned long) stack + PAGE_SIZE - sizeof(void *);
+
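+       /* CLONE_FILES keeps the child's descriptor table shared with ours.
+        * CLONE_VM is added only when /proc/mm is available, since address
+        * spaces are then managed through /proc/mm and switched with
+        * PTRACE_SWITCH_MM (see switch_mm_skas()).
+        */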
+       flags = CLONE_FILES | SIGCHLD;
+       if(proc_mm) flags |= CLONE_VM;
+       pid = clone(userspace_tramp, (void *) sp, flags, (void *) stub_stack);
+       if(pid < 0)
+               panic("start_userspace : clone failed, errno = %d", errno);
+
+       do {
+               CATCH_EINTR(n = waitpid(pid, &status, WUNTRACED));
+               if(n < 0)
+                       panic("start_userspace : wait failed, errno = %d",
+                             errno);
+       } while(WIFSTOPPED(status) && (WSTOPSIG(status) == SIGVTALRM));
+
+       if(!WIFSTOPPED(status) || (WSTOPSIG(status) != SIGSTOP))
+               panic("start_userspace : expected SIGSTOP, got status = %d",
+                     status);
+
+       if (ptrace(PTRACE_OLDSETOPTIONS, pid, NULL, (void *)PTRACE_O_TRACESYSGOOD) < 0)
+               panic("start_userspace : PTRACE_OLDSETOPTIONS failed, errno=%d\n",
+                     errno);
+
+       if(munmap(stack, PAGE_SIZE) < 0)
+               panic("start_userspace : munmap failed, errno = %d\n", errno);
+
+       return(pid);
+}
+
+void userspace(union uml_pt_regs *regs)
+{
+       int err, status, op, pid = userspace_pid[0];
+       int local_using_sysemu; /* To prevent races if using_sysemu changes under us */
+
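+       /* Main loop - restore the child's registers, let it run until it
+        * stops, save its registers back, and then dispatch on the signal
+        * that stopped it.
+        */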
+       while(1){
+               restore_registers(pid, regs);
+
+               /* Now we set local_using_sysemu to be used for one loop */
+               local_using_sysemu = get_using_sysemu();
+
+               op = SELECT_PTRACE_OPERATION(local_using_sysemu, singlestepping(NULL));
+
+               err = ptrace(op, pid, 0, 0);
+               if(err)
+                       panic("userspace - could not resume userspace process, "
+                             "pid=%d, ptrace operation = %d, errno = %d\n",
+                             pid, op, errno);
+
+               CATCH_EINTR(err = waitpid(pid, &status, WUNTRACED));
+               if(err < 0)
+                       panic("userspace - waitpid failed, errno = %d\n",
+                             errno);
+
+               regs->skas.is_user = 1;
+               save_registers(pid, regs);
+               UPT_SYSCALL_NR(regs) = -1; /* Assume: It's not a syscall */
+
+               if(WIFSTOPPED(status)){
+                       switch(WSTOPSIG(status)){
+                       case SIGSEGV:
+                               if(PTRACE_FULL_FAULTINFO || !ptrace_faultinfo)
+                                       user_signal(SIGSEGV, regs, pid);
+                               else handle_segv(pid, regs);
+                               break;
+                       case SIGTRAP + 0x80:
+                               handle_trap(pid, regs, local_using_sysemu);
+                               break;
+                       case SIGTRAP:
+                               relay_signal(SIGTRAP, regs);
+                               break;
+                       case SIGIO:
+                       case SIGVTALRM:
+                       case SIGILL:
+                       case SIGBUS:
+                       case SIGFPE:
+                       case SIGWINCH:
+                               user_signal(WSTOPSIG(status), regs, pid);
+                               break;
+                       default:
+                               printk("userspace - child stopped with signal "
+                                      "%d\n", WSTOPSIG(status));
+                       }
+                       pid = userspace_pid[0];
+                       interrupt_end();
+
+                       /* Avoid -ERESTARTSYS handling in host */
+                       if(PT_SYSCALL_NR_OFFSET != PT_SYSCALL_RET_OFFSET)
+                               PT_SYSCALL_NR(regs->skas.regs) = -1;
+               }
+       }
+}
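+
+/* Return codes of the sigsetjmp in start_idle_thread() - the non-zero ones
+ * are passed in via siglongjmp to tell the idle thread what to do next.
+ */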
+#define INIT_JMP_NEW_THREAD 0
+#define INIT_JMP_REMOVE_SIGSTACK 1
+#define INIT_JMP_CALLBACK 2
+#define INIT_JMP_HALT 3
+#define INIT_JMP_REBOOT 4
+
+int copy_context_skas0(unsigned long new_stack, int pid)
+{
+       int err;
+       unsigned long regs[MAX_REG_NR];
+       unsigned long current_stack = current_stub_stack();
+       struct stub_data *data = (struct stub_data *) current_stack;
+       struct stub_data *child_data = (struct stub_data *) new_stack;
+       __u64 new_offset;
+       int new_fd = phys_mapping(to_phys((void *)new_stack), &new_offset);
+
+       /* Prepare the offset and fd of the child's stack as arguments for the
+        * parent's and child's mmap2 calls
+        */
+       *data = ((struct stub_data) { .offset   = MMAP_OFFSET(new_offset),
+                                     .fd       = new_fd,
+                                     .timer    = ((struct itimerval)
+                                                   { { 0, 1000000 / hz() },
+                                                     { 0, 1000000 / hz() }})});
+       get_safe_registers(regs);
+
+       /* Set parent's instruction pointer to start of clone-stub */
+       regs[REGS_IP_INDEX] = UML_CONFIG_STUB_CODE +
+                               (unsigned long) stub_clone_handler -
+                               (unsigned long) &__syscall_stub_start;
+       regs[REGS_SP_INDEX] = UML_CONFIG_STUB_DATA + PAGE_SIZE -
+               sizeof(void *);
+#ifdef __SIGNAL_FRAMESIZE
+       regs[REGS_SP_INDEX] -= __SIGNAL_FRAMESIZE;
+#endif
+       err = ptrace_setregs(pid, regs);
+       if(err < 0)
+               panic("copy_context_skas0 : PTRACE_SETREGS failed, "
+                     "pid = %d, errno = %d\n", pid, errno);
+
+       /* set a well known return code for detection of child write failure */
+       child_data->err = 12345678;
+
+       /* Wait until the parent has finished its work, then read the child's
+        * pid from the parent's stack and check for a bad result.
+        */
+       wait_stub_done(pid, 0, "copy_context_skas0");
+
+       pid = data->err;
+       if(pid < 0)
+               panic("copy_context_skas0 - stub-parent reports error %d\n",
+                     pid);
+
+       /* Wait until the child has finished too, then read the child's result
+        * from the child's stack and check it.
+        */
+       wait_stub_done(pid, -1, "copy_context_skas0");
+       if (child_data->err != UML_CONFIG_STUB_DATA)
+               panic("copy_context_skas0 - stub-child reports error %d\n",
+                     child_data->err);
+
+       if (ptrace(PTRACE_OLDSETOPTIONS, pid, NULL,
+                  (void *)PTRACE_O_TRACESYSGOOD) < 0)
+               panic("copy_context_skas0 : PTRACE_OLDSETOPTIONS failed, "
+                     "errno = %d\n", errno);
+
+       return pid;
+}
+
+/*
+ * This is used only if stub pages are needed while proc_mm is
+ * available. Opening /proc/mm creates a new mm_context which lacks
+ * the stub pages, so we map them in through the /proc/mm fd.
+ */
+void map_stub_pages(int fd, unsigned long code,
+                   unsigned long data, unsigned long stack)
+{
+       struct proc_mm_op mmop;
+       int n;
+       __u64 code_offset;
+       int code_fd = phys_mapping(to_phys((void *) &__syscall_stub_start),
+                                  &code_offset);
+
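+       /* Writing an MM_MMAP request to the /proc/mm fd maps a page into the
+        * address space behind that fd - map the stub code page first.
+        */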
+       mmop = ((struct proc_mm_op) { .op        = MM_MMAP,
+                                     .u         =
+                                     { .mmap    =
+                                       { .addr    = code,
+                                         .len     = PAGE_SIZE,
+                                         .prot    = PROT_EXEC,
+                                         .flags   = MAP_FIXED | MAP_PRIVATE,
+                                         .fd      = code_fd,
+                                         .offset  = code_offset
+       } } });
+       n = os_write_file(fd, &mmop, sizeof(mmop));
+       if(n != sizeof(mmop))
+               panic("map_stub_pages : /proc/mm map for code failed, "
+                     "err = %d\n", -n);
+
+       if ( stack ) {
+               __u64 map_offset;
+               int map_fd = phys_mapping(to_phys((void *)stack), &map_offset);
+               mmop = ((struct proc_mm_op)
+                               { .op        = MM_MMAP,
+                                 .u         =
+                                 { .mmap    =
+                                   { .addr    = data,
+                                     .len     = PAGE_SIZE,
+                                     .prot    = PROT_READ | PROT_WRITE,
+                                     .flags   = MAP_FIXED | MAP_SHARED,
+                                     .fd      = map_fd,
+                                     .offset  = map_offset
+               } } });
+               n = os_write_file(fd, &mmop, sizeof(mmop));
+               if(n != sizeof(mmop))
+                       panic("map_stub_pages : /proc/mm map for data failed, "
+                             "err = %d\n", -n);
+       }
+}
+
+void new_thread(void *stack, void **switch_buf_ptr, void **fork_buf_ptr,
+               void (*handler)(int))
+{
+       unsigned long flags;
+       sigjmp_buf switch_buf, fork_buf;
+       int enable;
+
+       *switch_buf_ptr = &switch_buf;
+       *fork_buf_ptr = &fork_buf;
+
+       /* Somewhat subtle - siglongjmp restores the signal mask before doing
+        * the longjmp.  This means that when jumping from one stack to another
+        * when the target stack has interrupts enabled, an interrupt may occur
+        * on the source stack.  This is bad when starting up a process because
+        * it's not supposed to get timer ticks until it has been scheduled.
+        * So, we disable interrupts around the sigsetjmp to ensure that
+        * they can't happen until we get back here where they are safe.
+        */
+       flags = get_signals();
+       block_signals();
+       if(UML_SIGSETJMP(&fork_buf, enable) == 0)
+               new_thread_proc(stack, handler);
+
+       remove_sigstack();
+
+       set_signals(flags);
+}
+
+void thread_wait(void *sw, void *fb)
+{
+       sigjmp_buf buf, **switch_buf = sw, *fork_buf;
+       int enable;
+
+       *switch_buf = &buf;
+       fork_buf = fb;
+       if(UML_SIGSETJMP(&buf, enable) == 0)
+               siglongjmp(*fork_buf, INIT_JMP_REMOVE_SIGSTACK);
+}
+
+void switch_threads(void *me, void *next)
+{
+       sigjmp_buf my_buf, **me_ptr = me, *next_buf = next;
+       int enable;
+
+       *me_ptr = &my_buf;
+       if(UML_SIGSETJMP(&my_buf, enable) == 0)
+               UML_SIGLONGJMP(next_buf, 1);
+}
+
+static sigjmp_buf initial_jmpbuf;
+
+/* XXX Make these percpu */
+static void (*cb_proc)(void *arg);
+static void *cb_arg;
+static sigjmp_buf *cb_back;
+
+int start_idle_thread(void *stack, void *switch_buf_ptr, void **fork_buf_ptr)
+{
+       sigjmp_buf **switch_buf = switch_buf_ptr;
+       int n, enable;
+
+       set_handler(SIGWINCH, (__sighandler_t) sig_handler,
+                   SA_ONSTACK | SA_RESTART, SIGUSR1, SIGIO, SIGALRM,
+                   SIGVTALRM, -1);
+
+       *fork_buf_ptr = &initial_jmpbuf;
+       n = UML_SIGSETJMP(&initial_jmpbuf, enable);
+       switch(n){
+       case INIT_JMP_NEW_THREAD:
+               new_thread_proc((void *) stack, new_thread_handler);
+               break;
+       case INIT_JMP_REMOVE_SIGSTACK:
+               remove_sigstack();
+               break;
+       case INIT_JMP_CALLBACK:
+               (*cb_proc)(cb_arg);
+               UML_SIGLONGJMP(cb_back, 1);
+               break;
+       case INIT_JMP_HALT:
+               kmalloc_ok = 0;
+               return(0);
+       case INIT_JMP_REBOOT:
+               kmalloc_ok = 0;
+               return(1);
+       default:
+               panic("Bad sigsetjmp return in start_idle_thread - %d\n", n);
+       }
+       UML_SIGLONGJMP(*switch_buf, 1);
+}
+
+void initial_thread_cb_skas(void (*proc)(void *), void *arg)
+{
+       sigjmp_buf here;
+       int enable;
+
+       cb_proc = proc;
+       cb_arg = arg;
+       cb_back = &here;
+
+       block_signals();
+       if(UML_SIGSETJMP(&here, enable) == 0)
+               UML_SIGLONGJMP(&initial_jmpbuf, INIT_JMP_CALLBACK);
+       unblock_signals();
+
+       cb_proc = NULL;
+       cb_arg = NULL;
+       cb_back = NULL;
+}
+
+void halt_skas(void)
+{
+       block_signals();
+       UML_SIGLONGJMP(&initial_jmpbuf, INIT_JMP_HALT);
+}
+
+void reboot_skas(void)
+{
+       block_signals();
+       UML_SIGLONGJMP(&initial_jmpbuf, INIT_JMP_REBOOT);
+}
+
+void switch_mm_skas(struct mm_id *mm_idp)
+{
+       int err;
+
+#warning need cpu pid in switch_mm_skas
+       if(proc_mm){
+               err = ptrace(PTRACE_SWITCH_MM, userspace_pid[0], 0,
+                            mm_idp->u.mm_fd);
+               if(err)
+                       panic("switch_mm_skas - PTRACE_SWITCH_MM failed, "
+                             "errno = %d\n", errno);
+       }
+       else userspace_pid[0] = mm_idp->u.pid;
+}
index b47e5e71d1a5d14d0ebefef0589ceef269a20851..6c5b17ed59e1170baca1f67854211d2c127e869a 100644 (file)
@@ -29,7 +29,6 @@
 #include "irq_user.h"
 #include "ptrace_user.h"
 #include "mem_user.h"
-#include "time_user.h"
 #include "init.h"
 #include "os.h"
 #include "uml-config.h"
index cf30a39bc4841a58ec27d0a1863732c4a4ee1ebd..6f7626775acb983e7f2e663d2312533d80c1a792 100644 (file)
+/*
+ * Copyright (C) 2000, 2001, 2002 Jeff Dike (jdike@karaya.com)
+ * Licensed under the GPL
+ */
+
+#include <stdio.h>
 #include <stdlib.h>
+#include <unistd.h>
+#include <time.h>
 #include <sys/time.h>
+#include <signal.h>
+#include <errno.h>
+#include "user_util.h"
+#include "kern_util.h"
+#include "user.h"
+#include "process.h"
+#include "kern_constants.h"
+#include "os.h"
+
+/* XXX This really needs to be declared and initialized in a kernel file since
+ * it's in <linux/time.h>
+ */
+extern struct timespec wall_to_monotonic;
+
+static void set_interval(int timer_type)
+{
+       int usec = 1000000/hz();
+       struct itimerval interval = ((struct itimerval) { { 0, usec },
+                                                         { 0, usec } });
+
+       if(setitimer(timer_type, &interval, NULL) == -1)
+               panic("setitimer failed - errno = %d\n", errno);
+}
+
+void enable_timer(void)
+{
+       set_interval(ITIMER_VIRTUAL);
+}
+
+void disable_timer(void)
+{
+       struct itimerval disable = ((struct itimerval) { { 0, 0 }, { 0, 0 }});
+       if((setitimer(ITIMER_VIRTUAL, &disable, NULL) < 0) ||
+          (setitimer(ITIMER_REAL, &disable, NULL) < 0))
+               printk("disable_timer - setitimer failed, errno = %d\n",
+                      errno);
+       /* Ignore any timer signals that were already queued, once they are unblocked */
+       set_handler(SIGALRM, SIG_IGN, 0, -1);
+       set_handler(SIGVTALRM, SIG_IGN, 0, -1);
+}
+
+void switch_timers(int to_real)
+{
+       struct itimerval disable = ((struct itimerval) { { 0, 0 }, { 0, 0 }});
+       struct itimerval enable = ((struct itimerval) { { 0, 1000000/hz() },
+                                                       { 0, 1000000/hz() }});
+       int old, new;
+
+       if(to_real){
+               old = ITIMER_VIRTUAL;
+               new = ITIMER_REAL;
+       }
+       else {
+               old = ITIMER_REAL;
+               new = ITIMER_VIRTUAL;
+       }
+
+       if((setitimer(old, &disable, NULL) < 0) ||
+          (setitimer(new, &enable, NULL)))
+               printk("switch_timers - setitimer failed, errno = %d\n",
+                      errno);
+}
 
-unsigned long long os_usecs(void)
+void uml_idle_timer(void)
+{
+       if(signal(SIGVTALRM, SIG_IGN) == SIG_ERR)
+               panic("Couldn't unset SIGVTALRM handler");
+
+       set_handler(SIGALRM, (__sighandler_t) alarm_handler,
+                   SA_RESTART, SIGUSR1, SIGIO, SIGWINCH, SIGVTALRM, -1);
+       set_interval(ITIMER_REAL);
+}
+
+extern void ktime_get_ts(struct timespec *ts);
+#define do_posix_clock_monotonic_gettime(ts) ktime_get_ts(ts)
+
+void time_init(void)
+{
+       struct timespec now;
+
+       if(signal(SIGVTALRM, boot_timer_handler) == SIG_ERR)
+               panic("Couldn't set SIGVTALRM handler");
+       set_interval(ITIMER_VIRTUAL);
+
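+       /* wall_to_monotonic is the offset that converts the monotonic clock
+        * back to wall time, so it starts out as the negative of the current
+        * time.
+        */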
+       do_posix_clock_monotonic_gettime(&now);
+       wall_to_monotonic.tv_sec = -now.tv_sec;
+       wall_to_monotonic.tv_nsec = -now.tv_nsec;
+}
+
+unsigned long long os_nsecs(void)
 {
        struct timeval tv;
 
        gettimeofday(&tv, NULL);
-       return((unsigned long long) tv.tv_sec * 1000000 + tv.tv_usec);
+       return((unsigned long long) tv.tv_sec * BILLION + tv.tv_usec * 1000);
 }
 
-/*
- * Overrides for Emacs so that we follow Linus's tabbing style.
- * Emacs will notice this stuff at the end of the file and automatically
- * adjust the settings for this buffer only.  This must remain at the end
- * of the file.
- * ---------------------------------------------------------------------------
- * Local variables:
- * c-file-style: "linux"
- * End:
- */
+void idle_sleep(int secs)
+{
+       struct timespec ts;
+
+       ts.tv_sec = secs;
+       ts.tv_nsec = 0;
+       nanosleep(&ts, NULL);
+}
+
+/* XXX This partly duplicates init_irq_signals */
+
+void user_time_init(void)
+{
+       set_handler(SIGVTALRM, (__sighandler_t) alarm_handler,
+                   SA_ONSTACK | SA_RESTART, SIGUSR1, SIGIO, SIGWINCH,
+                   SIGALRM, SIGUSR2, -1);
+       set_handler(SIGALRM, (__sighandler_t) alarm_handler,
+                   SA_ONSTACK | SA_RESTART, SIGUSR1, SIGIO, SIGWINCH,
+                   SIGVTALRM, SIGUSR2, -1);
+       set_interval(ITIMER_VIRTUAL);
+}
index 321e1c8e227d6248463c3986a1aaf1b933dcb0c0..a9f6b26f9828f8f4c0a99bc6a2f8449054be8bb8 100644 (file)
@@ -10,6 +10,7 @@
 #include "user_util.h"
 #include "os.h"
 #include "mode.h"
+#include "longjmp.h"
 
 void usr2_handler(int sig, union uml_pt_regs *regs)
 {
@@ -36,5 +37,5 @@ void do_longjmp(void *b, int val)
 {
        sigjmp_buf *buf = b;
 
-       siglongjmp(*buf, val);
+       UML_SIGLONGJMP(buf, val);
 }
index cb2648b79d0fd15090205da1f7595c7a2e639edf..919d19f11537172cf6817e3682909497eb73bd8c 100644 (file)
@@ -27,7 +27,6 @@
 #include "sysdep/sigcontext.h"
 #include "irq_user.h"
 #include "ptrace_user.h"
-#include "time_user.h"
 #include "init.h"
 #include "os.h"
 #include "uml-config.h"
@@ -63,6 +62,54 @@ void kill_child_dead(int pid)
        } while(1);
 }
 
+void stop(void)
+{
+       while(1) sleep(1000000);
+}
+
+int wait_for_stop(int pid, int sig, int cont_type, void *relay)
+{
+       sigset_t *relay_signals = relay;
+       int status, ret;
+
+       while(1){
+               CATCH_EINTR(ret = waitpid(pid, &status, WUNTRACED));
+               if((ret < 0) ||
+                  !WIFSTOPPED(status) || (WSTOPSIG(status) != sig)){
+                       if(ret < 0){
+                               printk("wait failed, errno = %d\n",
+                                      errno);
+                       }
+                       else if(WIFEXITED(status))
+                               printk("process %d exited with status %d\n",
+                                      pid, WEXITSTATUS(status));
+                       else if(WIFSIGNALED(status))
+                               printk("process %d exited with signal %d\n",
+                                      pid, WTERMSIG(status));
+                       else if((WSTOPSIG(status) == SIGVTALRM) ||
+                               (WSTOPSIG(status) == SIGALRM) ||
+                               (WSTOPSIG(status) == SIGIO) ||
+                               (WSTOPSIG(status) == SIGPROF) ||
+                               (WSTOPSIG(status) == SIGCHLD) ||
+                               (WSTOPSIG(status) == SIGWINCH) ||
+                               (WSTOPSIG(status) == SIGINT)){
+                               ptrace(cont_type, pid, 0, WSTOPSIG(status));
+                               continue;
+                       }
+                       else if((relay_signals != NULL) &&
+                               sigismember(relay_signals, WSTOPSIG(status))){
+                               ptrace(cont_type, pid, 0, WSTOPSIG(status));
+                               continue;
+                       }
+                       else printk("process %d stopped with signal %d\n",
+                                   pid, WSTOPSIG(status));
+                       panic("wait_for_stop failed to wait for %d to stop "
+                             "with %d\n", pid, sig);
+               }
+               return(status);
+       }
+}
+
 /*
  *-------------------------
  * only for tt mode (will be deleted in future...)
index 38d710158c3d816822c37a43cf3c32c0e0263255..166fb66995df87e58793c6f99a33334324eaddce 100644 (file)
@@ -6,6 +6,7 @@
 
 #include <setjmp.h>
 #include <string.h>
+#include "longjmp.h"
 
 unsigned long __do_user_copy(void *to, const void *from, int n,
                             void **fault_addr, void **fault_catcher,
@@ -13,10 +14,11 @@ unsigned long __do_user_copy(void *to, const void *from, int n,
                                        int n), int *faulted_out)
 {
        unsigned long *faddrp = (unsigned long *) fault_addr, ret;
+       int enable;
 
        sigjmp_buf jbuf;
        *fault_catcher = &jbuf;
-       if(sigsetjmp(jbuf, 1) == 0){
+       if(UML_SIGSETJMP(&jbuf, enable) == 0){
                (*op)(to, from, n);
                ret = 0;
                *faulted_out = 0;
similarity index 56%
rename from arch/um/kernel/user_util.c
rename to arch/um/os-Linux/util.c
index 4c231161f25717b2bcbfad0c31598a87488beab0..e32065e2fdc80d7203a817d8708e3d3cbed845be 100644 (file)
@@ -1,4 +1,4 @@
-/* 
+/*
  * Copyright (C) 2000, 2001, 2002 Jeff Dike (jdike@karaya.com)
  * Licensed under the GPL
  */
 #include "init.h"
 #include "ptrace_user.h"
 #include "uml-config.h"
-
-void stop(void)
-{
-       while(1) sleep(1000000);
-}
+#include "os.h"
+#include "longjmp.h"
 
 void stack_protections(unsigned long address)
 {
        int prot = PROT_READ | PROT_WRITE | PROT_EXEC;
 
-        if(mprotect((void *) address, page_size(), prot) < 0)
+       if(mprotect((void *) address, page_size(), prot) < 0)
                panic("protecting stack failed, errno = %d", errno);
 }
 
@@ -59,49 +56,6 @@ void task_protections(unsigned long address)
                panic("protecting stack failed, errno = %d", errno);
 }
 
-int wait_for_stop(int pid, int sig, int cont_type, void *relay)
-{
-       sigset_t *relay_signals = relay;
-       int status, ret;
-
-       while(1){
-               CATCH_EINTR(ret = waitpid(pid, &status, WUNTRACED));
-               if((ret < 0) ||
-                  !WIFSTOPPED(status) || (WSTOPSIG(status) != sig)){
-                       if(ret < 0){
-                               printk("wait failed, errno = %d\n",
-                                      errno);
-                       }
-                       else if(WIFEXITED(status)) 
-                               printk("process %d exited with status %d\n",
-                                      pid, WEXITSTATUS(status));
-                       else if(WIFSIGNALED(status))
-                               printk("process %d exited with signal %d\n",
-                                      pid, WTERMSIG(status));
-                       else if((WSTOPSIG(status) == SIGVTALRM) ||
-                               (WSTOPSIG(status) == SIGALRM) ||
-                               (WSTOPSIG(status) == SIGIO) ||
-                               (WSTOPSIG(status) == SIGPROF) ||
-                               (WSTOPSIG(status) == SIGCHLD) ||
-                               (WSTOPSIG(status) == SIGWINCH) ||
-                               (WSTOPSIG(status) == SIGINT)){
-                               ptrace(cont_type, pid, 0, WSTOPSIG(status));
-                               continue;
-                       }
-                       else if((relay_signals != NULL) &&
-                               sigismember(relay_signals, WSTOPSIG(status))){
-                               ptrace(cont_type, pid, 0, WSTOPSIG(status));
-                               continue;
-                       }
-                       else printk("process %d stopped with signal %d\n",
-                                   pid, WSTOPSIG(status));
-                       panic("wait_for_stop failed to wait for %d to stop "
-                             "with %d\n", pid, sig);
-               }
-               return(status);
-       }
-}
-
 int raw(int fd)
 {
        struct termios tt;
@@ -113,7 +67,7 @@ int raw(int fd)
 
        cfmakeraw(&tt);
 
-       CATCH_EINTR(err = tcsetattr(fd, TCSADRAIN, &tt));
+       CATCH_EINTR(err = tcsetattr(fd, TCSADRAIN, &tt));
        if(err < 0)
                return -errno;
 
@@ -149,7 +103,7 @@ void setup_hostinfo(void)
 
 int setjmp_wrapper(void (*proc)(void *, void *), ...)
 {
-        va_list args;
+       va_list args;
        sigjmp_buf buf;
        int n;
 
@@ -161,14 +115,3 @@ int setjmp_wrapper(void (*proc)(void *, void *), ...)
        va_end(args);
        return(n);
 }
-
-/*
- * Overrides for Emacs so that we follow Linus's tabbing style.
- * Emacs will notice this stuff at the end of the file and automatically
- * adjust the settings for this buffer only.  This must remain at the end
- * of the file.
- * ---------------------------------------------------------------------------
- * Local variables:
- * c-file-style: "linux"
- * End:
- */
index 17746b4c08ff00afee1407018b05611e78e69bbe..0cdfd4481d5e4c4214e9dfaaaff8d965c3fbddad 100644 (file)
@@ -16,6 +16,8 @@
 #include "choose-mode.h"
 #include "kern.h"
 #include "mode_kern.h"
+#include "proc_mm.h"
+#include "os.h"
 
 extern int modify_ldt(int func, void *ptr, unsigned long bytecount);
 
@@ -456,13 +458,14 @@ long init_new_ldt(struct mmu_context_skas * new_mm,
        int i;
        long page, err=0;
        void *addr = NULL;
+       struct proc_mm_op copy;
 
-       memset(&desc, 0, sizeof(desc));
 
        if(!ptrace_ldt)
                init_MUTEX(&new_mm->ldt.semaphore);
 
        if(!from_mm){
+               memset(&desc, 0, sizeof(desc));
                /*
                 * We have to initialize a clean ldt.
                 */
@@ -494,8 +497,26 @@ long init_new_ldt(struct mmu_context_skas * new_mm,
                        }
                }
                new_mm->ldt.entry_count = 0;
+
+               goto out;
        }
-       else if (!ptrace_ldt) {
+
+       if(proc_mm){
+               /* We have a valid from_mm, so we now have to copy the LDT of
+                * from_mm to new_mm, because when using proc_mm a new mm with
+                * an empty/default LDT was created in new_mm().
+                */
+               copy = ((struct proc_mm_op) { .op       = MM_COPY_SEGMENTS,
+                                             .u        =
+                                             { .copy_segments =
+                                                       from_mm->id.u.mm_fd } } );
+               i = os_write_file(new_mm->id.u.mm_fd, &copy, sizeof(copy));
+               if(i != sizeof(copy))
+                       printk("new_mm : /proc/mm copy_segments failed, "
+                              "err = %d\n", -i);
+       }
+
+       if(!ptrace_ldt) {
                /* Our local LDT is used to supply the data for
                 * modify_ldt(READLDT), if PTRACE_LDT isn't available,
                 * i.e., we have to use the stub for modify_ldt, which
@@ -524,6 +545,7 @@ long init_new_ldt(struct mmu_context_skas * new_mm,
                up(&from_mm->ldt.semaphore);
        }
 
+    out:
        return err;
 }
 
index 5231fe83ea4ba6229a5047429fd8d803f717871d..09a3eb743315313b4770ce37982893a09fe4bbbe 100644 (file)
@@ -646,6 +646,7 @@ CONFIG_8139TOO=y
 # CONFIG_DL2K is not set
 CONFIG_E1000=y
 # CONFIG_E1000_NAPI is not set
+# CONFIG_E1000_DISABLE_PACKET_SPLIT is not set
 # CONFIG_NS83820 is not set
 # CONFIG_HAMACHI is not set
 # CONFIG_YELLOWFIN is not set
index 58f5bfb52c63e2cb478f402cc9d7c8f3b259f3ef..f05c2a802489052d1db9000dc444f09dc5321ec4 100644 (file)
@@ -672,6 +672,19 @@ ia32_sys_call_table:
        .quad sys_inotify_add_watch
        .quad sys_inotify_rm_watch
        .quad sys_migrate_pages
+       .quad compat_sys_openat         /* 295 */
+       .quad sys_mkdirat
+       .quad sys_mknodat
+       .quad sys_fchownat
+       .quad sys_futimesat
+       .quad compat_sys_newfstatat     /* 300 */
+       .quad sys_unlinkat
+       .quad sys_renameat
+       .quad sys_linkat
+       .quad sys_symlinkat
+       .quad sys_readlinkat            /* 305 */
+       .quad sys_fchmodat
+       .quad sys_faccessat
 ia32_syscall_end:              
        .rept IA32_NR_syscalls-(ia32_syscall_end-ia32_sys_call_table)/8
                .quad ni_syscall
index 8ac4db09610afd501d16db48131229d20863b950..70f1bb808a20e85a98931a2136345228a738e490 100644 (file)
@@ -146,7 +146,7 @@ void pda_init(int cpu)
        pda->irqstackptr += IRQSTACKSIZE-64;
 } 
 
-char boot_exception_stacks[(N_EXCEPTION_STACKS - 2) * EXCEPTION_STKSZ + DEBUG_STKSZ]
+char boot_exception_stacks[(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ]
 __attribute__((section(".bss.page_aligned")));
 
 /* May not be marked __init: used by software suspend */
index c9f424d5399c55b588779f252e8bb41b0fe51410..96a61e029ce5e7858d2e024c08eff817b5a998d6 100644 (file)
@@ -139,35 +139,16 @@ static int elevator_attach(request_queue_t *q, struct elevator_type *e,
 
 static char chosen_elevator[16];
 
-static void elevator_setup_default(void)
+static int __init elevator_setup(char *str)
 {
-       struct elevator_type *e;
-
-       /*
-        * If default has not been set, use the compiled-in selection.
-        */
-       if (!chosen_elevator[0])
-               strcpy(chosen_elevator, CONFIG_DEFAULT_IOSCHED);
-
        /*
         * Be backwards-compatible with previous kernels, so users
         * won't get the wrong elevator.
         */
-       if (!strcmp(chosen_elevator, "as"))
+       if (!strcmp(str, "as"))
                strcpy(chosen_elevator, "anticipatory");
-
-       /*
-        * If the given scheduler is not available, fall back to the default
-        */
-       if ((e = elevator_find(chosen_elevator)))
-               elevator_put(e);
        else
-               strcpy(chosen_elevator, CONFIG_DEFAULT_IOSCHED);
-}
-
-static int __init elevator_setup(char *str)
-{
-       strncpy(chosen_elevator, str, sizeof(chosen_elevator) - 1);
+               strncpy(chosen_elevator, str, sizeof(chosen_elevator) - 1);
        return 0;
 }
 
@@ -184,14 +165,16 @@ int elevator_init(request_queue_t *q, char *name)
        q->end_sector = 0;
        q->boundary_rq = NULL;
 
-       elevator_setup_default();
+       if (name && !(e = elevator_get(name)))
+               return -EINVAL;
 
-       if (!name)
-               name = chosen_elevator;
+       if (!e && *chosen_elevator && !(e = elevator_get(chosen_elevator)))
+               printk("I/O scheduler %s not found\n", chosen_elevator);
 
-       e = elevator_get(name);
-       if (!e)
-               return -EINVAL;
+       if (!e && !(e = elevator_get(CONFIG_DEFAULT_IOSCHED))) {
+               printk("Default I/O scheduler not found, using no-op\n");
+               e = elevator_get("noop");
+       }
 
        eq = kmalloc(sizeof(struct elevator_queue), GFP_KERNEL);
        if (!eq) {
@@ -669,8 +652,10 @@ int elv_register(struct elevator_type *e)
        spin_unlock_irq(&elv_list_lock);
 
        printk(KERN_INFO "io scheduler %s registered", e->elevator_name);
-       if (!strcmp(e->elevator_name, chosen_elevator))
-               printk(" (default)");
+       if (!strcmp(e->elevator_name, chosen_elevator) ||
+                       (!*chosen_elevator &&
+                        !strcmp(e->elevator_name, CONFIG_DEFAULT_IOSCHED)))
+                               printk(" (default)");
        printk("\n");
        return 0;
 }
index 8e27d0ab0d7ccefef5a53d42c52978200040ca18..d38b4afa37ef038dd8a7e0472b1150261972bf47 100644 (file)
@@ -304,6 +304,7 @@ static inline void rq_init(request_queue_t *q, struct request *rq)
  * blk_queue_ordered - does this queue support ordered writes
  * @q:        the request queue
  * @ordered:  one of QUEUE_ORDERED_*
+ * @prepare_flush_fn: rq setup helper for cache flush ordered writes
  *
  * Description:
  *   For journalled file systems, doing ordered writes on a commit
@@ -332,6 +333,7 @@ int blk_queue_ordered(request_queue_t *q, unsigned ordered,
                return -EINVAL;
        }
 
+       q->ordered = ordered;
        q->next_ordered = ordered;
        q->prepare_flush_fn = prepare_flush_fn;
 
@@ -662,7 +664,7 @@ EXPORT_SYMBOL(blk_queue_bounce_limit);
  *    Enables a low level driver to set an upper limit on the size of
  *    received requests.
  **/
-void blk_queue_max_sectors(request_queue_t *q, unsigned short max_sectors)
+void blk_queue_max_sectors(request_queue_t *q, unsigned int max_sectors)
 {
        if ((max_sectors << 9) < PAGE_CACHE_SIZE) {
                max_sectors = 1 << (PAGE_CACHE_SHIFT - 9);
@@ -2632,6 +2634,7 @@ EXPORT_SYMBOL(blk_put_request);
 /**
  * blk_end_sync_rq - executes a completion event on a request
  * @rq: request to complete
+ * @error: end io status of the request
  */
 void blk_end_sync_rq(struct request *rq, int error)
 {
@@ -3153,7 +3156,7 @@ static int __end_that_request_first(struct request *req, int uptodate,
        if (blk_fs_request(req) && req->rq_disk) {
                const int rw = rq_data_dir(req);
 
-               __disk_stat_add(req->rq_disk, sectors[rw], nr_bytes >> 9);
+               disk_stat_add(req->rq_disk, sectors[rw], nr_bytes >> 9);
        }
 
        total_bytes = bio_nbytes = 0;
index 283c089537bcd3799226a2e87bfb43a5b62fdc31..bddf431bbb72b51dbe492461ecf50d2b07a92499 100644 (file)
@@ -68,4 +68,6 @@ source "drivers/infiniband/Kconfig"
 
 source "drivers/sn/Kconfig"
 
+source "drivers/edac/Kconfig"
+
 endmenu
index 7c45050ecd03c1b25d883429820875823a797bf4..619dd964c51cd549e906414cef6dc1d0a7f3cdea 100644 (file)
@@ -63,6 +63,7 @@ obj-$(CONFIG_PHONE)           += telephony/
 obj-$(CONFIG_MD)               += md/
 obj-$(CONFIG_BT)               += bluetooth/
 obj-$(CONFIG_ISDN)             += isdn/
+obj-$(CONFIG_EDAC)             += edac/
 obj-$(CONFIG_MCA)              += mca/
 obj-$(CONFIG_EISA)             += eisa/
 obj-$(CONFIG_CPU_FREQ)         += cpufreq/
index 07c9be6a6bbf4ceebbb9103e9f3b6b2fc1f99657..a85a60a93deb27358e7a7d7d9e6806c3bedacafe 100644 (file)
@@ -2630,7 +2630,7 @@ static int get_interface(struct slgt_info *info, int __user *if_mode)
 static int set_interface(struct slgt_info *info, int if_mode)
 {
        unsigned long flags;
-       unsigned char val;
+       unsigned short val;
 
        DBGINFO(("%s set_interface=%x)\n", info->device_name, if_mode));
        spin_lock_irqsave(&info->lock,flags);
index bc56df8a34740438f7040ec5b299727f1f5653c0..4c272189cd4209a120cebd02294e9b791f53f6e4 100644 (file)
@@ -34,7 +34,6 @@
 #include <linux/kernel.h>      /* printk() */
 #include <linux/fs.h>          /* everything... */
 #include <linux/errno.h>       /* error codes */
-#include <linux/delay.h>       /* udelay */
 #include <linux/slab.h>
 #include <linux/ioport.h>
 #include <linux/interrupt.h>
@@ -156,6 +155,8 @@ This directory exports the following interfaces.  There operation is
 documented in the MCPBL0010 TPS under the Telecom Clock API section, 11.4.
 alarms                         :
 current_ref                    :
+received_ref_clk3a             :
+received_ref_clk3b             :
 enable_clk3a_output            :
 enable_clk3b_output            :
 enable_clka0_output            :
@@ -165,7 +166,7 @@ enable_clkb1_output         :
 filter_select                  :
 hardware_switching             :
 hardware_switching_mode                :
-interrupt_switch               :
+telclock_version               :
 mode_select                    :
 refalign                       :
 reset                          :
@@ -173,7 +174,6 @@ select_amcb1_transmit_clock :
 select_amcb2_transmit_clock    :
 select_redundant_clock         :
 select_ref_frequency           :
-test_mode                      :
 
 All sysfs interfaces are integers in hex format, i.e echo 99 > refalign
 has the same effect as echo 0x99 > refalign.
@@ -226,7 +226,7 @@ static int tlclk_release(struct inode *inode, struct file *filp)
        return 0;
 }
 
-ssize_t tlclk_read(struct file *filp, char __user *buf, size_t count,
+static ssize_t tlclk_read(struct file *filp, char __user *buf, size_t count,
                loff_t *f_pos)
 {
        if (count < sizeof(struct tlclk_alarms))
@@ -242,7 +242,7 @@ ssize_t tlclk_read(struct file *filp, char __user *buf, size_t count,
        return  sizeof(struct tlclk_alarms);
 }
 
-ssize_t tlclk_write(struct file *filp, const char __user *buf, size_t count,
+static ssize_t tlclk_write(struct file *filp, const char __user *buf, size_t count,
            loff_t *f_pos)
 {
        return 0;
@@ -278,21 +278,21 @@ static ssize_t show_current_ref(struct device *d,
 static DEVICE_ATTR(current_ref, S_IRUGO, show_current_ref, NULL);
 
 
-static ssize_t show_interrupt_switch(struct device *d,
+static ssize_t show_telclock_version(struct device *d,
                struct device_attribute *attr, char *buf)
 {
        unsigned long ret_val;
        unsigned long flags;
 
        spin_lock_irqsave(&event_lock, flags);
-       ret_val = inb(TLCLK_REG6);
+       ret_val = inb(TLCLK_REG5);
        spin_unlock_irqrestore(&event_lock, flags);
 
        return sprintf(buf, "0x%lX\n", ret_val);
 }
 
-static DEVICE_ATTR(interrupt_switch, S_IRUGO,
-               show_interrupt_switch, NULL);
+static DEVICE_ATTR(telclock_version, S_IRUGO,
+               show_telclock_version, NULL);
 
 static ssize_t show_alarms(struct device *d,
                struct device_attribute *attr,  char *buf)
@@ -309,6 +309,50 @@ static ssize_t show_alarms(struct device *d,
 
 static DEVICE_ATTR(alarms, S_IRUGO, show_alarms, NULL);
 
+static ssize_t store_received_ref_clk3a(struct device *d,
+                struct device_attribute *attr, const char *buf, size_t count)
+{
+       unsigned long tmp;
+       unsigned char val;
+       unsigned long flags;
+
+       sscanf(buf, "%lX", &tmp);
+       dev_dbg(d, ": tmp = 0x%lX\n", tmp);
+
+       val = (unsigned char)tmp;
+       spin_lock_irqsave(&event_lock, flags);
+       SET_PORT_BITS(TLCLK_REG1, 0xef, val);
+       spin_unlock_irqrestore(&event_lock, flags);
+
+       return strnlen(buf, count);
+}
+
+static DEVICE_ATTR(received_ref_clk3a, S_IWUGO, NULL,
+               store_received_ref_clk3a);
+
+
+static ssize_t store_received_ref_clk3b(struct device *d,
+                struct device_attribute *attr, const char *buf, size_t count)
+{
+       unsigned long tmp;
+       unsigned char val;
+       unsigned long flags;
+
+       sscanf(buf, "%lX", &tmp);
+       dev_dbg(d, ": tmp = 0x%lX\n", tmp);
+
+       val = (unsigned char)tmp;
+       spin_lock_irqsave(&event_lock, flags);
+       SET_PORT_BITS(TLCLK_REG1, 0xef, val << 1);
+       spin_unlock_irqrestore(&event_lock, flags);
+
+       return strnlen(buf, count);
+}
+
+static DEVICE_ATTR(received_ref_clk3b, S_IWUGO, NULL,
+               store_received_ref_clk3b);
+
+
 static ssize_t store_enable_clk3b_output(struct device *d,
                 struct device_attribute *attr, const char *buf, size_t count)
 {
@@ -436,26 +480,6 @@ static ssize_t store_enable_clka0_output(struct device *d,
 static DEVICE_ATTR(enable_clka0_output, S_IWUGO, NULL,
                store_enable_clka0_output);
 
-static ssize_t store_test_mode(struct device *d,
-               struct device_attribute *attr,  const char *buf, size_t count)
-{
-       unsigned long flags;
-       unsigned long tmp;
-       unsigned char val;
-
-       sscanf(buf, "%lX", &tmp);
-       dev_dbg(d, "tmp = 0x%lX\n", tmp);
-
-       val = (unsigned char)tmp;
-       spin_lock_irqsave(&event_lock, flags);
-       SET_PORT_BITS(TLCLK_REG4, 0xfd, 2);
-       spin_unlock_irqrestore(&event_lock, flags);
-
-       return strnlen(buf, count);
-}
-
-static DEVICE_ATTR(test_mode, S_IWUGO, NULL, store_test_mode);
-
 static ssize_t store_select_amcb2_transmit_clock(struct device *d,
                struct device_attribute *attr, const char *buf, size_t count)
 {
@@ -475,7 +499,7 @@ static ssize_t store_select_amcb2_transmit_clock(struct device *d,
                        SET_PORT_BITS(TLCLK_REG3, 0xc7, 0x38);
                        switch (val) {
                        case CLK_8_592MHz:
-                               SET_PORT_BITS(TLCLK_REG0, 0xfc, 1);
+                               SET_PORT_BITS(TLCLK_REG0, 0xfc, 2);
                                break;
                        case CLK_11_184MHz:
                                SET_PORT_BITS(TLCLK_REG0, 0xfc, 0);
@@ -484,7 +508,7 @@ static ssize_t store_select_amcb2_transmit_clock(struct device *d,
                                SET_PORT_BITS(TLCLK_REG0, 0xfc, 3);
                                break;
                        case CLK_44_736MHz:
-                               SET_PORT_BITS(TLCLK_REG0, 0xfc, 2);
+                               SET_PORT_BITS(TLCLK_REG0, 0xfc, 1);
                                break;
                        }
                } else
@@ -653,9 +677,7 @@ static ssize_t store_refalign (struct device *d,
        dev_dbg(d, "tmp = 0x%lX\n", tmp);
        spin_lock_irqsave(&event_lock, flags);
        SET_PORT_BITS(TLCLK_REG0, 0xf7, 0);
-       udelay(2);
        SET_PORT_BITS(TLCLK_REG0, 0xf7, 0x08);
-       udelay(2);
        SET_PORT_BITS(TLCLK_REG0, 0xf7, 0);
        spin_unlock_irqrestore(&event_lock, flags);
 
@@ -706,15 +728,16 @@ static DEVICE_ATTR(reset, S_IWUGO, NULL, store_reset);
 
 static struct attribute *tlclk_sysfs_entries[] = {
        &dev_attr_current_ref.attr,
-       &dev_attr_interrupt_switch.attr,
+       &dev_attr_telclock_version.attr,
        &dev_attr_alarms.attr,
+       &dev_attr_received_ref_clk3a.attr,
+       &dev_attr_received_ref_clk3b.attr,
        &dev_attr_enable_clk3a_output.attr,
        &dev_attr_enable_clk3b_output.attr,
        &dev_attr_enable_clkb1_output.attr,
        &dev_attr_enable_clka1_output.attr,
        &dev_attr_enable_clkb0_output.attr,
        &dev_attr_enable_clka0_output.attr,
-       &dev_attr_test_mode.attr,
        &dev_attr_select_amcb1_transmit_clock.attr,
        &dev_attr_select_amcb2_transmit_clock.attr,
        &dev_attr_select_redundant_clock.attr,
index 277a843a87a60bc3cc001fe10529d7c053e8a7f5..7a511479ae29b615e0d8fe1fcfdd223212a7e2a8 100644 (file)
@@ -26,6 +26,7 @@
 #include <linux/slab.h>
 #include <linux/cpu.h>
 #include <linux/completion.h>
+#include <linux/mutex.h>
 
 #define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_CORE, "cpufreq-core", msg)
 
@@ -55,7 +56,7 @@ static DECLARE_RWSEM          (cpufreq_notifier_rwsem);
 
 
 static LIST_HEAD(cpufreq_governor_list);
-static DECLARE_MUTEX           (cpufreq_governor_sem);
+static DEFINE_MUTEX            (cpufreq_governor_mutex);
 
 struct cpufreq_policy * cpufreq_cpu_get(unsigned int cpu)
 {
@@ -297,18 +298,18 @@ static int cpufreq_parse_governor (char *str_governor, unsigned int *policy,
                return -EINVAL;
        } else {
                struct cpufreq_governor *t;
-               down(&cpufreq_governor_sem);
+               mutex_lock(&cpufreq_governor_mutex);
                if (!cpufreq_driver || !cpufreq_driver->target)
                        goto out;
                list_for_each_entry(t, &cpufreq_governor_list, governor_list) {
                        if (!strnicmp(str_governor,t->name,CPUFREQ_NAME_LEN)) {
                                *governor = t;
-                               up(&cpufreq_governor_sem);
+                               mutex_unlock(&cpufreq_governor_mutex);
                                return 0;
                        }
                }
        out:
-               up(&cpufreq_governor_sem);
+               mutex_unlock(&cpufreq_governor_mutex);
        }
        return -EINVAL;
 }
@@ -600,7 +601,8 @@ static int cpufreq_add_dev (struct sys_device * sys_dev)
        policy->cpu = cpu;
        policy->cpus = cpumask_of_cpu(cpu);
 
-       init_MUTEX_LOCKED(&policy->lock);
+       mutex_init(&policy->lock);
+       mutex_lock(&policy->lock);
        init_completion(&policy->kobj_unregister);
        INIT_WORK(&policy->update, handle_update, (void *)(long)cpu);
 
@@ -610,6 +612,7 @@ static int cpufreq_add_dev (struct sys_device * sys_dev)
        ret = cpufreq_driver->init(policy);
        if (ret) {
                dprintk("initialization failed\n");
+               mutex_unlock(&policy->lock);
                goto err_out;
        }
 
@@ -621,9 +624,10 @@ static int cpufreq_add_dev (struct sys_device * sys_dev)
        strlcpy(policy->kobj.name, "cpufreq", KOBJ_NAME_LEN);
 
        ret = kobject_register(&policy->kobj);
-       if (ret)
+       if (ret) {
+               mutex_unlock(&policy->lock);
                goto err_out_driver_exit;
-
+       }
        /* set up files for this cpu device */
        drv_attr = cpufreq_driver->attr;
        while ((drv_attr) && (*drv_attr)) {
@@ -641,7 +645,7 @@ static int cpufreq_add_dev (struct sys_device * sys_dev)
        spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
        policy->governor = NULL; /* to assure that the starting sequence is
                                  * run in cpufreq_set_policy */
-       up(&policy->lock);
+       mutex_unlock(&policy->lock);
        
        /* set default policy */
        
@@ -762,10 +766,10 @@ static int cpufreq_remove_dev (struct sys_device * sys_dev)
        spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
 #endif
 
-       down(&data->lock);
+       mutex_lock(&data->lock);
        if (cpufreq_driver->target)
                __cpufreq_governor(data, CPUFREQ_GOV_STOP);
-       up(&data->lock);
+       mutex_unlock(&data->lock);
 
        kobject_unregister(&data->kobj);
 
@@ -834,9 +838,9 @@ unsigned int cpufreq_quick_get(unsigned int cpu)
        unsigned int ret = 0;
 
        if (policy) {
-               down(&policy->lock);
+               mutex_lock(&policy->lock);
                ret = policy->cur;
-               up(&policy->lock);
+               mutex_unlock(&policy->lock);
                cpufreq_cpu_put(policy);
        }
 
@@ -862,7 +866,7 @@ unsigned int cpufreq_get(unsigned int cpu)
        if (!cpufreq_driver->get)
                goto out;
 
-       down(&policy->lock);
+       mutex_lock(&policy->lock);
 
        ret = cpufreq_driver->get(cpu);
 
@@ -875,7 +879,7 @@ unsigned int cpufreq_get(unsigned int cpu)
                }
        }
 
-       up(&policy->lock);
+       mutex_unlock(&policy->lock);
 
  out:
        cpufreq_cpu_put(policy);
@@ -1158,11 +1162,11 @@ int cpufreq_driver_target(struct cpufreq_policy *policy,
        if (!policy)
                return -EINVAL;
 
-       down(&policy->lock);
+       mutex_lock(&policy->lock);
 
        ret = __cpufreq_driver_target(policy, target_freq, relation);
 
-       up(&policy->lock);
+       mutex_unlock(&policy->lock);
 
        cpufreq_cpu_put(policy);
 
@@ -1199,9 +1203,9 @@ int cpufreq_governor(unsigned int cpu, unsigned int event)
        if (!policy)
                return -EINVAL;
 
-       down(&policy->lock);
+       mutex_lock(&policy->lock);
        ret = __cpufreq_governor(policy, event);
-       up(&policy->lock);
+       mutex_unlock(&policy->lock);
 
        cpufreq_cpu_put(policy);
 
@@ -1217,17 +1221,17 @@ int cpufreq_register_governor(struct cpufreq_governor *governor)
        if (!governor)
                return -EINVAL;
 
-       down(&cpufreq_governor_sem);
+       mutex_lock(&cpufreq_governor_mutex);
        
        list_for_each_entry(t, &cpufreq_governor_list, governor_list) {
                if (!strnicmp(governor->name,t->name,CPUFREQ_NAME_LEN)) {
-                       up(&cpufreq_governor_sem);
+                       mutex_unlock(&cpufreq_governor_mutex);
                        return -EBUSY;
                }
        }
        list_add(&governor->governor_list, &cpufreq_governor_list);
 
-       up(&cpufreq_governor_sem);
+       mutex_unlock(&cpufreq_governor_mutex);
 
        return 0;
 }
@@ -1239,9 +1243,9 @@ void cpufreq_unregister_governor(struct cpufreq_governor *governor)
        if (!governor)
                return;
 
-       down(&cpufreq_governor_sem);
+       mutex_lock(&cpufreq_governor_mutex);
        list_del(&governor->governor_list);
-       up(&cpufreq_governor_sem);
+       mutex_unlock(&cpufreq_governor_mutex);
        return;
 }
 EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);
@@ -1268,9 +1272,9 @@ int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
        if (!cpu_policy)
                return -EINVAL;
 
-       down(&cpu_policy->lock);
+       mutex_lock(&cpu_policy->lock);
        memcpy(policy, cpu_policy, sizeof(struct cpufreq_policy));
-       up(&cpu_policy->lock);
+       mutex_unlock(&cpu_policy->lock);
 
        cpufreq_cpu_put(cpu_policy);
 
@@ -1382,7 +1386,7 @@ int cpufreq_set_policy(struct cpufreq_policy *policy)
                return -EINVAL;
 
        /* lock this CPU */
-       down(&data->lock);
+       mutex_lock(&data->lock);
 
        ret = __cpufreq_set_policy(data, policy);
        data->user_policy.min = data->min;
@@ -1390,7 +1394,7 @@ int cpufreq_set_policy(struct cpufreq_policy *policy)
        data->user_policy.policy = data->policy;
        data->user_policy.governor = data->governor;
 
-       up(&data->lock);
+       mutex_unlock(&data->lock);
        cpufreq_cpu_put(data);
 
        return ret;
@@ -1414,7 +1418,7 @@ int cpufreq_update_policy(unsigned int cpu)
        if (!data)
                return -ENODEV;
 
-       down(&data->lock);
+       mutex_lock(&data->lock);
 
        dprintk("updating policy for CPU %u\n", cpu);
        memcpy(&policy, 
@@ -1425,9 +1429,17 @@ int cpufreq_update_policy(unsigned int cpu)
        policy.policy = data->user_policy.policy;
        policy.governor = data->user_policy.governor;
 
+       /* The BIOS might change the frequency behind our back, so ask the
+        * driver for the current frequency and notify the governors about
+        * the change. */
+       if (cpufreq_driver->get) {
+               policy.cur = cpufreq_driver->get(cpu);
+               if (data->cur != policy.cur)
+                       cpufreq_out_of_sync(cpu, data->cur, policy.cur);
+       }
+
        ret = __cpufreq_set_policy(data, &policy);
 
-       up(&data->lock);
+       mutex_unlock(&data->lock);
 
        cpufreq_cpu_put(data);
        return ret;
index 39543a2bed0f43c232e724ce809f1c89d438f6e1..ac38766b2583eec5b5db349b7227e443d6b1177c 100644 (file)
@@ -28,7 +28,7 @@
 #include <linux/jiffies.h>
 #include <linux/kernel_stat.h>
 #include <linux/percpu.h>
-
+#include <linux/mutex.h>
 /*
  * dbs is used in this file as a shortform for demandbased switching
  * It helps to keep variable names smaller, simpler
@@ -71,7 +71,7 @@ static DEFINE_PER_CPU(struct cpu_dbs_info_s, cpu_dbs_info);
 
 static unsigned int dbs_enable;        /* number of CPUs using this policy */
 
-static DECLARE_MUTEX   (dbs_sem);
+static DEFINE_MUTEX    (dbs_mutex);
 static DECLARE_WORK    (dbs_work, do_dbs_timer, NULL);
 
 struct dbs_tuners {
@@ -139,9 +139,9 @@ static ssize_t store_sampling_down_factor(struct cpufreq_policy *unused,
        if (ret != 1 )
                return -EINVAL;
 
-       down(&dbs_sem);
+       mutex_lock(&dbs_mutex);
        dbs_tuners_ins.sampling_down_factor = input;
-       up(&dbs_sem);
+       mutex_unlock(&dbs_mutex);
 
        return count;
 }
@@ -153,14 +153,14 @@ static ssize_t store_sampling_rate(struct cpufreq_policy *unused,
        int ret;
        ret = sscanf (buf, "%u", &input);
 
-       down(&dbs_sem);
+       mutex_lock(&dbs_mutex);
        if (ret != 1 || input > MAX_SAMPLING_RATE || input < MIN_SAMPLING_RATE) {
-               up(&dbs_sem);
+               mutex_unlock(&dbs_mutex);
                return -EINVAL;
        }
 
        dbs_tuners_ins.sampling_rate = input;
-       up(&dbs_sem);
+       mutex_unlock(&dbs_mutex);
 
        return count;
 }
@@ -172,16 +172,16 @@ static ssize_t store_up_threshold(struct cpufreq_policy *unused,
        int ret;
        ret = sscanf (buf, "%u", &input);
 
-       down(&dbs_sem);
+       mutex_lock(&dbs_mutex);
        if (ret != 1 || input > MAX_FREQUENCY_UP_THRESHOLD || 
                        input < MIN_FREQUENCY_UP_THRESHOLD ||
                        input <= dbs_tuners_ins.down_threshold) {
-               up(&dbs_sem);
+               mutex_unlock(&dbs_mutex);
                return -EINVAL;
        }
 
        dbs_tuners_ins.up_threshold = input;
-       up(&dbs_sem);
+       mutex_unlock(&dbs_mutex);
 
        return count;
 }
@@ -193,16 +193,16 @@ static ssize_t store_down_threshold(struct cpufreq_policy *unused,
        int ret;
        ret = sscanf (buf, "%u", &input);
 
-       down(&dbs_sem);
+       mutex_lock(&dbs_mutex);
        if (ret != 1 || input > MAX_FREQUENCY_DOWN_THRESHOLD || 
                        input < MIN_FREQUENCY_DOWN_THRESHOLD ||
                        input >= dbs_tuners_ins.up_threshold) {
-               up(&dbs_sem);
+               mutex_unlock(&dbs_mutex);
                return -EINVAL;
        }
 
        dbs_tuners_ins.down_threshold = input;
-       up(&dbs_sem);
+       mutex_unlock(&dbs_mutex);
 
        return count;
 }
@@ -222,9 +222,9 @@ static ssize_t store_ignore_nice_load(struct cpufreq_policy *policy,
        if ( input > 1 )
                input = 1;
        
-       down(&dbs_sem);
+       mutex_lock(&dbs_mutex);
        if ( input == dbs_tuners_ins.ignore_nice ) { /* nothing to do */
-               up(&dbs_sem);
+               mutex_unlock(&dbs_mutex);
                return count;
        }
        dbs_tuners_ins.ignore_nice = input;
@@ -236,7 +236,7 @@ static ssize_t store_ignore_nice_load(struct cpufreq_policy *policy,
                j_dbs_info->prev_cpu_idle_up = get_cpu_idle_time(j);
                j_dbs_info->prev_cpu_idle_down = j_dbs_info->prev_cpu_idle_up;
        }
-       up(&dbs_sem);
+       mutex_unlock(&dbs_mutex);
 
        return count;
 }
@@ -257,9 +257,9 @@ static ssize_t store_freq_step(struct cpufreq_policy *policy,
        
        /* no need to test here if freq_step is zero as the user might actually
         * want this, they would be crazy though :) */
-       down(&dbs_sem);
+       mutex_lock(&dbs_mutex);
        dbs_tuners_ins.freq_step = input;
-       up(&dbs_sem);
+       mutex_unlock(&dbs_mutex);
 
        return count;
 }
@@ -444,12 +444,12 @@ static void dbs_check_cpu(int cpu)
 static void do_dbs_timer(void *data)
 { 
        int i;
-       down(&dbs_sem);
+       mutex_lock(&dbs_mutex);
        for_each_online_cpu(i)
                dbs_check_cpu(i);
        schedule_delayed_work(&dbs_work, 
                        usecs_to_jiffies(dbs_tuners_ins.sampling_rate));
-       up(&dbs_sem);
+       mutex_unlock(&dbs_mutex);
 } 
 
 static inline void dbs_timer_init(void)
@@ -487,7 +487,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
                if (this_dbs_info->enable) /* Already enabled */
                        break;
                 
-               down(&dbs_sem);
+               mutex_lock(&dbs_mutex);
                for_each_cpu_mask(j, policy->cpus) {
                        struct cpu_dbs_info_s *j_dbs_info;
                        j_dbs_info = &per_cpu(cpu_dbs_info, j);
@@ -521,11 +521,11 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
                        dbs_timer_init();
                }
                
-               up(&dbs_sem);
+               mutex_unlock(&dbs_mutex);
                break;
 
        case CPUFREQ_GOV_STOP:
-               down(&dbs_sem);
+               mutex_lock(&dbs_mutex);
                this_dbs_info->enable = 0;
                sysfs_remove_group(&policy->kobj, &dbs_attr_group);
                dbs_enable--;
@@ -536,12 +536,12 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
                if (dbs_enable == 0) 
                        dbs_timer_exit();
                
-               up(&dbs_sem);
+               mutex_unlock(&dbs_mutex);
 
                break;
 
        case CPUFREQ_GOV_LIMITS:
-               down(&dbs_sem);
+               mutex_lock(&dbs_mutex);
                if (policy->max < this_dbs_info->cur_policy->cur)
                        __cpufreq_driver_target(
                                        this_dbs_info->cur_policy,
@@ -550,7 +550,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
                        __cpufreq_driver_target(
                                        this_dbs_info->cur_policy,
                                        policy->min, CPUFREQ_RELATION_L);
-               up(&dbs_sem);
+               mutex_unlock(&dbs_mutex);
                break;
        }
        return 0;
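
Every store_* handler converted above follows the same shape: parse the new value, take dbs_mutex, validate against the tunable's limits, update dbs_tuners_ins, drop the lock, and return -EINVAL or the byte count. A condensed sketch of that shape, reusing the file's dbs_mutex and dbs_tuners_ins; the bounds and the .example field are illustrative, not real conservative-governor tunables:

#define MIN_EXAMPLE 1                   /* hypothetical bounds, for illustration */
#define MAX_EXAMPLE 100

static ssize_t store_example(struct cpufreq_policy *unused,
                             const char *buf, size_t count)
{
        unsigned int input;
        int ret = sscanf(buf, "%u", &input);

        mutex_lock(&dbs_mutex);
        if (ret != 1 || input > MAX_EXAMPLE || input < MIN_EXAMPLE) {
                mutex_unlock(&dbs_mutex);
                return -EINVAL;         /* malformed or out-of-range input */
        }
        dbs_tuners_ins.example = input; /* hypothetical tunable field */
        mutex_unlock(&dbs_mutex);

        return count;
}
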
index e69fd8dd1f1cb7bf6042a878b2545a714c7df133..9ee9411f186f9b8738f4a7320991f058576733d7 100644 (file)
@@ -27,6 +27,7 @@
 #include <linux/jiffies.h>
 #include <linux/kernel_stat.h>
 #include <linux/percpu.h>
+#include <linux/mutex.h>
 
 /*
  * dbs is used in this file as a shortform for demandbased switching
@@ -70,7 +71,7 @@ static DEFINE_PER_CPU(struct cpu_dbs_info_s, cpu_dbs_info);
 
 static unsigned int dbs_enable;        /* number of CPUs using this policy */
 
-static DECLARE_MUTEX   (dbs_sem);
+static DEFINE_MUTEX    (dbs_mutex);
 static DECLARE_WORK    (dbs_work, do_dbs_timer, NULL);
 
 struct dbs_tuners {
@@ -136,9 +137,9 @@ static ssize_t store_sampling_down_factor(struct cpufreq_policy *unused,
        if (input > MAX_SAMPLING_DOWN_FACTOR || input < 1)
                return -EINVAL;
 
-       down(&dbs_sem);
+       mutex_lock(&dbs_mutex);
        dbs_tuners_ins.sampling_down_factor = input;
-       up(&dbs_sem);
+       mutex_unlock(&dbs_mutex);
 
        return count;
 }
@@ -150,14 +151,14 @@ static ssize_t store_sampling_rate(struct cpufreq_policy *unused,
        int ret;
        ret = sscanf (buf, "%u", &input);
 
-       down(&dbs_sem);
+       mutex_lock(&dbs_mutex);
        if (ret != 1 || input > MAX_SAMPLING_RATE || input < MIN_SAMPLING_RATE) {
-               up(&dbs_sem);
+               mutex_unlock(&dbs_mutex);
                return -EINVAL;
        }
 
        dbs_tuners_ins.sampling_rate = input;
-       up(&dbs_sem);
+       mutex_unlock(&dbs_mutex);
 
        return count;
 }
@@ -169,15 +170,15 @@ static ssize_t store_up_threshold(struct cpufreq_policy *unused,
        int ret;
        ret = sscanf (buf, "%u", &input);
 
-       down(&dbs_sem);
+       mutex_lock(&dbs_mutex);
        if (ret != 1 || input > MAX_FREQUENCY_UP_THRESHOLD || 
                        input < MIN_FREQUENCY_UP_THRESHOLD) {
-               up(&dbs_sem);
+               mutex_unlock(&dbs_mutex);
                return -EINVAL;
        }
 
        dbs_tuners_ins.up_threshold = input;
-       up(&dbs_sem);
+       mutex_unlock(&dbs_mutex);
 
        return count;
 }
@@ -197,9 +198,9 @@ static ssize_t store_ignore_nice_load(struct cpufreq_policy *policy,
        if ( input > 1 )
                input = 1;
        
-       down(&dbs_sem);
+       mutex_lock(&dbs_mutex);
        if ( input == dbs_tuners_ins.ignore_nice ) { /* nothing to do */
-               up(&dbs_sem);
+               mutex_unlock(&dbs_mutex);
                return count;
        }
        dbs_tuners_ins.ignore_nice = input;
@@ -211,7 +212,7 @@ static ssize_t store_ignore_nice_load(struct cpufreq_policy *policy,
                j_dbs_info->prev_cpu_idle_up = get_cpu_idle_time(j);
                j_dbs_info->prev_cpu_idle_down = j_dbs_info->prev_cpu_idle_up;
        }
-       up(&dbs_sem);
+       mutex_unlock(&dbs_mutex);
 
        return count;
 }
@@ -356,12 +357,12 @@ static void dbs_check_cpu(int cpu)
 static void do_dbs_timer(void *data)
 { 
        int i;
-       down(&dbs_sem);
+       mutex_lock(&dbs_mutex);
        for_each_online_cpu(i)
                dbs_check_cpu(i);
        schedule_delayed_work(&dbs_work, 
                        usecs_to_jiffies(dbs_tuners_ins.sampling_rate));
-       up(&dbs_sem);
+       mutex_unlock(&dbs_mutex);
 } 
 
 static inline void dbs_timer_init(void)
@@ -399,7 +400,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
                if (this_dbs_info->enable) /* Already enabled */
                        break;
                 
-               down(&dbs_sem);
+               mutex_lock(&dbs_mutex);
                for_each_cpu_mask(j, policy->cpus) {
                        struct cpu_dbs_info_s *j_dbs_info;
                        j_dbs_info = &per_cpu(cpu_dbs_info, j);
@@ -435,11 +436,11 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
                        dbs_timer_init();
                }
                
-               up(&dbs_sem);
+               mutex_unlock(&dbs_mutex);
                break;
 
        case CPUFREQ_GOV_STOP:
-               down(&dbs_sem);
+               mutex_lock(&dbs_mutex);
                this_dbs_info->enable = 0;
                sysfs_remove_group(&policy->kobj, &dbs_attr_group);
                dbs_enable--;
@@ -450,12 +451,12 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
                if (dbs_enable == 0) 
                        dbs_timer_exit();
                
-               up(&dbs_sem);
+               mutex_unlock(&dbs_mutex);
 
                break;
 
        case CPUFREQ_GOV_LIMITS:
-               down(&dbs_sem);
+               mutex_lock(&dbs_mutex);
                if (policy->max < this_dbs_info->cur_policy->cur)
                        __cpufreq_driver_target(
                                        this_dbs_info->cur_policy,
@@ -464,7 +465,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
                        __cpufreq_driver_target(
                                        this_dbs_info->cur_policy,
                                        policy->min, CPUFREQ_RELATION_L);
-               up(&dbs_sem);
+               mutex_unlock(&dbs_mutex);
                break;
        }
        return 0;
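
do_dbs_timer() above is a self-rearming work item: each run samples every online CPU under dbs_mutex and then queues itself again after the sampling interval (dbs_timer_exit(), not shown in these hunks, cancels it once the last CPU leaves the governor). A stripped-down sketch of that rearming pattern, reusing the file's dbs_mutex and sampling_rate tunable; sample_one() is a made-up stand-in for dbs_check_cpu():

static void sample_one(int cpu);        /* hypothetical per-CPU sampling hook */
static void example_timer_fn(void *data);
static DECLARE_WORK(example_work, example_timer_fn, NULL);

static void example_timer_fn(void *data)
{
        int cpu;

        mutex_lock(&dbs_mutex);
        for_each_online_cpu(cpu)
                sample_one(cpu);
        /* re-arm after the configured sampling period */
        schedule_delayed_work(&example_work,
                        usecs_to_jiffies(dbs_tuners_ins.sampling_rate));
        mutex_unlock(&dbs_mutex);
}
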
index d32bf3593cd31433a40f7a1f70f839a1153e0a63..92a0be22a2a95bd92dd19c4c152880608910716d 100644 (file)
@@ -1,3 +1,4 @@
+
 /*
  *  linux/drivers/cpufreq/cpufreq_userspace.c
  *
@@ -21,6 +22,7 @@
 #include <linux/types.h>
 #include <linux/fs.h>
 #include <linux/sysfs.h>
+#include <linux/mutex.h>
 
 #include <asm/uaccess.h>
 
@@ -33,9 +35,8 @@ static unsigned int   cpu_min_freq[NR_CPUS];
 static unsigned int    cpu_cur_freq[NR_CPUS]; /* current CPU freq */
 static unsigned int    cpu_set_freq[NR_CPUS]; /* CPU freq desired by userspace */
 static unsigned int    cpu_is_managed[NR_CPUS];
-static struct cpufreq_policy current_policy[NR_CPUS];
 
-static DECLARE_MUTEX   (userspace_sem); 
+static DEFINE_MUTEX    (userspace_mutex);
 
 #define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_GOVERNOR, "userspace", msg)
 
@@ -64,35 +65,34 @@ static struct notifier_block userspace_cpufreq_notifier_block = {
  *
  * Sets the CPU frequency to freq.
  */
-static int cpufreq_set(unsigned int freq, unsigned int cpu)
+static int cpufreq_set(unsigned int freq, struct cpufreq_policy *policy)
 {
        int ret = -EINVAL;
 
-       dprintk("cpufreq_set for cpu %u, freq %u kHz\n", cpu, freq);
+       dprintk("cpufreq_set for cpu %u, freq %u kHz\n", policy->cpu, freq);
 
-       down(&userspace_sem);
-       if (!cpu_is_managed[cpu])
+       mutex_lock(&userspace_mutex);
+       if (!cpu_is_managed[policy->cpu])
                goto err;
 
-       cpu_set_freq[cpu] = freq;
+       cpu_set_freq[policy->cpu] = freq;
 
-       if (freq < cpu_min_freq[cpu])
-               freq = cpu_min_freq[cpu];
-       if (freq > cpu_max_freq[cpu])
-               freq = cpu_max_freq[cpu];
+       if (freq < cpu_min_freq[policy->cpu])
+               freq = cpu_min_freq[policy->cpu];
+       if (freq > cpu_max_freq[policy->cpu])
+               freq = cpu_max_freq[policy->cpu];
 
        /*
         * We're safe from concurrent calls to ->target() here
-        * as we hold the userspace_sem lock. If we were calling
+        * as we hold the userspace_mutex lock. If we were calling
         * cpufreq_driver_target, a deadlock situation might occur:
-        * A: cpufreq_set (lock userspace_sem) -> cpufreq_driver_target(lock policy->lock)
-        * B: cpufreq_set_policy(lock policy->lock) -> __cpufreq_governor -> cpufreq_governor_userspace (lock userspace_sem)
+        * A: cpufreq_set (lock userspace_mutex) -> cpufreq_driver_target(lock policy->lock)
+        * B: cpufreq_set_policy(lock policy->lock) -> __cpufreq_governor -> cpufreq_governor_userspace (lock userspace_mutex)
         */
-       ret = __cpufreq_driver_target(&current_policy[cpu], freq, 
-             CPUFREQ_RELATION_L);
+       ret = __cpufreq_driver_target(policy, freq, CPUFREQ_RELATION_L);
 
  err:
-       up(&userspace_sem);
+       mutex_unlock(&userspace_mutex);
        return ret;
 }
 
@@ -113,7 +113,7 @@ store_speed (struct cpufreq_policy *policy, const char *buf, size_t count)
        if (ret != 1)
                return -EINVAL;
 
-       cpufreq_set(freq, policy->cpu);
+       cpufreq_set(freq, policy);
 
        return count;
 }
@@ -134,44 +134,48 @@ static int cpufreq_governor_userspace(struct cpufreq_policy *policy,
                if (!cpu_online(cpu))
                        return -EINVAL;
                BUG_ON(!policy->cur);
-               down(&userspace_sem);
+               mutex_lock(&userspace_mutex);
                cpu_is_managed[cpu] = 1;                
                cpu_min_freq[cpu] = policy->min;
                cpu_max_freq[cpu] = policy->max;
                cpu_cur_freq[cpu] = policy->cur;
                cpu_set_freq[cpu] = policy->cur;
                sysfs_create_file (&policy->kobj, &freq_attr_scaling_setspeed.attr);
-               memcpy (&current_policy[cpu], policy, sizeof(struct cpufreq_policy));
                dprintk("managing cpu %u started (%u - %u kHz, currently %u kHz)\n", cpu, cpu_min_freq[cpu], cpu_max_freq[cpu], cpu_cur_freq[cpu]);
-               up(&userspace_sem);
+               mutex_unlock(&userspace_mutex);
                break;
        case CPUFREQ_GOV_STOP:
-               down(&userspace_sem);
+               mutex_lock(&userspace_mutex);
                cpu_is_managed[cpu] = 0;
                cpu_min_freq[cpu] = 0;
                cpu_max_freq[cpu] = 0;
                cpu_set_freq[cpu] = 0;
                sysfs_remove_file (&policy->kobj, &freq_attr_scaling_setspeed.attr);
                dprintk("managing cpu %u stopped\n", cpu);
-               up(&userspace_sem);
+               mutex_unlock(&userspace_mutex);
                break;
        case CPUFREQ_GOV_LIMITS:
-               down(&userspace_sem);
-               cpu_min_freq[cpu] = policy->min;
-               cpu_max_freq[cpu] = policy->max;
-               dprintk("limit event for cpu %u: %u - %u kHz, currently %u kHz, last set to %u kHz\n", cpu, cpu_min_freq[cpu], cpu_max_freq[cpu], cpu_cur_freq[cpu], cpu_set_freq[cpu]);
+               mutex_lock(&userspace_mutex);
+               dprintk("limit event for cpu %u: %u - %u kHz,"
+                       "currently %u kHz, last set to %u kHz\n",
+                       cpu, policy->min, policy->max,
+                       cpu_cur_freq[cpu], cpu_set_freq[cpu]);
                if (policy->max < cpu_set_freq[cpu]) {
-                       __cpufreq_driver_target(&current_policy[cpu], policy->max, 
-                             CPUFREQ_RELATION_H);
-               } else if (policy->min > cpu_set_freq[cpu]) {
-                       __cpufreq_driver_target(&current_policy[cpu], policy->min, 
-                             CPUFREQ_RELATION_L);
-               } else {
-                       __cpufreq_driver_target(&current_policy[cpu], cpu_set_freq[cpu],
-                             CPUFREQ_RELATION_L);
+                       __cpufreq_driver_target(policy, policy->max,
+                                               CPUFREQ_RELATION_H);
+               }
+               else if (policy->min > cpu_set_freq[cpu]) {
+                       __cpufreq_driver_target(policy, policy->min,
+                                               CPUFREQ_RELATION_L);
                }
-               memcpy (&current_policy[cpu], policy, sizeof(struct cpufreq_policy));
-               up(&userspace_sem);
+               else {
+                       __cpufreq_driver_target(policy, cpu_set_freq[cpu],
+                                               CPUFREQ_RELATION_L);
+               }
+               cpu_min_freq[cpu] = policy->min;
+               cpu_max_freq[cpu] = policy->max;
+               cpu_cur_freq[cpu] = policy->cur;
+               mutex_unlock(&userspace_mutex);
                break;
        }
        return 0;
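
The userspace governor also stops caching its own copy of each policy: cpufreq_set() and the GOV_LIMITS path now target the live struct cpufreq_policy handed in by the core, so the limits can never go stale. In the GOV_LIMITS branch the relation argument carries the intent: CPUFREQ_RELATION_H asks the driver for the highest available frequency at or below the target (used when clamping down to policy->max), while CPUFREQ_RELATION_L asks for the lowest frequency at or above the target (used for policy->min and for restoring the user's requested speed). A toy userspace model of the two relations against a hypothetical three-entry frequency table:

#include <assert.h>

/* hypothetical ascending frequency table, in kHz */
static const unsigned int table[] = { 600000, 800000, 1000000 };
#define TABLE_LEN (sizeof(table) / sizeof(table[0]))

/* highest entry at or below target (CPUFREQ_RELATION_H); lowest as fallback */
static unsigned int pick_h(unsigned int target)
{
        unsigned int i, best = table[0];

        for (i = 0; i < TABLE_LEN; i++)
                if (table[i] <= target)
                        best = table[i];
        return best;
}

/* lowest entry at or above target (CPUFREQ_RELATION_L); highest as fallback */
static unsigned int pick_l(unsigned int target)
{
        unsigned int i;

        for (i = 0; i < TABLE_LEN; i++)
                if (table[i] >= target)
                        return table[i];
        return table[TABLE_LEN - 1];
}

int main(void)
{
        assert(pick_h(900000) ==  800000);      /* clamping toward policy->max */
        assert(pick_l(900000) == 1000000);      /* raising toward policy->min  */
        return 0;
}
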
diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig
new file mode 100644 (file)
index 0000000..4819e7f
--- /dev/null
@@ -0,0 +1,102 @@
+#
+#      EDAC Kconfig
+#      Copyright (c) 2003 Linux Networx
+#      Licensed and distributed under the GPL
+#
+# $Id: Kconfig,v 1.4.2.7 2005/07/08 22:05:38 dsp_llnl Exp $
+#
+
+menu 'EDAC - error detection and reporting (RAS)'
+
+config EDAC
+       tristate "EDAC core system error reporting"
+       depends on X86
+       default y
+       help
+         EDAC is designed to report errors in the core system.
+         These are low-level errors that are reported in the CPU or
+         supporting chipset: memory errors, cache errors, PCI errors,
+         thermal throttling, etc..  If unsure, select 'Y'.
+
+
+comment "Reporting subsystems"
+       depends on EDAC
+
+config EDAC_DEBUG
+       bool "Debugging"
+       depends on EDAC
+       help
+         This turns on debugging information for the entire EDAC
+         sub-system. You can insert the module with "debug_level=x"; there
+         are currently four debug levels (x=0,1,2,3, from low to high).
+         Usually you should select 'N'.
+
+config EDAC_MM_EDAC
+       tristate "Main Memory EDAC (Error Detection And Correction) reporting"
+       depends on EDAC
+       default y
+       help
+         Some systems are able to detect and correct errors in main
+         memory.  EDAC can report statistics on memory error
+         detection and correction (EDAC - or commonly referred to ECC
+         errors).  EDAC will also try to decode where these errors
+         occurred so that a particular failing memory module can be
+         replaced.  If unsure, select 'Y'.
+
+
+config EDAC_AMD76X
+       tristate "AMD 76x (760, 762, 768)"
+       depends on EDAC_MM_EDAC  && PCI
+       help
+         Support for error detection and correction on the AMD 76x
+         series of chipsets used with the Athlon processor.
+
+config EDAC_E7XXX
+       tristate "Intel e7xxx (e7205, e7500, e7501, e7505)"
+       depends on EDAC_MM_EDAC && PCI
+       help
+         Support for error detection and correction on the Intel
+         E7205, E7500, E7501 and E7505 server chipsets.
+
+config EDAC_E752X
+       tristate "Intel e752x (e7520, e7525, e7320)"
+       depends on EDAC_MM_EDAC && PCI
+       help
+         Support for error detection and correction on the Intel
+         E7520, E7525, E7320 server chipsets.
+
+config EDAC_I82875P
+       tristate "Intel 82875p (D82875P, E7210)"
+       depends on EDAC_MM_EDAC && PCI
+       help
+         Support for error detection and correction on the Intel
+         D82875P and E7210 server chipsets.
+
+config EDAC_I82860
+       tristate "Intel 82860"
+       depends on EDAC_MM_EDAC && PCI
+       help
+         Support for error detection and correction on the Intel
+         82860 chipset.
+
+config EDAC_R82600
+       tristate "Radisys 82600 embedded chipset"
+       depends on EDAC_MM_EDAC
+       help
+         Support for error detection and correction on the Radisys
+         82600 embedded chipset.
+
+choice
+       prompt "Error detecting method"
+       depends on EDAC
+       default EDAC_POLL
+
+config EDAC_POLL
+       bool "Poll for errors"
+       depends on EDAC
+       help
+         Poll the chipset periodically to detect errors.
+
+endchoice
+
+endmenu
diff --git a/drivers/edac/Makefile b/drivers/edac/Makefile
new file mode 100644 (file)
index 0000000..93137fd
--- /dev/null
@@ -0,0 +1,18 @@
+#
+# Makefile for the Linux kernel EDAC drivers.
+#
+# Copyright 02 Jul 2003, Linux Networx (http://lnxi.com)
+# This file may be distributed under the terms of the
+# GNU General Public License.
+#
+# $Id: Makefile,v 1.4.2.3 2005/07/08 22:05:38 dsp_llnl Exp $
+
+
+obj-$(CONFIG_EDAC_MM_EDAC)             += edac_mc.o
+obj-$(CONFIG_EDAC_AMD76X)              += amd76x_edac.o
+obj-$(CONFIG_EDAC_E7XXX)               += e7xxx_edac.o
+obj-$(CONFIG_EDAC_E752X)               += e752x_edac.o
+obj-$(CONFIG_EDAC_I82875P)             += i82875p_edac.o
+obj-$(CONFIG_EDAC_I82860)              += i82860_edac.o
+obj-$(CONFIG_EDAC_R82600)              += r82600_edac.o
+
diff --git a/drivers/edac/amd76x_edac.c b/drivers/edac/amd76x_edac.c
new file mode 100644 (file)
index 0000000..2fcc812
--- /dev/null
@@ -0,0 +1,356 @@
+/*
+ * AMD 76x Memory Controller kernel module
+ * (C) 2003 Linux Networx (http://lnxi.com)
+ * This file may be distributed under the terms of the
+ * GNU General Public License.
+ *
+ * Written by Thayne Harbaugh
+ * Based on work by Dan Hollis <goemon at anime dot net> and others.
+ *     http://www.anime.net/~goemon/linux-ecc/
+ *
+ * $Id: edac_amd76x.c,v 1.4.2.5 2005/10/05 00:43:44 dsp_llnl Exp $
+ *
+ */
+
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/init.h>
+
+#include <linux/pci.h>
+#include <linux/pci_ids.h>
+
+#include <linux/slab.h>
+
+#include "edac_mc.h"
+
+
+#define AMD76X_NR_CSROWS 8
+#define AMD76X_NR_CHANS  1
+#define AMD76X_NR_DIMMS  4
+
+
+/* AMD 76x register addresses - device 0 function 0 - PCI bridge */
+#define AMD76X_ECC_MODE_STATUS 0x48    /* Mode and status of ECC (32b)
+                                        *
+                                        * 31:16 reserved
+                                        * 15:14 SERR enabled: x1=ue 1x=ce
+                                        * 13    reserved
+                                        * 12    diag: disabled, enabled
+                                        * 11:10 mode: dis, EC, ECC, ECC+scrub
+                                        *  9:8  status: x1=ue 1x=ce
+                                        *  7:4  UE cs row
+                                        *  3:0  CE cs row
+                                        */
+#define AMD76X_DRAM_MODE_STATUS        0x58    /* DRAM Mode and status (32b)
+                                        *
+                                        * 31:26 clock disable 5 - 0
+                                        * 25    SDRAM init
+                                        * 24    reserved
+                                        * 23    mode register service
+                                        * 22:21 suspend to RAM
+                                        * 20    burst refresh enable
+                                        * 19    refresh disable
+                                        * 18    reserved
+                                        * 17:16 cycles-per-refresh
+                                        * 15:8  reserved
+                                        *  7:0  x4 mode enable 7 - 0
+                                        */
+#define AMD76X_MEM_BASE_ADDR   0xC0    /* Memory base address (8 x 32b)
+                                        *
+                                        * 31:23 chip-select base
+                                        * 22:16 reserved
+                                        * 15:7  chip-select mask
+                                        *  6:3  reserved
+                                        *  2:1  address mode
+                                        *  0    chip-select enable
+                                        */
+
+
+struct amd76x_error_info {
+       u32 ecc_mode_status;
+};
+
+
+enum amd76x_chips {
+       AMD761 = 0,
+       AMD762
+};
+
+
+struct amd76x_dev_info {
+       const char *ctl_name;
+};
+
+
+static const struct amd76x_dev_info amd76x_devs[] = {
+       [AMD761] = {.ctl_name = "AMD761"},
+       [AMD762] = {.ctl_name = "AMD762"},
+};
+
+
+/**
+ *     amd76x_get_error_info   -       fetch error information
+ *     @mci: Memory controller
+ *     @info: Info to fill in
+ *
+ *     Fetch and store the AMD76x ECC status. Clear pending status
+ *     on the chip so that further errors will be reported
+ */
+
+static void amd76x_get_error_info (struct mem_ctl_info *mci,
+                                  struct amd76x_error_info *info)
+{
+       pci_read_config_dword(mci->pdev, AMD76X_ECC_MODE_STATUS,
+                               &info->ecc_mode_status);
+
+       if (info->ecc_mode_status & BIT(8))
+               pci_write_bits32(mci->pdev, AMD76X_ECC_MODE_STATUS,
+                                  (u32) BIT(8), (u32) BIT(8));
+
+       if (info->ecc_mode_status & BIT(9))
+               pci_write_bits32(mci->pdev, AMD76X_ECC_MODE_STATUS,
+                                  (u32) BIT(9), (u32) BIT(9));
+}
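
The two pci_write_bits32() calls above clear the pending status: that edac_mc.h helper read-modify-writes the config dword so only the bits named by the mask are touched, and writing the latched UE/CE bit back as 1 re-arms it so further errors can be reported. A rough open-coded equivalent of one such call, assuming that read-modify-write behaviour:

#include <linux/pci.h>

/* illustrative stand-in for pci_write_bits32(pdev, offset, bit, bit) */
static void example_clear_status_bit(struct pci_dev *pdev, int offset, u32 bit)
{
        u32 reg;

        pci_read_config_dword(pdev, offset, &reg);
        pci_write_config_dword(pdev, offset, reg | bit); /* write 1 back to the pending bit */
}
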
+
+
+/**
+ *     amd76x_process_error_info       -       Error check
+ *     @mci: Memory controller
+ *     @info: Previously fetched information from chip
+ *     @handle_errors: 1 if we should do recovery
+ *
+ *     Process the chip state and decide if an error has occurred.
+ *     A return of 1 indicates an error. Also if handle_errors is true
+ *     then attempt to handle and clean up after the error
+ */
+
+static int amd76x_process_error_info (struct mem_ctl_info *mci,
+               struct amd76x_error_info *info, int handle_errors)
+{
+       int error_found;
+       u32 row;
+
+       error_found = 0;
+
+       /*
+        *      Check for an uncorrectable error
+        */
+       if (info->ecc_mode_status & BIT(8)) {
+               error_found = 1;
+
+               if (handle_errors) {
+                       row = (info->ecc_mode_status >> 4) & 0xf;
+                       edac_mc_handle_ue(mci,
+                           mci->csrows[row].first_page, 0, row,
+                           mci->ctl_name);
+               }
+       }
+
+       /*
+        *      Check for a correctable error
+        */
+       if (info->ecc_mode_status & BIT(9)) {
+               error_found = 1;
+
+               if (handle_errors) {
+                       row = info->ecc_mode_status & 0xf;
+                       edac_mc_handle_ce(mci,
+                           mci->csrows[row].first_page, 0, 0, row, 0,
+                           mci->ctl_name);
+               }
+       }
+       return error_found;
+}
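
Reading the code above together with the AMD76X_ECC_MODE_STATUS layout documented near the top of the file: bit 8 latches an uncorrectable error whose chip-select row sits in bits 7:4, and bit 9 latches a correctable error whose row sits in bits 3:0. A small worked decode of a hypothetical status value (userspace, for illustration only):

#include <assert.h>
#include <stdint.h>

int main(void)
{
        /* hypothetical ECC mode/status readout: CE pending on csrow 5 */
        uint32_t ecc_mode_status = (1u << 9) | 0x5;

        assert(!(ecc_mode_status & (1u << 8)));         /* no UE latched */
        assert(ecc_mode_status & (1u << 9));            /* CE latched */
        assert(((ecc_mode_status >> 4) & 0xf) == 0);    /* UE row field, unused here */
        assert((ecc_mode_status & 0xf) == 5);           /* CE row -> mci->csrows[5] */
        return 0;
}
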
+
+/**
+ *     amd76x_check    -       Poll the controller
+ *     @mci: Memory controller
+ *
+ *     Called by the poll handlers this function reads the status
+ *     from the controller and checks for errors.
+ */
+
+static void amd76x_check(struct mem_ctl_info *mci)
+{
+       struct amd76x_error_info info;
+       debugf3("MC: " __FILE__ ": %s()\n", __func__);
+       amd76x_get_error_info(mci, &info);
+       amd76x_process_error_info(mci, &info, 1);
+}
+
+
+/**
+ *     amd76x_probe1   -       Perform set up for detected device
+ *     @pdev: PCI device detected
+ *     @dev_idx: Device type index
+ *
+ *     We have found an AMD76x and now need to set up the memory
+ *     controller status reporting. We configure and set up the
+ *     memory controller reporting and claim the device.
+ */
+
+static int amd76x_probe1(struct pci_dev *pdev, int dev_idx)
+{
+       int rc = -ENODEV;
+       int index;
+       struct mem_ctl_info *mci = NULL;
+       enum edac_type ems_modes[] = {
+               EDAC_NONE,
+               EDAC_EC,
+               EDAC_SECDED,
+               EDAC_SECDED
+       };
+       u32 ems;
+       u32 ems_mode;
+
+       debugf0("MC: " __FILE__ ": %s()\n", __func__);
+
+       pci_read_config_dword(pdev, AMD76X_ECC_MODE_STATUS, &ems);
+       ems_mode = (ems >> 10) & 0x3;
+
+       mci = edac_mc_alloc(0, AMD76X_NR_CSROWS, AMD76X_NR_CHANS);
+
+       if (mci == NULL) {
+               rc = -ENOMEM;
+               goto fail;
+       }
+
+       debugf0("MC: " __FILE__ ": %s(): mci = %p\n", __func__, mci);
+
+       mci->pdev = pci_dev_get(pdev);
+       mci->mtype_cap = MEM_FLAG_RDDR;
+
+       mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_EC | EDAC_FLAG_SECDED;
+       mci->edac_cap = ems_mode ?
+           (EDAC_FLAG_EC | EDAC_FLAG_SECDED) : EDAC_FLAG_NONE;
+
+       mci->mod_name = BS_MOD_STR;
+       mci->mod_ver = "$Revision: 1.4.2.5 $";
+       mci->ctl_name = amd76x_devs[dev_idx].ctl_name;
+       mci->edac_check = amd76x_check;
+       mci->ctl_page_to_phys = NULL;
+
+       for (index = 0; index < mci->nr_csrows; index++) {
+               struct csrow_info *csrow = &mci->csrows[index];
+               u32 mba;
+               u32 mba_base;
+               u32 mba_mask;
+               u32 dms;
+
+               /* find the DRAM Chip Select Base address and mask */
+               pci_read_config_dword(mci->pdev,
+                                     AMD76X_MEM_BASE_ADDR + (index * 4),
+                                     &mba);
+
+               if (!(mba & BIT(0)))
+                       continue;
+
+               mba_base = mba & 0xff800000UL;
+               mba_mask = ((mba & 0xff80) << 16) | 0x7fffffUL;
+
+               pci_read_config_dword(mci->pdev, AMD76X_DRAM_MODE_STATUS,
+                                     &dms);
+
+               csrow->first_page = mba_base >> PAGE_SHIFT;
+               csrow->nr_pages = (mba_mask + 1) >> PAGE_SHIFT;
+               csrow->last_page = csrow->first_page + csrow->nr_pages - 1;
+               csrow->page_mask = mba_mask >> PAGE_SHIFT;
+               csrow->grain = csrow->nr_pages << PAGE_SHIFT;
+               csrow->mtype = MEM_RDDR;
+               csrow->dtype = ((dms >> index) & 0x1) ? DEV_X4 : DEV_UNKNOWN;
+               csrow->edac_mode = ems_modes[ems_mode];
+       }
+
+       /* clear counters */
+       pci_write_bits32(mci->pdev, AMD76X_ECC_MODE_STATUS, (u32) (0x3 << 8),
+                        (u32) (0x3 << 8));
+
+       if (edac_mc_add_mc(mci)) {
+               debugf3("MC: " __FILE__
+                       ": %s(): failed edac_mc_add_mc()\n", __func__);
+               goto fail;
+       }
+
+       /* get this far and it's successful */
+       debugf3("MC: " __FILE__ ": %s(): success\n", __func__);
+       return 0;
+
+fail:
+       if (mci) {
+               if(mci->pdev)
+                       pci_dev_put(mci->pdev);
+               edac_mc_free(mci);
+       }
+       return rc;
+}
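
The per-row loop in amd76x_probe1() turns each AMD76X_MEM_BASE_ADDR register (chip-select base in bits 31:23, mask in bits 15:7, enable in bit 0) into page bookkeeping: mba_base keeps the 8 MiB-granular base address, mba_mask is rebuilt as a byte-granular mask, and the row size is simply mask+1 expressed in 4 KiB pages. A worked check with a hypothetical register value describing a 64 MiB row that starts at 128 MiB:

#include <assert.h>
#include <stdint.h>

#define PAGE_SHIFT 12   /* 4 KiB pages, as on i386/x86-64 */

int main(void)
{
        uint32_t mba      = 0x08000381;                          /* hypothetical readout */
        uint32_t mba_base = mba & 0xff800000u;                   /* 0x08000000 = 128 MiB */
        uint32_t mba_mask = ((mba & 0xff80u) << 16) | 0x7fffffu; /* 0x03ffffff = 64 MiB - 1 */

        assert(mba & 1);                                         /* chip-select enabled */
        assert((mba_base >> PAGE_SHIFT) == 0x8000);              /* csrow->first_page */
        assert(((mba_mask + 1) >> PAGE_SHIFT) == 0x4000);        /* csrow->nr_pages (64 MiB) */
        return 0;
}
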
+
+/* returns count (>= 0), or negative on error */
+static int __devinit amd76x_init_one(struct pci_dev *pdev,
+                                    const struct pci_device_id *ent)
+{
+       debugf0("MC: " __FILE__ ": %s()\n", __func__);
+
+       /* don't need to call pci_device_enable() */
+       return amd76x_probe1(pdev, ent->driver_data);
+}
+
+
+/**
+ *     amd76x_remove_one       -       driver shutdown
+ *     @pdev: PCI device being handed back
+ *
+ *     Called when the driver is unloaded. Find the matching mci
+ *     structure for the device then delete the mci and free the
+ *     resources.
+ */
+
+static void __devexit amd76x_remove_one(struct pci_dev *pdev)
+{
+       struct mem_ctl_info *mci;
+
+       debugf0(__FILE__ ": %s()\n", __func__);
+
+       if ((mci = edac_mc_find_mci_by_pdev(pdev)) == NULL)
+               return;
+       if (edac_mc_del_mc(mci))
+               return;
+       pci_dev_put(mci->pdev);
+       edac_mc_free(mci);
+}
+
+
+static const struct pci_device_id amd76x_pci_tbl[] __devinitdata = {
+       {PCI_VEND_DEV(AMD, FE_GATE_700C), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+        AMD762},
+       {PCI_VEND_DEV(AMD, FE_GATE_700E), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+        AMD761},
+       {0,}                    /* 0 terminated list. */
+};
+
+MODULE_DEVICE_TABLE(pci, amd76x_pci_tbl);
+
+
+static struct pci_driver amd76x_driver = {
+       .name = BS_MOD_STR,
+       .probe = amd76x_init_one,
+       .remove = __devexit_p(amd76x_remove_one),
+       .id_table = amd76x_pci_tbl,
+};
+
+static int __init amd76x_init(void)
+{
+       return pci_register_driver(&amd76x_driver);
+}
+
+static void __exit amd76x_exit(void)
+{
+       pci_unregister_driver(&amd76x_driver);
+}
+
+module_init(amd76x_init);
+module_exit(amd76x_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Linux Networx (http://lnxi.com) Thayne Harbaugh");
+MODULE_DESCRIPTION("MC support for AMD 76x memory controllers");
diff --git a/drivers/edac/e752x_edac.c b/drivers/edac/e752x_edac.c
new file mode 100644 (file)
index 0000000..770a5a6
--- /dev/null
@@ -0,0 +1,1071 @@
+/*
+ * Intel e752x Memory Controller kernel module
+ * (C) 2004 Linux Networx (http://lnxi.com)
+ * This file may be distributed under the terms of the
+ * GNU General Public License.
+ *
+ * See "enum e752x_chips" below for supported chipsets
+ *
+ * Written by Tom Zimmerman
+ *
+ * Contributors:
+ *     Thayne Harbaugh at realmsys.com (?)
+ *     Wang Zhenyu at intel.com
+ *     Dave Jiang at mvista.com
+ *
+ * $Id: edac_e752x.c,v 1.5.2.11 2005/10/05 00:43:44 dsp_llnl Exp $
+ *
+ */
+
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/init.h>
+
+#include <linux/pci.h>
+#include <linux/pci_ids.h>
+
+#include <linux/slab.h>
+
+#include "edac_mc.h"
+
+
+#ifndef PCI_DEVICE_ID_INTEL_7520_0
+#define PCI_DEVICE_ID_INTEL_7520_0      0x3590
+#endif                         /* PCI_DEVICE_ID_INTEL_7520_0      */
+
+#ifndef PCI_DEVICE_ID_INTEL_7520_1_ERR
+#define PCI_DEVICE_ID_INTEL_7520_1_ERR  0x3591
+#endif                         /* PCI_DEVICE_ID_INTEL_7520_1_ERR  */
+
+#ifndef PCI_DEVICE_ID_INTEL_7525_0
+#define PCI_DEVICE_ID_INTEL_7525_0      0x359E
+#endif                         /* PCI_DEVICE_ID_INTEL_7525_0      */
+
+#ifndef PCI_DEVICE_ID_INTEL_7525_1_ERR
+#define PCI_DEVICE_ID_INTEL_7525_1_ERR  0x3593
+#endif                         /* PCI_DEVICE_ID_INTEL_7525_1_ERR  */
+
+#ifndef PCI_DEVICE_ID_INTEL_7320_0
+#define PCI_DEVICE_ID_INTEL_7320_0     0x3592
+#endif                         /* PCI_DEVICE_ID_INTEL_7320_0 */
+
+#ifndef PCI_DEVICE_ID_INTEL_7320_1_ERR
+#define PCI_DEVICE_ID_INTEL_7320_1_ERR 0x3593
+#endif                         /* PCI_DEVICE_ID_INTEL_7320_1_ERR */
+
+#define E752X_NR_CSROWS                8       /* number of csrows */
+
+
+/* E752X register addresses - device 0 function 0 */
+#define E752X_DRB              0x60    /* DRAM row boundary register (8b) */
+#define E752X_DRA              0x70    /* DRAM row attribute register (8b) */
+                                       /*
+                                        * 31:30   Device width row 7
+                                        *      01=x8 10=x4 11=x8 DDR2
+                                        * 27:26   Device width row 6
+                                        * 23:22   Device width row 5
+                                        * 19:20   Device width row 4
+                                        * 15:14   Device width row 3
+                                        * 11:10   Device width row 2
+                                        *  7:6    Device width row 1
+                                        *  3:2    Device width row 0
+                                        */
+#define E752X_DRC              0x7C    /* DRAM controller mode reg (32b) */
+                                       /* FIXME:IS THIS RIGHT? */
+                                       /*
+                                        * 22    Number channels 0=1,1=2
+                                        * 19:18 DRB Granularity 32/64MB
+                                        */
+#define E752X_DRM              0x80    /* Dimm mapping register */
+#define E752X_DDRCSR           0x9A    /* DDR control and status reg (16b) */
+                                       /*
+                                        * 14:12 1 single A, 2 single B, 3 dual
+                                        */
+#define E752X_TOLM             0xC4    /* DRAM top of low memory reg (16b) */
+#define E752X_REMAPBASE                0xC6    /* DRAM remap base address reg (16b) */
+#define E752X_REMAPLIMIT       0xC8    /* DRAM remap limit address reg (16b) */
+#define E752X_REMAPOFFSET      0xCA    /* DRAM remap limit offset reg (16b) */
+
+/* E752X register addresses - device 0 function 1 */
+#define E752X_FERR_GLOBAL      0x40    /* Global first error register (32b) */
+#define E752X_NERR_GLOBAL      0x44    /* Global next error register (32b) */
+#define E752X_HI_FERR          0x50    /* Hub interface first error reg (8b) */
+#define E752X_HI_NERR          0x52    /* Hub interface next error reg (8b) */
+#define E752X_HI_ERRMASK       0x54    /* Hub interface error mask reg (8b) */
+#define E752X_HI_SMICMD                0x5A    /* Hub interface SMI command reg (8b) */
+#define E752X_SYSBUS_FERR      0x60    /* System buss first error reg (16b) */
+#define E752X_SYSBUS_NERR      0x62    /* System buss next error reg (16b) */
+#define E752X_SYSBUS_ERRMASK   0x64    /* System buss error mask reg (16b) */
+#define E752X_SYSBUS_SMICMD    0x6A    /* System buss SMI command reg (16b) */
+#define E752X_BUF_FERR         0x70    /* Memory buffer first error reg (8b) */
+#define E752X_BUF_NERR         0x72    /* Memory buffer next error reg (8b) */
+#define E752X_BUF_ERRMASK      0x74    /* Memory buffer error mask reg (8b) */
+#define E752X_BUF_SMICMD       0x7A    /* Memory buffer SMI command reg (8b) */
+#define E752X_DRAM_FERR                0x80    /* DRAM first error register (16b) */
+#define E752X_DRAM_NERR                0x82    /* DRAM next error register (16b) */
+#define E752X_DRAM_ERRMASK     0x84    /* DRAM error mask register (8b) */
+#define E752X_DRAM_SMICMD      0x8A    /* DRAM SMI command register (8b) */
+#define E752X_DRAM_RETR_ADD    0xAC    /* DRAM Retry address register (32b) */
+#define E752X_DRAM_SEC1_ADD    0xA0    /* DRAM first correctable memory */
+                                       /*     error address register (32b) */
+                                       /*
+                                        * 31    Reserved
+                                        * 30:2  CE address (64 byte block 34:6)
+                                        * 1     Reserved
+                                        * 0     HiLoCS
+                                        */
+#define E752X_DRAM_SEC2_ADD    0xC8    /* DRAM first correctable memory */
+                                       /*     error address register (32b) */
+                                       /*
+                                        * 31    Reserved
+                                        * 30:2  CE address (64 byte block 34:6)
+                                        * 1     Reserved
+                                        * 0     HiLoCS
+                                        */
+#define E752X_DRAM_DED_ADD     0xA4    /* DRAM first uncorrectable memory */
+                                       /*     error address register (32b) */
+                                       /*
+                                        * 31    Reserved
+                                        * 30:2  CE address (64 byte block 34:6)
+                                        * 1     Reserved
+                                        * 0     HiLoCS
+                                        */
+#define E752X_DRAM_SCRB_ADD    0xA8    /* DRAM first uncorrectable scrub memory */
+                                       /*     error address register (32b) */
+                                       /*
+                                        * 31    Reserved
+                                        * 30:2  CE address (64 byte block 34:6)
+                                        * 1     Reserved
+                                        * 0     HiLoCS
+                                        */
+#define E752X_DRAM_SEC1_SYNDROME 0xC4  /* DRAM first correctable memory */
+                                       /*     error syndrome register (16b) */
+#define E752X_DRAM_SEC2_SYNDROME 0xC6  /* DRAM second correctable memory */
+                                       /*     error syndrome register (16b) */
+#define E752X_DEVPRES1         0xF4    /* Device Present 1 register (8b) */
+
+/* ICH5R register addresses - device 30 function 0 */
+#define ICH5R_PCI_STAT         0x06    /* PCI status register (16b) */
+#define ICH5R_PCI_2ND_STAT     0x1E    /* PCI status secondary reg (16b) */
+#define ICH5R_PCI_BRIDGE_CTL   0x3E    /* PCI bridge control register (16b) */
+
+enum e752x_chips {
+       E7520 = 0,
+       E7525 = 1,
+       E7320 = 2
+};
+
+
+struct e752x_pvt {
+       struct pci_dev *bridge_ck;
+       struct pci_dev *dev_d0f0;
+       struct pci_dev *dev_d0f1;
+       u32 tolm;
+       u32 remapbase;
+       u32 remaplimit;
+       int mc_symmetric;
+       u8 map[8];
+       int map_type;
+       const struct e752x_dev_info *dev_info;
+};
+
+
+struct e752x_dev_info {
+       u16 err_dev;
+       const char *ctl_name;
+};
+
+struct e752x_error_info {
+       u32 ferr_global;
+       u32 nerr_global;
+       u8 hi_ferr;
+       u8 hi_nerr;
+       u16 sysbus_ferr;
+       u16 sysbus_nerr;
+       u8 buf_ferr;
+       u8 buf_nerr;
+       u16 dram_ferr;
+       u16 dram_nerr;
+       u32 dram_sec1_add;
+       u32 dram_sec2_add;
+       u16 dram_sec1_syndrome;
+       u16 dram_sec2_syndrome;
+       u32 dram_ded_add;
+       u32 dram_scrb_add;
+       u32 dram_retr_add;
+};
+
+static const struct e752x_dev_info e752x_devs[] = {
+       [E7520] = {
+                  .err_dev = PCI_DEVICE_ID_INTEL_7520_1_ERR,
+                  .ctl_name = "E7520"},
+       [E7525] = {
+                  .err_dev = PCI_DEVICE_ID_INTEL_7525_1_ERR,
+                  .ctl_name = "E7525"},
+       [E7320] = {
+                  .err_dev = PCI_DEVICE_ID_INTEL_7320_1_ERR,
+                  .ctl_name = "E7320"},
+};
+
+
+static unsigned long ctl_page_to_phys(struct mem_ctl_info *mci,
+                                     unsigned long page)
+{
+       u32 remap;
+       struct e752x_pvt *pvt = (struct e752x_pvt *) mci->pvt_info;
+
+       debugf3("MC: " __FILE__ ": %s()\n", __func__);
+
+       if (page < pvt->tolm)
+               return page;
+       if ((page >= 0x100000) && (page < pvt->remapbase))
+               return page;
+       remap = (page - pvt->tolm) + pvt->remapbase;
+       if (remap < pvt->remaplimit)
+               return remap;
+       printk(KERN_ERR "Invalid page %lx - out of range\n", page);
+       return pvt->tolm - 1;
+}
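
ctl_page_to_phys() undoes the e752x memory remap: pages below the top of low memory (tolm) are passed through, pages at or above 4 GiB but below the remap window are passed through as well, and a controller page that lands in the PCI hole between tolm and 4 GiB is shifted up into the remapped region. A worked example with hypothetical window values, everything expressed in 4 KiB pages:

#include <assert.h>

int main(void)
{
        /* hypothetical remap window, for illustration only */
        unsigned long tolm       = 0xc0000;     /* top of low memory: 3 GiB        */
        unsigned long remapbase  = 0x100000;    /* remapped region starts at 4 GiB */
        unsigned long remaplimit = 0x140000;    /* remapped region ends at 5 GiB   */
        unsigned long page       = 0xd0000;     /* page the controller logged      */
        unsigned long remap      = (page - tolm) + remapbase;

        assert(page >= tolm && page < 0x100000);/* falls inside the PCI hole        */
        assert(remap == 0x110000);              /* 4.25 GiB, the real DRAM location */
        assert(remap < remaplimit);             /* still inside the remap window    */
        return 0;
}
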
+
+static void do_process_ce(struct mem_ctl_info *mci, u16 error_one,
+                      u32 sec1_add, u16 sec1_syndrome)
+{
+       u32 page;
+       int row;
+       int channel;
+       int i;
+       struct e752x_pvt *pvt = (struct e752x_pvt *) mci->pvt_info;
+
+       debugf3("MC: " __FILE__ ": %s()\n", __func__);
+
+       /* convert the addr to 4k page */
+       page = sec1_add >> (PAGE_SHIFT - 4);
+
+       /* FIXME - check for -1 */
+       if (pvt->mc_symmetric) {
+               /* chip select are bits 14 & 13 */
+               row = ((page >> 1) & 3);
+               printk(KERN_WARNING
+                      "Test row %d Table %d %d %d %d %d %d %d %d\n",
+                      row, pvt->map[0], pvt->map[1], pvt->map[2],
+                      pvt->map[3], pvt->map[4], pvt->map[5],
+                      pvt->map[6], pvt->map[7]);
+
+               /* test for channel remapping */
+               for (i = 0; i < 8; i++) {
+                       if (pvt->map[i] == row)
+                               break;
+               }
+               printk(KERN_WARNING "Test computed row %d\n", i);
+               if (i < 8)
+                       row = i;
+               else
+                       printk(KERN_WARNING
+                              "MC%d: row %d not found in remap table\n",
+                              mci->mc_idx, row);
+       } else
+               row = edac_mc_find_csrow_by_page(mci, page);
+       /* 0 = channel A, 1 = channel B */
+       channel = !(error_one & 1);
+
+       if (!pvt->map_type)
+               row = 7 - row;
+       edac_mc_handle_ce(mci, page, 0, sec1_syndrome, row, channel,
+           "e752x CE");
+}
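
The shift by (PAGE_SHIFT - 4) in do_process_ce() falls out of the register layout documented above: E752X_DRAM_SEC1_ADD stores physical address bits 34:6 in register bits 30:2, so the raw value is roughly the failing address divided by 16, and a further shift of PAGE_SHIFT - 4 turns it into a 4 KiB page number. A quick worked check with a hypothetical, page-aligned address:

#include <assert.h>
#include <stdint.h>

#define PAGE_SHIFT 12

int main(void)
{
        uint64_t addr = 0x12345000ULL;                  /* hypothetical failing physical address */
        uint32_t sec1_add = (uint32_t)(addr >> 4);      /* roughly how the register encodes it */
        uint32_t page = sec1_add >> (PAGE_SHIFT - 4);

        assert(page == (uint32_t)(addr >> PAGE_SHIFT)); /* back to the 4 KiB page number */
        return 0;
}
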
+
+
+static inline void process_ce(struct mem_ctl_info *mci, u16 error_one,
+               u32 sec1_add, u16 sec1_syndrome, int *error_found,
+               int handle_error)
+{
+       *error_found = 1;
+
+       if (handle_error)
+               do_process_ce(mci, error_one, sec1_add, sec1_syndrome);
+}
+
+static void do_process_ue(struct mem_ctl_info *mci, u16 error_one, u32 ded_add,
+               u32 scrb_add)
+{
+       u32 error_2b, block_page;
+       int row;
+       struct e752x_pvt *pvt = (struct e752x_pvt *) mci->pvt_info;
+
+       debugf3("MC: " __FILE__ ": %s()\n", __func__);
+
+       if (error_one & 0x0202) {
+               error_2b = ded_add;
+               /* convert to 4k address */
+               block_page = error_2b >> (PAGE_SHIFT - 4);
+               row = pvt->mc_symmetric ?
+                   /* chip select are bits 14 & 13 */
+                   ((block_page >> 1) & 3) :
+                   edac_mc_find_csrow_by_page(mci, block_page);
+               edac_mc_handle_ue(mci, block_page, 0, row,
+                                      "e752x UE from Read");
+       }
+       if (error_one & 0x0404) {
+               error_2b = scrb_add;
+               /* convert to 4k address */
+               block_page = error_2b >> (PAGE_SHIFT - 4);
+               row = pvt->mc_symmetric ?
+                   /* chip select are bits 14 & 13 */
+                   ((block_page >> 1) & 3) :
+                   edac_mc_find_csrow_by_page(mci, block_page);
+               edac_mc_handle_ue(mci, block_page, 0, row,
+                                      "e752x UE from Scruber");
+       }
+}
+
+static inline void process_ue(struct mem_ctl_info *mci, u16 error_one,
+               u32 ded_add, u32 scrb_add, int *error_found, int handle_error)
+{
+       *error_found = 1;
+
+       if (handle_error)
+               do_process_ue(mci, error_one, ded_add, scrb_add);
+}
+
+static inline void process_ue_no_info_wr(struct mem_ctl_info *mci,
+               int *error_found, int handle_error)
+{
+       *error_found = 1;
+
+       if (!handle_error)
+               return;
+
+       debugf3("MC: " __FILE__ ": %s()\n", __func__);
+       edac_mc_handle_ue_no_info(mci, "e752x UE log memory write");
+}
+
+static void do_process_ded_retry(struct mem_ctl_info *mci, u16 error,
+               u32 retry_add)
+{
+       u32 error_1b, page;
+       int row;
+       struct e752x_pvt *pvt = (struct e752x_pvt *) mci->pvt_info;
+
+       error_1b = retry_add;
+       page = error_1b >> (PAGE_SHIFT - 4);    /* convert the addr to 4k page */
+       row = pvt->mc_symmetric ?
+           ((page >> 1) & 3) : /* chip select are bits 14 & 13 */
+           edac_mc_find_csrow_by_page(mci, page);
+       printk(KERN_WARNING
+              "MC%d: CE page 0x%lx, row %d : Memory read retry\n",
+              mci->mc_idx, (long unsigned int) page, row);
+}
+
+static inline void process_ded_retry(struct mem_ctl_info *mci, u16 error,
+               u32 retry_add, int *error_found, int handle_error)
+{
+       *error_found = 1;
+
+       if (handle_error)
+               do_process_ded_retry(mci, error, retry_add);
+}
+
+static inline void process_threshold_ce(struct mem_ctl_info *mci, u16 error,
+               int *error_found, int handle_error)
+{
+       *error_found = 1;
+
+       if (handle_error)
+               printk(KERN_WARNING "MC%d: Memory threshold CE\n",
+                      mci->mc_idx);
+}
+
+static char *global_message[11] = {
+       "PCI Express C1", "PCI Express C", "PCI Express B1",
+       "PCI Express B", "PCI Express A1", "PCI Express A",
+       "DMA Controler", "HUB Interface", "System Bus",
+       "DRAM Controler", "Internal Buffer"
+};
+
+static char *fatal_message[2] = { "Non-Fatal ", "Fatal " };
+
+static void do_global_error(int fatal, u32 errors)
+{
+       int i;
+
+       for (i = 0; i < 11; i++) {
+               if (errors & (1 << i))
+                       printk(KERN_WARNING "%sError %s\n",
+                              fatal_message[fatal], global_message[i]);
+       }
+}
+
+static inline void global_error(int fatal, u32 errors, int *error_found,
+               int handle_error)
+{
+       *error_found = 1;
+
+       if (handle_error)
+               do_global_error(fatal, errors);
+}
+
+static char *hub_message[7] = {
+       "HI Address or Command Parity", "HI Illegal Access",
+       "HI Internal Parity", "Out of Range Access",
+       "HI Data Parity", "Enhanced Config Access",
+       "Hub Interface Target Abort"
+};
+
+static void do_hub_error(int fatal, u8 errors)
+{
+       int i;
+
+       for (i = 0; i < 7; i++) {
+               if (errors & (1 << i))
+                       printk(KERN_WARNING "%sError %s\n",
+                              fatal_message[fatal], hub_message[i]);
+       }
+}
+
+static inline void hub_error(int fatal, u8 errors, int *error_found,
+               int handle_error)
+{
+       *error_found = 1;
+
+       if (handle_error)
+               do_hub_error(fatal, errors);
+}
+
+static char *membuf_message[4] = {
+       "Internal PMWB to DRAM parity",
+       "Internal PMWB to System Bus Parity",
+       "Internal System Bus or IO to PMWB Parity",
+       "Internal DRAM to PMWB Parity"
+};
+
+static void do_membuf_error(u8 errors)
+{
+       int i;
+
+       for (i = 0; i < 4; i++) {
+               if (errors & (1 << i))
+                       printk(KERN_WARNING "Non-Fatal Error %s\n",
+                              membuf_message[i]);
+       }
+}
+
+static inline void membuf_error(u8 errors, int *error_found, int handle_error)
+{
+       *error_found = 1;
+
+       if (handle_error)
+               do_membuf_error(errors);
+}
+
+#if 0
+char *sysbus_message[10] = {
+       "Addr or Request Parity",
+       "Data Strobe Glitch",
+       "Addr Strobe Glitch",
+       "Data Parity",
+       "Addr Above TOM",
+       "Non DRAM Lock Error",
+       "MCERR", "BINIT",
+       "Memory Parity",
+       "IO Subsystem Parity"
+};
+#endif  /*  0  */
+
+static void do_sysbus_error(int fatal, u32 errors)
+{
+       int i;
+
+       for (i = 0; i < 10; i++) {
+               if (errors & (1 << i))
+                       printk(KERN_WARNING "%sError System Bus %s\n",
+                              fatal_message[fatal], global_message[i]);
+       }
+}
+
+static inline void sysbus_error(int fatal, u32 errors, int *error_found,
+               int handle_error)
+{
+       *error_found = 1;
+
+       if (handle_error)
+               do_sysbus_error(fatal, errors);
+}
+
+static void e752x_check_hub_interface (struct e752x_error_info *info,
+               int *error_found, int handle_error)
+{
+       u8 stat8;
+
+       //pci_read_config_byte(dev,E752X_HI_FERR,&stat8);
+       stat8 = info->hi_ferr;
+       if(stat8 & 0x7f) { /* Error, so process */
+               stat8 &= 0x7f;
+               if(stat8 & 0x2b)
+                       hub_error(1, stat8 & 0x2b, error_found, handle_error);
+               if(stat8 & 0x54)
+                       hub_error(0, stat8 & 0x54, error_found, handle_error);
+       }
+       //pci_read_config_byte(dev,E752X_HI_NERR,&stat8);
+       stat8 = info->hi_nerr;
+       if(stat8 & 0x7f) { /* Error, so process */
+               stat8 &= 0x7f;
+               if (stat8 & 0x2b)
+                       hub_error(1, stat8 & 0x2b, error_found, handle_error);
+               if(stat8 & 0x54)
+                       hub_error(0, stat8 & 0x54, error_found, handle_error);
+       }
+}
+
+static void e752x_check_sysbus (struct e752x_error_info *info, int *error_found,
+               int handle_error)
+{
+       u32 stat32, error32;
+
+       //pci_read_config_dword(dev,E752X_SYSBUS_FERR,&stat32);
+       stat32 = info->sysbus_ferr + (info->sysbus_nerr << 16);
+
+       if (stat32 == 0)
+               return;  /* no errors */
+
+       error32 = (stat32 >> 16) & 0x3ff;
+       stat32 = stat32 & 0x3ff;
+       if(stat32 & 0x083)
+               sysbus_error(1, stat32 & 0x083, error_found, handle_error);
+       if(stat32 & 0x37c)
+               sysbus_error(0, stat32 & 0x37c, error_found, handle_error);
+       if(error32 & 0x083)
+               sysbus_error(1, error32 & 0x083, error_found, handle_error);
+       if(error32 & 0x37c)
+               sysbus_error(0, error32 & 0x37c, error_found, handle_error);
+}
+
+static void e752x_check_membuf (struct e752x_error_info *info, int *error_found,
+               int handle_error)
+{
+       u8 stat8;
+
+       stat8 = info->buf_ferr;
+       if (stat8 & 0x0f) { /* Error, so process */
+               stat8 &= 0x0f;
+               membuf_error(stat8, error_found, handle_error);
+       }
+       stat8 = info->buf_nerr;
+       if (stat8 & 0x0f) { /* Error, so process */
+               stat8 &= 0x0f;
+               membuf_error(stat8, error_found, handle_error);
+       }
+}
+
+static void e752x_check_dram (struct mem_ctl_info *mci,
+               struct e752x_error_info *info, int *error_found, int handle_error)
+{
+       u16 error_one, error_next;
+
+       error_one = info->dram_ferr;
+       error_next = info->dram_nerr;
+
+       /* decode and report errors */
+       if(error_one & 0x0101)  /* check first error correctable */
+               process_ce(mci, error_one, info->dram_sec1_add,
+                          info->dram_sec1_syndrome, error_found,
+                          handle_error);
+
+       if(error_next & 0x0101)  /* check next error correctable */
+               process_ce(mci, error_next, info->dram_sec2_add,
+                          info->dram_sec2_syndrome, error_found,
+                          handle_error);
+
+       if(error_one & 0x4040)
+               process_ue_no_info_wr(mci, error_found, handle_error);
+
+       if(error_next & 0x4040)
+               process_ue_no_info_wr(mci, error_found, handle_error);
+
+       if(error_one & 0x2020)
+               process_ded_retry(mci, error_one, info->dram_retr_add,
+                                 error_found, handle_error);
+
+       if(error_next & 0x2020)
+               process_ded_retry(mci, error_next, info->dram_retr_add,
+                                 error_found, handle_error);
+
+       if(error_one & 0x0808)
+               process_threshold_ce(mci, error_one, error_found,
+                                    handle_error);
+
+       if(error_next & 0x0808)
+               process_threshold_ce(mci, error_next, error_found,
+                                    handle_error);
+
+       if(error_one & 0x0606)
+               process_ue(mci, error_one, info->dram_ded_add,
+                          info->dram_scrb_add, error_found, handle_error);
+
+       if(error_next & 0x0606)
+               process_ue(mci, error_next, info->dram_ded_add,
+                          info->dram_scrb_add, error_found, handle_error);
+}
+
+static void e752x_get_error_info (struct mem_ctl_info *mci,
+                                 struct e752x_error_info *info)
+{
+       struct pci_dev *dev;
+       struct e752x_pvt *pvt;
+
+       memset(info, 0, sizeof(*info));
+       pvt = (struct e752x_pvt *) mci->pvt_info;
+       dev = pvt->dev_d0f1;
+
+       pci_read_config_dword(dev, E752X_FERR_GLOBAL, &info->ferr_global);
+
+       if (info->ferr_global) {
+               pci_read_config_byte(dev, E752X_HI_FERR, &info->hi_ferr);
+               pci_read_config_word(dev, E752X_SYSBUS_FERR,
+                               &info->sysbus_ferr);
+               pci_read_config_byte(dev, E752X_BUF_FERR, &info->buf_ferr);
+               pci_read_config_word(dev, E752X_DRAM_FERR,
+                               &info->dram_ferr);
+               pci_read_config_dword(dev, E752X_DRAM_SEC1_ADD,
+                               &info->dram_sec1_add);
+               pci_read_config_word(dev, E752X_DRAM_SEC1_SYNDROME,
+                               &info->dram_sec1_syndrome);
+               pci_read_config_dword(dev, E752X_DRAM_DED_ADD,
+                               &info->dram_ded_add);
+               pci_read_config_dword(dev, E752X_DRAM_SCRB_ADD,
+                               &info->dram_scrb_add);
+               pci_read_config_dword(dev, E752X_DRAM_RETR_ADD,
+                               &info->dram_retr_add);
+
+               if (info->hi_ferr & 0x7f)
+                       pci_write_config_byte(dev, E752X_HI_FERR,
+                                       info->hi_ferr);
+
+               if (info->sysbus_ferr)
+                       pci_write_config_word(dev, E752X_SYSBUS_FERR,
+                                       info->sysbus_ferr);
+
+               if (info->buf_ferr & 0x0f)
+                       pci_write_config_byte(dev, E752X_BUF_FERR,
+                                       info->buf_ferr);
+
+               if (info->dram_ferr)
+                       pci_write_bits16(pvt->bridge_ck, E752X_DRAM_FERR,
+                                       info->dram_ferr, info->dram_ferr);
+
+               pci_write_config_dword(dev, E752X_FERR_GLOBAL,
+                               info->ferr_global);
+       }
+
+       pci_read_config_dword(dev, E752X_NERR_GLOBAL, &info->nerr_global);
+
+       if (info->nerr_global) {
+               pci_read_config_byte(dev, E752X_HI_NERR, &info->hi_nerr);
+               pci_read_config_word(dev, E752X_SYSBUS_NERR,
+                               &info->sysbus_nerr);
+               pci_read_config_byte(dev, E752X_BUF_NERR, &info->buf_nerr);
+               pci_read_config_word(dev, E752X_DRAM_NERR,
+                               &info->dram_nerr);
+               pci_read_config_dword(dev, E752X_DRAM_SEC2_ADD,
+                               &info->dram_sec2_add);
+               pci_read_config_word(dev, E752X_DRAM_SEC2_SYNDROME,
+                               &info->dram_sec2_syndrome);
+
+               if (info->hi_nerr & 0x7f)
+                       pci_write_config_byte(dev, E752X_HI_NERR,
+                                       info->hi_nerr);
+
+               if (info->sysbus_nerr)
+                       pci_write_config_word(dev, E752X_SYSBUS_NERR,
+                                       info->sysbus_nerr);
+
+               if (info->buf_nerr & 0x0f)
+                       pci_write_config_byte(dev, E752X_BUF_NERR,
+                                       info->buf_nerr);
+
+               if (info->dram_nerr)
+                       pci_write_bits16(pvt->bridge_ck, E752X_DRAM_NERR,
+                                       info->dram_nerr, info->dram_nerr);
+
+               pci_write_config_dword(dev, E752X_NERR_GLOBAL,
+                               info->nerr_global);
+       }
+}
+
+static int e752x_process_error_info (struct mem_ctl_info *mci,
+               struct e752x_error_info *info, int handle_errors)
+{
+       u32 error32, stat32;
+       int error_found;
+
+       error_found = 0;
+       error32 = (info->ferr_global >> 18) & 0x3ff;
+       stat32 = (info->ferr_global >> 4) & 0x7ff;
+
+       if (error32)
+               global_error(1, error32, &error_found, handle_errors);
+
+       if (stat32)
+               global_error(0, stat32, &error_found, handle_errors);
+
+       error32 = (info->nerr_global >> 18) & 0x3ff;
+       stat32 = (info->nerr_global >> 4) & 0x7ff;
+
+       if (error32)
+               global_error(1, error32, &error_found, handle_errors);
+
+       if (stat32)
+               global_error(0, stat32, &error_found, handle_errors);
+
+       e752x_check_hub_interface(info, &error_found, handle_errors);
+       e752x_check_sysbus(info, &error_found, handle_errors);
+       e752x_check_membuf(info, &error_found, handle_errors);
+       e752x_check_dram(mci, info, &error_found, handle_errors);
+       return error_found;
+}
+
+static void e752x_check(struct mem_ctl_info *mci)
+{
+       struct e752x_error_info info;
+       debugf3("MC: " __FILE__ ": %s()\n", __func__);
+       e752x_get_error_info(mci, &info);
+       e752x_process_error_info(mci, &info, 1);
+}
+
+static int e752x_probe1(struct pci_dev *pdev, int dev_idx)
+{
+       int rc = -ENODEV;
+       int index;
+       u16 pci_data, stat;
+       u32 stat32;
+       u16 stat16;
+       u8 stat8;
+       struct mem_ctl_info *mci = NULL;
+       struct e752x_pvt *pvt = NULL;
+       u16 ddrcsr;
+       u32 drc;
+       int drc_chan;           /* Number of channels 0=1chan,1=2chan */
+       int drc_drbg;           /* DRB granularity 0=64mb,1=128mb */
+       int drc_ddim;           /* DRAM Data Integrity Mode 0=none,2=edac */
+       u32 dra;
+       unsigned long last_cumul_size;
+       struct pci_dev *pres_dev;
+       struct pci_dev *dev = NULL;
+
+       debugf0("MC: " __FILE__ ": %s(): mci\n", __func__);
+       debugf0("Starting Probe1\n");
+
+       /* enable device 0 function 1 */
+       pci_read_config_byte(pdev, E752X_DEVPRES1, &stat8);
+       stat8 |= (1 << 5);
+       pci_write_config_byte(pdev, E752X_DEVPRES1, stat8);
+
+       /* need to find out the number of channels */
+       pci_read_config_dword(pdev, E752X_DRC, &drc);
+       pci_read_config_word(pdev, E752X_DDRCSR, &ddrcsr);
+       /* FIXME: should check >>12 or 0xf, true for all? */
+       /* Dual channel = 1, Single channel = 0 */
+       drc_chan = (((ddrcsr >> 12) & 3) == 3);
+       drc_drbg = drc_chan + 1;        /* 128 in dual mode, 64 in single */
+       drc_ddim = (drc >> 20) & 0x3;
+
+       mci = edac_mc_alloc(sizeof(*pvt), E752X_NR_CSROWS, drc_chan + 1);
+
+       if (mci == NULL) {
+               rc = -ENOMEM;
+               goto fail;
+       }
+
+       debugf3("MC: " __FILE__ ": %s(): init mci\n", __func__);
+
+       mci->mtype_cap = MEM_FLAG_RDDR;
+       mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED |
+           EDAC_FLAG_S4ECD4ED;
+       /* FIXME - what if different memory types are in different csrows? */
+       mci->mod_name = BS_MOD_STR;
+       mci->mod_ver = "$Revision: 1.5.2.11 $";
+       mci->pdev = pdev;
+
+       debugf3("MC: " __FILE__ ": %s(): init pvt\n", __func__);
+       pvt = (struct e752x_pvt *) mci->pvt_info;
+       pvt->dev_info = &e752x_devs[dev_idx];
+       pvt->bridge_ck = pci_get_device(PCI_VENDOR_ID_INTEL,
+                                        pvt->dev_info->err_dev,
+                                        pvt->bridge_ck);
+       if (pvt->bridge_ck == NULL)
+               pvt->bridge_ck = pci_scan_single_device(pdev->bus,
+                                                       PCI_DEVFN(0, 1));
+       if (pvt->bridge_ck == NULL) {
+               printk(KERN_ERR "MC: error reporting device not found:"
+                      "vendor %x device 0x%x (broken BIOS?)\n",
+                      PCI_VENDOR_ID_INTEL, e752x_devs[dev_idx].err_dev);
+               goto fail;
+       }
+       pvt->mc_symmetric = ((ddrcsr & 0x10) != 0);
+
+       debugf3("MC: " __FILE__ ": %s(): more mci init\n", __func__);
+       mci->ctl_name = pvt->dev_info->ctl_name;
+       mci->edac_check = e752x_check;
+       mci->ctl_page_to_phys = ctl_page_to_phys;
+
+       /* find out the device types */
+       pci_read_config_dword(pdev, E752X_DRA, &dra);
+
+       /*
+        * The dram row boundary (DRB) reg values are boundary address for
+        * each DRAM row with a granularity of 64 or 128MB (single/dual
+        * channel operation).  DRB regs are cumulative; therefore DRB7 will
+        * contain the total memory contained in all eight rows.
+        */
+       for (last_cumul_size = index = 0; index < mci->nr_csrows; index++) {
+               u8 value;
+               u32 cumul_size;
+               /* mem_dev 0=x8, 1=x4 */
+               int mem_dev = (dra >> (index * 4 + 2)) & 0x3;
+               struct csrow_info *csrow = &mci->csrows[index];
+
+               mem_dev = (mem_dev == 2);
+               pci_read_config_byte(mci->pdev, E752X_DRB + index, &value);
+               /* convert a 128 or 64 MiB DRB to a page size. */
+               cumul_size = value << (25 + drc_drbg - PAGE_SHIFT);
+               debugf3("MC: " __FILE__ ": %s(): (%d) cumul_size 0x%x\n",
+                       __func__, index, cumul_size);
+               if (cumul_size == last_cumul_size)
+                       continue;       /* not populated */
+
+               csrow->first_page = last_cumul_size;
+               csrow->last_page = cumul_size - 1;
+               csrow->nr_pages = cumul_size - last_cumul_size;
+               last_cumul_size = cumul_size;
+               csrow->grain = 1 << 12; /* 4KiB - resolution of CELOG */
+               csrow->mtype = MEM_RDDR;        /* only one type supported */
+               csrow->dtype = mem_dev ? DEV_X4 : DEV_X8;
+
+               /*
+                * if single channel or x8 devices then SECDED
+                * if dual channel and x4 then S4ECD4ED
+                */
+               if (drc_ddim) {
+                       if (drc_chan && mem_dev) {
+                               csrow->edac_mode = EDAC_S4ECD4ED;
+                               mci->edac_cap |= EDAC_FLAG_S4ECD4ED;
+                       } else {
+                               csrow->edac_mode = EDAC_SECDED;
+                               mci->edac_cap |= EDAC_FLAG_SECDED;
+                       }
+               } else
+                       csrow->edac_mode = EDAC_NONE;
+       }
+
+       /* Fill in the memory map table */
+       {
+               u8 value;
+               u8 last = 0;
+               u8 row = 0;
+               for (index = 0; index < 8; index += 2) {
+
+                       pci_read_config_byte(mci->pdev, E752X_DRB + index,
+                                            &value);
+                       /* test if there is a dimm in this slot */
+                       if (value == last) {
+                               /* no dimm in the slot, so flag it as empty */
+                               pvt->map[index] = 0xff;
+                               pvt->map[index + 1] = 0xff;
+                       } else {        /* there is a dimm in the slot */
+                               pvt->map[index] = row;
+                               row++;
+                               last = value;
+                               /* test the next value to see if the dimm is
+                                  double sided */
+                               pci_read_config_byte(mci->pdev,
+                                                    E752X_DRB + index + 1,
+                                                    &value);
+                               pvt->map[index + 1] = (value == last) ?
+                                   0xff :      /* the dimm is single sided,
+                                                  so flag as empty */
+                                   row;        /* this is a double sided dimm
+                                                  to save the next row # */
+                               row++;
+                               last = value;
+                       }
+               }
+       }
+
+       /* set the map type.  1 = normal, 0 = reversed */
+       pci_read_config_byte(mci->pdev, E752X_DRM, &stat8);
+       pvt->map_type = ((stat8 & 0x0f) > ((stat8 >> 4) & 0x0f));
+
+       mci->edac_cap |= EDAC_FLAG_NONE;
+
+       debugf3("MC: " __FILE__ ": %s(): tolm, remapbase, remaplimit\n",
+               __func__);
+       /* load the top of low memory, remap base, and remap limit vars */
+       pci_read_config_word(mci->pdev, E752X_TOLM, &pci_data);
+       pvt->tolm = ((u32) pci_data) << 4;
+       pci_read_config_word(mci->pdev, E752X_REMAPBASE, &pci_data);
+       pvt->remapbase = ((u32) pci_data) << 14;
+       pci_read_config_word(mci->pdev, E752X_REMAPLIMIT, &pci_data);
+       pvt->remaplimit = ((u32) pci_data) << 14;
+       printk("tolm = %x, remapbase = %x, remaplimit = %x\n", pvt->tolm,
+              pvt->remapbase, pvt->remaplimit);
+
+       if (edac_mc_add_mc(mci)) {
+               debugf3("MC: " __FILE__
+                       ": %s(): failed edac_mc_add_mc()\n",
+                       __func__);
+               goto fail;
+       }
+
+       /* Walk through the PCI table and clear errors */
+       switch (dev_idx) {
+       case E7520:
+               dev = pci_get_device(PCI_VENDOR_ID_INTEL,
+                                     PCI_DEVICE_ID_INTEL_7520_0, NULL);
+               break;
+       case E7525:
+               dev = pci_get_device(PCI_VENDOR_ID_INTEL,
+                                     PCI_DEVICE_ID_INTEL_7525_0, NULL);
+               break;
+       case E7320:
+               dev = pci_get_device(PCI_VENDOR_ID_INTEL,
+                                     PCI_DEVICE_ID_INTEL_7320_0, NULL);
+               break;
+       }
+
+
+       pvt->dev_d0f0 = dev;
+       for (pres_dev = dev;
+            ((struct pci_dev *) pres_dev->global_list.next != dev);
+            pres_dev = (struct pci_dev *) pres_dev->global_list.next) {
+               pci_read_config_dword(pres_dev, PCI_COMMAND, &stat32);
+               stat = (u16) (stat32 >> 16);
+               /* clear any error bits */
+               if (stat32 & ((1 << 6) + (1 << 8)))
+                       pci_write_config_word(pres_dev, PCI_STATUS, stat);
+       }
+       /* find the error reporting device and clear errors */
+       dev = pvt->dev_d0f1 = pci_dev_get(pvt->bridge_ck);
+       /* Turn off error disable & SMI in case the BIOS turned it on */
+       pci_write_config_byte(dev, E752X_HI_ERRMASK, 0x00);
+       pci_write_config_byte(dev, E752X_HI_SMICMD, 0x00);
+       pci_write_config_word(dev, E752X_SYSBUS_ERRMASK, 0x00);
+       pci_write_config_word(dev, E752X_SYSBUS_SMICMD, 0x00);
+       pci_write_config_byte(dev, E752X_BUF_ERRMASK, 0x00);
+       pci_write_config_byte(dev, E752X_BUF_SMICMD, 0x00);
+       pci_write_config_byte(dev, E752X_DRAM_ERRMASK, 0x00);
+       pci_write_config_byte(dev, E752X_DRAM_SMICMD, 0x00);
+       /* clear other MCH errors */
+       pci_read_config_dword(dev, E752X_FERR_GLOBAL, &stat32);
+       pci_write_config_dword(dev, E752X_FERR_GLOBAL, stat32);
+       pci_read_config_dword(dev, E752X_NERR_GLOBAL, &stat32);
+       pci_write_config_dword(dev, E752X_NERR_GLOBAL, stat32);
+       pci_read_config_byte(dev, E752X_HI_FERR, &stat8);
+       pci_write_config_byte(dev, E752X_HI_FERR, stat8);
+       pci_read_config_byte(dev, E752X_HI_NERR, &stat8);
+       pci_write_config_byte(dev, E752X_HI_NERR, stat8);
+       pci_read_config_dword(dev, E752X_SYSBUS_FERR, &stat32);
+       pci_write_config_dword(dev, E752X_SYSBUS_FERR, stat32);
+       pci_read_config_byte(dev, E752X_BUF_FERR, &stat8);
+       pci_write_config_byte(dev, E752X_BUF_FERR, stat8);
+       pci_read_config_byte(dev, E752X_BUF_NERR, &stat8);
+       pci_write_config_byte(dev, E752X_BUF_NERR, stat8);
+       pci_read_config_word(dev, E752X_DRAM_FERR, &stat16);
+       pci_write_config_word(dev, E752X_DRAM_FERR, stat16);
+       pci_read_config_word(dev, E752X_DRAM_NERR, &stat16);
+       pci_write_config_word(dev, E752X_DRAM_NERR, stat16);
+
+       /* get this far and it's successful */
+       debugf3("MC: " __FILE__ ": %s(): success\n", __func__);
+       return 0;
+
+fail:
+       if (mci) {
+               if (pvt->dev_d0f0)
+                       pci_dev_put(pvt->dev_d0f0);
+               if (pvt->dev_d0f1)
+                       pci_dev_put(pvt->dev_d0f1);
+               if (pvt->bridge_ck)
+                       pci_dev_put(pvt->bridge_ck);
+               edac_mc_free(mci);
+       }
+       return rc;
+}
+
+/* returns count (>= 0), or negative on error */
+static int __devinit e752x_init_one(struct pci_dev *pdev,
+                                   const struct pci_device_id *ent)
+{
+       debugf0("MC: " __FILE__ ": %s()\n", __func__);
+
+       /* wake up and enable device */
+       if (pci_enable_device(pdev) < 0)
+               return -EIO;
+       return e752x_probe1(pdev, ent->driver_data);
+}
+
+
+static void __devexit e752x_remove_one(struct pci_dev *pdev)
+{
+       struct mem_ctl_info *mci;
+       struct e752x_pvt *pvt;
+
+       debugf0(__FILE__ ": %s()\n", __func__);
+
+       if ((mci = edac_mc_find_mci_by_pdev(pdev)) == NULL)
+               return;
+
+       if (edac_mc_del_mc(mci))
+               return;
+
+       pvt = (struct e752x_pvt *) mci->pvt_info;
+       pci_dev_put(pvt->dev_d0f0);
+       pci_dev_put(pvt->dev_d0f1);
+       pci_dev_put(pvt->bridge_ck);
+       edac_mc_free(mci);
+}
+
+
+static const struct pci_device_id e752x_pci_tbl[] __devinitdata = {
+       {PCI_VEND_DEV(INTEL, 7520_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+        E7520},
+       {PCI_VEND_DEV(INTEL, 7525_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+        E7525},
+       {PCI_VEND_DEV(INTEL, 7320_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+        E7320},
+       {0,}                    /* 0 terminated list. */
+};
+
+MODULE_DEVICE_TABLE(pci, e752x_pci_tbl);
+
+
+static struct pci_driver e752x_driver = {
+       .name = BS_MOD_STR,
+       .probe = e752x_init_one,
+       .remove = __devexit_p(e752x_remove_one),
+       .id_table = e752x_pci_tbl,
+};
+
+
+static int __init e752x_init(void)
+{
+       int pci_rc;
+
+       debugf3("MC: " __FILE__ ": %s()\n", __func__);
+       pci_rc = pci_register_driver(&e752x_driver);
+       return (pci_rc < 0) ? pci_rc : 0;
+}
+
+
+static void __exit e752x_exit(void)
+{
+       debugf3("MC: " __FILE__ ": %s()\n", __func__);
+       pci_unregister_driver(&e752x_driver);
+}
+
+
+module_init(e752x_init);
+module_exit(e752x_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Linux Networx (http://lnxi.com) Tom Zimmerman\n");
+MODULE_DESCRIPTION("MC support for Intel e752x memory controllers");
diff --git a/drivers/edac/e7xxx_edac.c b/drivers/edac/e7xxx_edac.c
new file mode 100644 (file)
index 0000000..d5e320d
--- /dev/null
@@ -0,0 +1,558 @@
+/*
+ * Intel e7xxx Memory Controller kernel module
+ * (C) 2003 Linux Networx (http://lnxi.com)
+ * This file may be distributed under the terms of the
+ * GNU General Public License.
+ *
+ * See "enum e7xxx_chips" below for supported chipsets
+ *
+ * Written by Thayne Harbaugh
+ * Based on work by Dan Hollis <goemon at anime dot net> and others.
+ *     http://www.anime.net/~goemon/linux-ecc/
+ *
+ * Contributors:
+ *     Eric Biederman (Linux Networx)
+ *     Tom Zimmerman (Linux Networx)
+ *     Jim Garlick (Lawrence Livermore National Labs)
+ *     Dave Peterson (Lawrence Livermore National Labs)
+ *     That One Guy (Some other place)
+ *     Wang Zhenyu (intel.com)
+ *
+ * $Id: edac_e7xxx.c,v 1.5.2.9 2005/10/05 00:43:44 dsp_llnl Exp $
+ *
+ */
+
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/pci_ids.h>
+#include <linux/slab.h>
+#include "edac_mc.h"
+
+
+#ifndef PCI_DEVICE_ID_INTEL_7205_0
+#define PCI_DEVICE_ID_INTEL_7205_0     0x255d
+#endif                         /* PCI_DEVICE_ID_INTEL_7205_0 */
+
+#ifndef PCI_DEVICE_ID_INTEL_7205_1_ERR
+#define PCI_DEVICE_ID_INTEL_7205_1_ERR 0x2551
+#endif                         /* PCI_DEVICE_ID_INTEL_7205_1_ERR */
+
+#ifndef PCI_DEVICE_ID_INTEL_7500_0
+#define PCI_DEVICE_ID_INTEL_7500_0     0x2540
+#endif                         /* PCI_DEVICE_ID_INTEL_7500_0 */
+
+#ifndef PCI_DEVICE_ID_INTEL_7500_1_ERR
+#define PCI_DEVICE_ID_INTEL_7500_1_ERR 0x2541
+#endif                         /* PCI_DEVICE_ID_INTEL_7500_1_ERR */
+
+#ifndef PCI_DEVICE_ID_INTEL_7501_0
+#define PCI_DEVICE_ID_INTEL_7501_0     0x254c
+#endif                         /* PCI_DEVICE_ID_INTEL_7501_0 */
+
+#ifndef PCI_DEVICE_ID_INTEL_7501_1_ERR
+#define PCI_DEVICE_ID_INTEL_7501_1_ERR 0x2541
+#endif                         /* PCI_DEVICE_ID_INTEL_7501_1_ERR */
+
+#ifndef PCI_DEVICE_ID_INTEL_7505_0
+#define PCI_DEVICE_ID_INTEL_7505_0     0x2550
+#endif                         /* PCI_DEVICE_ID_INTEL_7505_0 */
+
+#ifndef PCI_DEVICE_ID_INTEL_7505_1_ERR
+#define PCI_DEVICE_ID_INTEL_7505_1_ERR 0x2551
+#endif                         /* PCI_DEVICE_ID_INTEL_7505_1_ERR */
+
+
+#define E7XXX_NR_CSROWS                8       /* number of csrows */
+#define E7XXX_NR_DIMMS         8       /* FIXME - is this correct? */
+
+
+/* E7XXX register addresses - device 0 function 0 */
+#define E7XXX_DRB              0x60    /* DRAM row boundary register (8b) */
+#define E7XXX_DRA              0x70    /* DRAM row attribute register (8b) */
+                                       /*
+                                        * 31   Device width row 7 0=x8 1=x4
+                                        * 27   Device width row 6
+                                        * 23   Device width row 5
+                                        * 19   Device width row 4
+                                        * 15   Device width row 3
+                                        * 11   Device width row 2
+                                        *  7   Device width row 1
+                                        *  3   Device width row 0
+                                        */
+#define E7XXX_DRC              0x7C    /* DRAM controller mode reg (32b) */
+                                       /*
+                                        * 22    Number channels 0=1,1=2
+                                        * 19:18 DRB Granularity 32/64MB
+                                        */
+#define E7XXX_TOLM             0xC4    /* DRAM top of low memory reg (16b) */
+#define E7XXX_REMAPBASE                0xC6    /* DRAM remap base address reg (16b) */
+#define E7XXX_REMAPLIMIT       0xC8    /* DRAM remap limit address reg (16b) */
+
+/* E7XXX register addresses - device 0 function 1 */
+#define E7XXX_DRAM_FERR                0x80    /* DRAM first error register (8b) */
+#define E7XXX_DRAM_NERR                0x82    /* DRAM next error register (8b) */
+#define E7XXX_DRAM_CELOG_ADD   0xA0    /* DRAM first correctable memory */
+                                       /*     error address register (32b) */
+                                       /*
+                                        * 31:28 Reserved
+                                        * 27:6  CE address (4k block 33:12)
+                                        *  5:0  Reserved
+                                        */
+#define E7XXX_DRAM_UELOG_ADD   0xB0    /* DRAM first uncorrectable memory */
+                                       /*     error address register (32b) */
+                                       /*
+                                        * 31:28 Reserved
+                                        * 27:6  CE address (4k block 33:12)
+                                        *  5:0  Reserved
+                                        */
+#define E7XXX_DRAM_CELOG_SYNDROME 0xD0 /* DRAM first correctable memory */
+                                       /*     error syndrome register (16b) */
+
+enum e7xxx_chips {
+       E7500 = 0,
+       E7501,
+       E7505,
+       E7205,
+};
+
+
+struct e7xxx_pvt {
+       struct pci_dev *bridge_ck;
+       u32 tolm;
+       u32 remapbase;
+       u32 remaplimit;
+       const struct e7xxx_dev_info *dev_info;
+};
+
+
+struct e7xxx_dev_info {
+       u16 err_dev;
+       const char *ctl_name;
+};
+
+
+struct e7xxx_error_info {
+       u8 dram_ferr;
+       u8 dram_nerr;
+       u32 dram_celog_add;
+       u16 dram_celog_syndrome;
+       u32 dram_uelog_add;
+};
+
+static const struct e7xxx_dev_info e7xxx_devs[] = {
+       [E7500] = {
+                  .err_dev = PCI_DEVICE_ID_INTEL_7500_1_ERR,
+                  .ctl_name = "E7500"},
+       [E7501] = {
+                  .err_dev = PCI_DEVICE_ID_INTEL_7501_1_ERR,
+                  .ctl_name = "E7501"},
+       [E7505] = {
+                  .err_dev = PCI_DEVICE_ID_INTEL_7505_1_ERR,
+                  .ctl_name = "E7505"},
+       [E7205] = {
+                  .err_dev = PCI_DEVICE_ID_INTEL_7205_1_ERR,
+                  .ctl_name = "E7205"},
+};
+
+
+/* FIXME - is this valid for both SECDED and S4ECD4ED? */
+static inline int e7xxx_find_channel(u16 syndrome)
+{
+       debugf3("MC: " __FILE__ ": %s()\n", __func__);
+
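+       /*
+        * A syndrome confined to the low byte indicates channel 0, one
+        * confined to the high byte indicates channel 1; otherwise report
+        * channel 0 if either nibble of the high byte is clear, else 1.
+        */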
+       if ((syndrome & 0xff00) == 0)
+               return 0;
+       if ((syndrome & 0x00ff) == 0)
+               return 1;
+       if ((syndrome & 0xf000) == 0 || (syndrome & 0x0f00) == 0)
+               return 0;
+       return 1;
+}
+
+
+static unsigned long
+ctl_page_to_phys(struct mem_ctl_info *mci, unsigned long page)
+{
+       u32 remap;
+       struct e7xxx_pvt *pvt = (struct e7xxx_pvt *) mci->pvt_info;
+
+       debugf3("MC: " __FILE__ ": %s()\n", __func__);
+
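+       /*
+        * Pages below the top of low memory, and pages at or above 4GB
+        * that fall below the remap window, map 1:1; pages between TOLM
+        * and 4GB are translated into the remap window above 4GB.
+        */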
+       if ((page < pvt->tolm) ||
+           ((page >= 0x100000) && (page < pvt->remapbase)))
+               return page;
+       remap = (page - pvt->tolm) + pvt->remapbase;
+       if (remap < pvt->remaplimit)
+               return remap;
+       printk(KERN_ERR "Invalid page %lx - out of range\n", page);
+       return pvt->tolm - 1;
+}
+
+
+static void process_ce(struct mem_ctl_info *mci, struct e7xxx_error_info *info)
+{
+       u32 error_1b, page;
+       u16 syndrome;
+       int row;
+       int channel;
+
+       debugf3("MC: " __FILE__ ": %s()\n", __func__);
+
+       /* read the error address */
+       error_1b = info->dram_celog_add;
+       /* FIXME - should use PAGE_SHIFT */
+       page = error_1b >> 6;   /* convert the address to 4k page */
+       /* read the syndrome */
+       syndrome = info->dram_celog_syndrome;
+       /* FIXME - check for -1 */
+       row = edac_mc_find_csrow_by_page(mci, page);
+       /* convert syndrome to channel */
+       channel = e7xxx_find_channel(syndrome);
+       edac_mc_handle_ce(mci, page, 0, syndrome, row, channel,
+                              "e7xxx CE");
+}
+
+
+static void process_ce_no_info(struct mem_ctl_info *mci)
+{
+       debugf3("MC: " __FILE__ ": %s()\n", __func__);
+       edac_mc_handle_ce_no_info(mci, "e7xxx CE log register overflow");
+}
+
+
+static void process_ue(struct mem_ctl_info *mci, struct e7xxx_error_info *info)
+{
+       u32 error_2b, block_page;
+       int row;
+
+       debugf3("MC: " __FILE__ ": %s()\n", __func__);
+
+       /* read the error address */
+       error_2b = info->dram_uelog_add;
+       /* FIXME - should use PAGE_SHIFT */
+       block_page = error_2b >> 6;     /* convert to 4k address */
+       row = edac_mc_find_csrow_by_page(mci, block_page);
+       edac_mc_handle_ue(mci, block_page, 0, row, "e7xxx UE");
+}
+
+
+static void process_ue_no_info(struct mem_ctl_info *mci)
+{
+       debugf3("MC: " __FILE__ ": %s()\n", __func__);
+       edac_mc_handle_ue_no_info(mci, "e7xxx UE log register overflow");
+}
+
+
+static void e7xxx_get_error_info (struct mem_ctl_info *mci,
+               struct e7xxx_error_info *info)
+{
+       struct e7xxx_pvt *pvt;
+
+       pvt = (struct e7xxx_pvt *) mci->pvt_info;
+       pci_read_config_byte(pvt->bridge_ck, E7XXX_DRAM_FERR,
+           &info->dram_ferr);
+       pci_read_config_byte(pvt->bridge_ck, E7XXX_DRAM_NERR,
+           &info->dram_nerr);
+
+       if ((info->dram_ferr & 1) || (info->dram_nerr & 1)) {
+               pci_read_config_dword(pvt->bridge_ck, E7XXX_DRAM_CELOG_ADD,
+                   &info->dram_celog_add);
+               pci_read_config_word(pvt->bridge_ck,
+                   E7XXX_DRAM_CELOG_SYNDROME, &info->dram_celog_syndrome);
+       }
+
+       if ((info->dram_ferr & 2) || (info->dram_nerr & 2))
+               pci_read_config_dword(pvt->bridge_ck, E7XXX_DRAM_UELOG_ADD,
+                   &info->dram_uelog_add);
+
+       if (info->dram_ferr & 3)
+               pci_write_bits8(pvt->bridge_ck, E7XXX_DRAM_FERR, 0x03,
+                   0x03);
+
+       if (info->dram_nerr & 3)
+               pci_write_bits8(pvt->bridge_ck, E7XXX_DRAM_NERR, 0x03,
+                   0x03);
+}
+
+
+static int e7xxx_process_error_info (struct mem_ctl_info *mci,
+               struct e7xxx_error_info *info, int handle_errors)
+{
+       int error_found;
+
+       error_found = 0;
+
+       /* decode and report errors */
+       if (info->dram_ferr & 1) {      /* check first error correctable */
+               error_found = 1;
+
+               if (handle_errors)
+                       process_ce(mci, info);
+       }
+
+       if (info->dram_ferr & 2) {      /* check first error uncorrectable */
+               error_found = 1;
+
+               if (handle_errors)
+                       process_ue(mci, info);
+       }
+
+       if (info->dram_nerr & 1) {      /* check next error correctable */
+               error_found = 1;
+
+               if (handle_errors) {
+                       if (info->dram_ferr & 1)
+                               process_ce_no_info(mci);
+                       else
+                               process_ce(mci, info);
+               }
+       }
+
+       if (info->dram_nerr & 2) {      /* check next error uncorrectable */
+               error_found = 1;
+
+               if (handle_errors) {
+                       if (info->dram_ferr & 2)
+                               process_ue_no_info(mci);
+                       else
+                               process_ue(mci, info);
+               }
+       }
+
+       return error_found;
+}
+
+
+static void e7xxx_check(struct mem_ctl_info *mci)
+{
+       struct e7xxx_error_info info;
+
+       debugf3("MC: " __FILE__ ": %s()\n", __func__);
+       e7xxx_get_error_info(mci, &info);
+       e7xxx_process_error_info(mci, &info, 1);
+}
+
+
+static int e7xxx_probe1(struct pci_dev *pdev, int dev_idx)
+{
+       int rc = -ENODEV;
+       int index;
+       u16 pci_data;
+       struct mem_ctl_info *mci = NULL;
+       struct e7xxx_pvt *pvt = NULL;
+       u32 drc;
+       int drc_chan = 1;       /* Number of channels 0=1chan,1=2chan */
+       int drc_drbg = 1;       /* DRB granularity 0=32mb,1=64mb */
+       int drc_ddim;           /* DRAM Data Integrity Mode 0=none,2=edac */
+       u32 dra;
+       unsigned long last_cumul_size;
+
+
+       debugf0("MC: " __FILE__ ": %s(): mci\n", __func__);
+
+       /* need to find out the number of channels */
+       pci_read_config_dword(pdev, E7XXX_DRC, &drc);
+       /* only e7501 can be single channel */
+       if (dev_idx == E7501) {
+               drc_chan = ((drc >> 22) & 0x1);
+               drc_drbg = (drc >> 18) & 0x3;
+       }
+       drc_ddim = (drc >> 20) & 0x3;
+
+       mci = edac_mc_alloc(sizeof(*pvt), E7XXX_NR_CSROWS, drc_chan + 1);
+
+       if (mci == NULL) {
+               rc = -ENOMEM;
+               goto fail;
+       }
+
+       debugf3("MC: " __FILE__ ": %s(): init mci\n", __func__);
+
+       mci->mtype_cap = MEM_FLAG_RDDR;
+       mci->edac_ctl_cap =
+           EDAC_FLAG_NONE | EDAC_FLAG_SECDED | EDAC_FLAG_S4ECD4ED;
+       /* FIXME - what if different memory types are in different csrows? */
+       mci->mod_name = BS_MOD_STR;
+       mci->mod_ver = "$Revision: 1.5.2.9 $";
+       mci->pdev = pdev;
+
+       debugf3("MC: " __FILE__ ": %s(): init pvt\n", __func__);
+       pvt = (struct e7xxx_pvt *) mci->pvt_info;
+       pvt->dev_info = &e7xxx_devs[dev_idx];
+       pvt->bridge_ck = pci_get_device(PCI_VENDOR_ID_INTEL,
+                                        pvt->dev_info->err_dev,
+                                        pvt->bridge_ck);
+       if (!pvt->bridge_ck) {
+               printk(KERN_ERR
+                      "MC: error reporting device not found:"
+                      "vendor %x device 0x%x (broken BIOS?)\n",
+                      PCI_VENDOR_ID_INTEL, e7xxx_devs[dev_idx].err_dev);
+               goto fail;
+       }
+
+       debugf3("MC: " __FILE__ ": %s(): more mci init\n", __func__);
+       mci->ctl_name = pvt->dev_info->ctl_name;
+
+       mci->edac_check = e7xxx_check;
+       mci->ctl_page_to_phys = ctl_page_to_phys;
+
+       /* find out the device types */
+       pci_read_config_dword(pdev, E7XXX_DRA, &dra);
+
+       /*
+        * The dram row boundary (DRB) reg values are boundary address
+        * for each DRAM row with a granularity of 32 or 64MB (single/dual
+        * channel operation).  DRB regs are cumulative; therefore DRB7 will
+        * contain the total memory contained in all eight rows.
+        */
+       for (last_cumul_size = index = 0; index < mci->nr_csrows; index++) {
+               u8 value;
+               u32 cumul_size;
+               /* mem_dev 0=x8, 1=x4 */
+               int mem_dev = (dra >> (index * 4 + 3)) & 0x1;
+               struct csrow_info *csrow = &mci->csrows[index];
+
+               pci_read_config_byte(mci->pdev, E7XXX_DRB + index, &value);
+               /* convert a 64 or 32 MiB DRB to a page size. */
+               cumul_size = value << (25 + drc_drbg - PAGE_SHIFT);
+               debugf3("MC: " __FILE__ ": %s(): (%d) cumul_size 0x%x\n",
+                       __func__, index, cumul_size);
+               if (cumul_size == last_cumul_size)
+                       continue;       /* not populated */
+
+               csrow->first_page = last_cumul_size;
+               csrow->last_page = cumul_size - 1;
+               csrow->nr_pages = cumul_size - last_cumul_size;
+               last_cumul_size = cumul_size;
+               csrow->grain = 1 << 12; /* 4KiB - resolution of CELOG */
+               csrow->mtype = MEM_RDDR;        /* only one type supported */
+               csrow->dtype = mem_dev ? DEV_X4 : DEV_X8;
+
+               /*
+                * if single channel or x8 devices then SECDED
+                * if dual channel and x4 then S4ECD4ED
+                */
+               if (drc_ddim) {
+                       if (drc_chan && mem_dev) {
+                               csrow->edac_mode = EDAC_S4ECD4ED;
+                               mci->edac_cap |= EDAC_FLAG_S4ECD4ED;
+                       } else {
+                               csrow->edac_mode = EDAC_SECDED;
+                               mci->edac_cap |= EDAC_FLAG_SECDED;
+                       }
+               } else
+                       csrow->edac_mode = EDAC_NONE;
+       }
+
+       mci->edac_cap |= EDAC_FLAG_NONE;
+
+       debugf3("MC: " __FILE__ ": %s(): tolm, remapbase, remaplimit\n",
+               __func__);
+       /* load the top of low memory, remap base, and remap limit vars */
+       pci_read_config_word(mci->pdev, E7XXX_TOLM, &pci_data);
+       pvt->tolm = ((u32) pci_data) << 4;
+       pci_read_config_word(mci->pdev, E7XXX_REMAPBASE, &pci_data);
+       pvt->remapbase = ((u32) pci_data) << 14;
+       pci_read_config_word(mci->pdev, E7XXX_REMAPLIMIT, &pci_data);
+       pvt->remaplimit = ((u32) pci_data) << 14;
+       printk("tolm = %x, remapbase = %x, remaplimit = %x\n", pvt->tolm,
+              pvt->remapbase, pvt->remaplimit);
+
+       /* clear any pending errors, or initial state bits */
+       pci_write_bits8(pvt->bridge_ck, E7XXX_DRAM_FERR, 0x03, 0x03);
+       pci_write_bits8(pvt->bridge_ck, E7XXX_DRAM_NERR, 0x03, 0x03);
+
+       if (edac_mc_add_mc(mci) != 0) {
+               debugf3("MC: " __FILE__
+                       ": %s(): failed edac_mc_add_mc()\n",
+                       __func__);
+               goto fail;
+       }
+
+       /* get this far and it's successful */
+       debugf3("MC: " __FILE__ ": %s(): success\n", __func__);
+       return 0;
+
+fail:
+       if (mci != NULL) {
+               if (pvt != NULL && pvt->bridge_ck)
+                       pci_dev_put(pvt->bridge_ck);
+               edac_mc_free(mci);
+       }
+
+       return rc;
+}
+
+/* returns count (>= 0), or negative on error */
+static int __devinit
+e7xxx_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+       debugf0("MC: " __FILE__ ": %s()\n", __func__);
+
+       /* wake up and enable device */
+       return pci_enable_device(pdev) ?
+           -EIO : e7xxx_probe1(pdev, ent->driver_data);
+}
+
+
+static void __devexit e7xxx_remove_one(struct pci_dev *pdev)
+{
+       struct mem_ctl_info *mci;
+       struct e7xxx_pvt *pvt;
+
+       debugf0(__FILE__ ": %s()\n", __func__);
+
+       if (((mci = edac_mc_find_mci_by_pdev(pdev)) != NULL) &&
+           edac_mc_del_mc(mci)) {
+               pvt = (struct e7xxx_pvt *) mci->pvt_info;
+               pci_dev_put(pvt->bridge_ck);
+               edac_mc_free(mci);
+       }
+}
+
+
+static const struct pci_device_id e7xxx_pci_tbl[] __devinitdata = {
+       {PCI_VEND_DEV(INTEL, 7205_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+        E7205},
+       {PCI_VEND_DEV(INTEL, 7500_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+        E7500},
+       {PCI_VEND_DEV(INTEL, 7501_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+        E7501},
+       {PCI_VEND_DEV(INTEL, 7505_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+        E7505},
+       {0,}                    /* 0 terminated list. */
+};
+
+MODULE_DEVICE_TABLE(pci, e7xxx_pci_tbl);
+
+
+static struct pci_driver e7xxx_driver = {
+       .name = BS_MOD_STR,
+       .probe = e7xxx_init_one,
+       .remove = __devexit_p(e7xxx_remove_one),
+       .id_table = e7xxx_pci_tbl,
+};
+
+
+static int __init e7xxx_init(void)
+{
+       return pci_register_driver(&e7xxx_driver);
+}
+
+
+static void __exit e7xxx_exit(void)
+{
+       pci_unregister_driver(&e7xxx_driver);
+}
+
+module_init(e7xxx_init);
+module_exit(e7xxx_exit);
+
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Linux Networx (http://lnxi.com) Thayne Harbaugh et al\n"
+             "Based on.work by Dan Hollis et al");
+MODULE_DESCRIPTION("MC support for Intel e7xxx memory controllers");
diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c
new file mode 100644 (file)
index 0000000..4be9bd0
--- /dev/null
@@ -0,0 +1,2209 @@
+/*
+ * edac_mc kernel module
+ * (C) 2005 Linux Networx (http://lnxi.com)
+ * This file may be distributed under the terms of the
+ * GNU General Public License.
+ *
+ * Written by Thayne Harbaugh
+ * Based on work by Dan Hollis <goemon at anime dot net> and others.
+ *     http://www.anime.net/~goemon/linux-ecc/
+ *
+ * Modified by Dave Peterson and Doug Thompson
+ *
+ */
+
+
+#include <linux/config.h>
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/proc_fs.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/smp.h>
+#include <linux/init.h>
+#include <linux/sysctl.h>
+#include <linux/highmem.h>
+#include <linux/timer.h>
+#include <linux/slab.h>
+#include <linux/jiffies.h>
+#include <linux/spinlock.h>
+#include <linux/list.h>
+#include <linux/sysdev.h>
+#include <linux/ctype.h>
+
+#include <asm/uaccess.h>
+#include <asm/page.h>
+#include <asm/edac.h>
+
+#include "edac_mc.h"
+
+#define        EDAC_MC_VERSION "edac_mc  Ver: 2.0.0 " __DATE__
+
+#ifdef CONFIG_EDAC_DEBUG
+/* Values of 0 to 4 will generate output */
+int edac_debug_level = 1;
+EXPORT_SYMBOL(edac_debug_level);
+#endif
+
+/* EDAC controls, settable by module parameter and sysfs */
+static int log_ue = 1;
+static int log_ce = 1;
+static int panic_on_ue = 1;
+static int poll_msec = 1000;
+
+static int check_pci_parity = 0;       /* default NO check of PCI parity */
+static int panic_on_pci_parity;                /* default no panic on PCI Parity */
+static atomic_t pci_parity_count = ATOMIC_INIT(0);
+
+/* lock to memory controller's control array */
+static DECLARE_MUTEX(mem_ctls_mutex);
+static struct list_head mc_devices = LIST_HEAD_INIT(mc_devices);
+
+/* Structure of the whitelist and blacklist arrays */
+struct edac_pci_device_list {
+       unsigned int  vendor;           /* Vendor ID */
+       unsigned int  device;           /* Device ID */
+};
+
+
+#define MAX_LISTED_PCI_DEVICES         32
+
+/* List of PCI devices (vendor-id:device-id) that should be skipped */
+static struct edac_pci_device_list pci_blacklist[MAX_LISTED_PCI_DEVICES];
+static int pci_blacklist_count;
+
+/* List of PCI devices (vendor-id:device-id) that should be scanned */
+static struct edac_pci_device_list pci_whitelist[MAX_LISTED_PCI_DEVICES];
+static int pci_whitelist_count ;
+
+/*  START sysfs data and methods */
+
+static const char *mem_types[] = {
+       [MEM_EMPTY] = "Empty",
+       [MEM_RESERVED] = "Reserved",
+       [MEM_UNKNOWN] = "Unknown",
+       [MEM_FPM] = "FPM",
+       [MEM_EDO] = "EDO",
+       [MEM_BEDO] = "BEDO",
+       [MEM_SDR] = "Unbuffered-SDR",
+       [MEM_RDR] = "Registered-SDR",
+       [MEM_DDR] = "Unbuffered-DDR",
+       [MEM_RDDR] = "Registered-DDR",
+       [MEM_RMBS] = "RMBS"
+};
+
+static const char *dev_types[] = {
+       [DEV_UNKNOWN] = "Unknown",
+       [DEV_X1] = "x1",
+       [DEV_X2] = "x2",
+       [DEV_X4] = "x4",
+       [DEV_X8] = "x8",
+       [DEV_X16] = "x16",
+       [DEV_X32] = "x32",
+       [DEV_X64] = "x64"
+};
+
+static const char *edac_caps[] = {
+       [EDAC_UNKNOWN] = "Unknown",
+       [EDAC_NONE] = "None",
+       [EDAC_RESERVED] = "Reserved",
+       [EDAC_PARITY] = "PARITY",
+       [EDAC_EC] = "EC",
+       [EDAC_SECDED] = "SECDED",
+       [EDAC_S2ECD2ED] = "S2ECD2ED",
+       [EDAC_S4ECD4ED] = "S4ECD4ED",
+       [EDAC_S8ECD8ED] = "S8ECD8ED",
+       [EDAC_S16ECD16ED] = "S16ECD16ED"
+};
+
+
+/* sysfs object: /sys/devices/system/edac */
+static struct sysdev_class edac_class = {
+       set_kset_name("edac"),
+};
+
+/* sysfs objects:
+ *     /sys/devices/system/edac/mc
+ *     /sys/devices/system/edac/pci
+ */
+static struct kobject edac_memctrl_kobj;
+static struct kobject edac_pci_kobj;
+
+/*
+ * /sys/devices/system/edac/mc;
+ *     data structures and methods
+ */
+static ssize_t memctrl_string_show(void *ptr, char *buffer)
+{
+       char *value = (char*) ptr;
+       return sprintf(buffer, "%s\n", value);
+}
+
+static ssize_t memctrl_int_show(void *ptr, char *buffer)
+{
+       int *value = (int*) ptr;
+       return sprintf(buffer, "%d\n", *value);
+}
+
+static ssize_t memctrl_int_store(void *ptr, const char *buffer, size_t count)
+{
+       int *value = (int*) ptr;
+
+       if (isdigit(*buffer))
+               *value = simple_strtoul(buffer, NULL, 0);
+
+       return count;
+}
+
+struct memctrl_dev_attribute {
+       struct attribute        attr;
+       void    *value;
+       ssize_t (*show)(void *,char *);
+       ssize_t (*store)(void *, const char *, size_t);
+};
+
+/* Set of show/store abstract level functions for memory control object */
+static ssize_t
+memctrl_dev_show(struct kobject *kobj, struct attribute *attr, char *buffer)
+{
+       struct memctrl_dev_attribute *memctrl_dev;
+       memctrl_dev = (struct memctrl_dev_attribute*)attr;
+
+       if (memctrl_dev->show)
+               return memctrl_dev->show(memctrl_dev->value, buffer);
+       return -EIO;
+}
+
+static ssize_t
+memctrl_dev_store(struct kobject *kobj, struct attribute *attr,
+                       const char *buffer, size_t count)
+{
+       struct memctrl_dev_attribute *memctrl_dev;
+       memctrl_dev = (struct memctrl_dev_attribute*)attr;
+
+       if (memctrl_dev->store)
+               return memctrl_dev->store(memctrl_dev->value, buffer, count);
+       return -EIO;
+}
+
+static struct sysfs_ops memctrlfs_ops = {
+       .show   = memctrl_dev_show,
+       .store  = memctrl_dev_store
+};
+
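+/*
+ * MEMCTRL_ATTR/MEMCTRL_STRING_ATTR declare a memctrl_dev_attribute named
+ * attr_<name>, binding the sysfs file <name> to the given value pointer
+ * (or string data) and its show/store handlers.
+ */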
+#define MEMCTRL_ATTR(_name,_mode,_show,_store)                 \
+struct memctrl_dev_attribute attr_##_name = {                  \
+       .attr = {.name = __stringify(_name), .mode = _mode },   \
+       .value  = &_name,                                       \
+       .show   = _show,                                        \
+       .store  = _store,                                       \
+};
+
+#define MEMCTRL_STRING_ATTR(_name,_data,_mode,_show,_store)    \
+struct memctrl_dev_attribute attr_##_name = {                  \
+       .attr = {.name = __stringify(_name), .mode = _mode },   \
+       .value  = _data,                                        \
+       .show   = _show,                                        \
+       .store  = _store,                                       \
+};
+
+/* mc_version string attribute */
+MEMCTRL_STRING_ATTR(mc_version,EDAC_MC_VERSION,S_IRUGO,memctrl_string_show,NULL);
+
+/* memory controller control files */
+MEMCTRL_ATTR(panic_on_ue,S_IRUGO|S_IWUSR,memctrl_int_show,memctrl_int_store);
+MEMCTRL_ATTR(log_ue,S_IRUGO|S_IWUSR,memctrl_int_show,memctrl_int_store);
+MEMCTRL_ATTR(log_ce,S_IRUGO|S_IWUSR,memctrl_int_show,memctrl_int_store);
+MEMCTRL_ATTR(poll_msec,S_IRUGO|S_IWUSR,memctrl_int_show,memctrl_int_store);
+
+
+/* Base Attributes of the memory ECC object */
+static struct memctrl_dev_attribute *memctrl_attr[] = {
+       &attr_panic_on_ue,
+       &attr_log_ue,
+       &attr_log_ce,
+       &attr_poll_msec,
+       &attr_mc_version,
+       NULL,
+};
+
+/* Main MC kobject release() function */
+static void edac_memctrl_master_release(struct kobject *kobj)
+{
+       debugf1("EDAC MC: " __FILE__ ": %s()\n", __func__);
+}
+
+static struct kobj_type ktype_memctrl = {
+       .release        = edac_memctrl_master_release,
+       .sysfs_ops      = &memctrlfs_ops,
+       .default_attrs  = (struct attribute **) memctrl_attr,
+};
+
+
+/* Initialize the main sysfs entries for edac:
+ *   /sys/devices/system/edac
+ *
+ * and children
+ *
+ * Return:  0 SUCCESS
+ *         !0 FAILURE
+ */
+static int edac_sysfs_memctrl_setup(void)
+{
+       int err=0;
+
+       debugf1("MC: " __FILE__ ": %s()\n", __func__);
+
+       /* create the /sys/devices/system/edac directory */
+       err = sysdev_class_register(&edac_class);
+       if (!err) {
+               /* Init the MC's kobject */
+               memset(&edac_memctrl_kobj, 0, sizeof (edac_memctrl_kobj));
+               kobject_init(&edac_memctrl_kobj);
+
+               edac_memctrl_kobj.parent = &edac_class.kset.kobj;
+               edac_memctrl_kobj.ktype = &ktype_memctrl;
+
+               /* generate sysfs "..../edac/mc"   */
+               err = kobject_set_name(&edac_memctrl_kobj,"mc");
+               if (!err) {
+                       /* FIXME: maybe new sysdev_create_subdir() */
+                       err = kobject_register(&edac_memctrl_kobj);
+                       if (err) {
+                               debugf1("Failed to register '.../edac/mc'\n");
+                       } else {
+                               debugf1("Registered '.../edac/mc' kobject\n");
+                       }
+               }
+       } else {
+               debugf1(KERN_WARNING "__FILE__ %s() error=%d\n", __func__,err);
+       }
+
+       return err;
+}
+
+/*
+ * MC teardown:
+ *     the '..../edac/mc' kobject followed by '..../edac' itself
+ */
+static void edac_sysfs_memctrl_teardown(void)
+{
+       debugf0("MC: " __FILE__ ": %s()\n", __func__);
+
+       /* Unregister the MC's kobject */
+       kobject_unregister(&edac_memctrl_kobj);
+
+       /* release the master edac mc kobject */
+       kobject_put(&edac_memctrl_kobj);
+
+       /* Unregister the 'edac' object */
+       sysdev_class_unregister(&edac_class);
+}
+
+/*
+ * /sys/devices/system/edac/pci;
+ *     data structures and methods
+ */
+
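+/*
+ * Pairs a device list with its element count so the same show/store
+ * helpers can operate on both the whitelist and the blacklist.
+ */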
+struct list_control {
+       struct edac_pci_device_list *list;
+       int *count;
+};
+
+/* Output the list as:  vendor_id:device_id<,vendor_id:device_id> */
+static ssize_t edac_pci_list_string_show(void *ptr, char *buffer)
+{
+       struct list_control *listctl;
+       struct edac_pci_device_list *list;
+       char *p = buffer;
+       int len=0;
+       int i;
+
+       listctl = ptr;
+       list = listctl->list;
+
+       for (i = 0; i < *(listctl->count); i++, list++ ) {
+               if (len > 0)
+                       len += snprintf(p + len, (PAGE_SIZE-len), ",");
+
+               len += snprintf(p + len,
+                               (PAGE_SIZE-len),
+                               "%x:%x",
+                               list->vendor,list->device);
+       }
+
+       len += snprintf(p + len,(PAGE_SIZE-len), "\n");
+
+       return (ssize_t) len;
+}
+
+/**
+ *
+ * Scan string from **s to **e looking for one 'vendor:device' tuple
+ * where each field is a hex value
+ *
+ * return 0 if an entry is NOT found
+ * return 1 if an entry is found
+ *     fill in *vendor_id and *device_id with values found
+ *
+ * In both cases, make sure *s has been moved forward toward *e
+ */
+static int parse_one_device(const char **s,const char **e,
+       unsigned int *vendor_id, unsigned int *device_id)
+{
+       const char *runner, *p;
+
+       /* if null byte, we are done */
+       if (!**s) {
+               (*s)++; /* keep *s moving */
+               return 0;
+       }
+
+       /* skip over newlines & whitespace */
+       if ((**s == '\n') || isspace(**s)) {
+               (*s)++;
+               return 0;
+       }
+
+       if (!isxdigit(**s)) {
+               (*s)++;
+               return 0;
+       }
+
+       /* parse vendor_id */
+       runner = *s;
+       while (runner < *e) {
+               /* scan for vendor:device delimiter */
+               if (*runner == ':') {
+                       *vendor_id = simple_strtol((char*) *s, (char**) &p, 16);
+                       runner = p + 1;
+                       break;
+               }
+               runner++;
+       }
+
+       if (!isxdigit(*runner)) {
+               *s = ++runner;
+               return 0;
+       }
+
+       /* parse device_id */
+       if (runner < *e) {
+               *device_id = simple_strtol((char*)runner, (char**)&p, 16);
+               runner = p;
+       }
+
+       *s = runner;
+
+       return 1;
+}
+
+static ssize_t edac_pci_list_string_store(void *ptr, const char *buffer,
+                                       size_t count)
+{
+       struct list_control *listctl;
+       struct edac_pci_device_list *list;
+       unsigned int vendor_id, device_id;
+       const char *s, *e;
+       int *index;
+
+       s = (char*)buffer;
+       e = s + count;
+
+       listctl = ptr;
+       list = listctl->list;
+       index = listctl->count;
+
+       *index = 0;
+       while (*index < MAX_LISTED_PCI_DEVICES) {
+
+               if (parse_one_device(&s,&e,&vendor_id,&device_id)) {
+                       list[ *index ].vendor = vendor_id;
+                       list[ *index ].device = device_id;
+                       (*index)++;
+               }
+
+               /* stop once all of the input has been consumed */
+               if (s >= e)
+                       break;
+       }
+
+       return count;
+}
+
+static ssize_t edac_pci_int_show(void *ptr, char *buffer)
+{
+       int *value = ptr;
+       return sprintf(buffer,"%d\n",*value);
+}
+
+static ssize_t edac_pci_int_store(void *ptr, const char *buffer, size_t count)
+{
+       int *value = ptr;
+
+       if (isdigit(*buffer))
+               *value = simple_strtoul(buffer,NULL,0);
+
+       return count;
+}
+
+struct edac_pci_dev_attribute {
+       struct attribute        attr;
+       void    *value;
+       ssize_t (*show)(void *,char *);
+       ssize_t (*store)(void *, const char *,size_t);
+};
+
+/* Set of show/store abstract level functions for PCI Parity object */
+static ssize_t edac_pci_dev_show(struct kobject *kobj, struct attribute *attr,
+                               char *buffer)
+{
+       struct edac_pci_dev_attribute *edac_pci_dev;
+       edac_pci_dev= (struct edac_pci_dev_attribute*)attr;
+
+       if (edac_pci_dev->show)
+               return edac_pci_dev->show(edac_pci_dev->value, buffer);
+       return -EIO;
+}
+
+static ssize_t edac_pci_dev_store(struct kobject *kobj, struct attribute *attr,
+                               const char *buffer, size_t count)
+{
+       struct edac_pci_dev_attribute *edac_pci_dev;
+       edac_pci_dev= (struct edac_pci_dev_attribute*)attr;
+
+       if (edac_pci_dev->store)
+               return edac_pci_dev->store(edac_pci_dev->value, buffer, count);
+       return -EIO;
+}
+
+static struct sysfs_ops edac_pci_sysfs_ops = {
+       .show   = edac_pci_dev_show,
+       .store  = edac_pci_dev_store
+};
+
+
+#define EDAC_PCI_ATTR(_name,_mode,_show,_store)                        \
+struct edac_pci_dev_attribute edac_pci_attr_##_name = {                \
+       .attr = {.name = __stringify(_name), .mode = _mode },   \
+       .value  = &_name,                                       \
+       .show   = _show,                                        \
+       .store  = _store,                                       \
+};
+
+#define EDAC_PCI_STRING_ATTR(_name,_data,_mode,_show,_store)   \
+struct edac_pci_dev_attribute edac_pci_attr_##_name = {                \
+       .attr = {.name = __stringify(_name), .mode = _mode },   \
+       .value  = _data,                                        \
+       .show   = _show,                                        \
+       .store  = _store,                                       \
+};
+
+static struct list_control pci_whitelist_control = {
+       .list = pci_whitelist,
+       .count = &pci_whitelist_count
+};
+
+static struct list_control pci_blacklist_control = {
+       .list = pci_blacklist,
+       .count = &pci_blacklist_count
+};
+
+/* whitelist and blacklist attributes */
+EDAC_PCI_STRING_ATTR(pci_parity_whitelist,
+       &pci_whitelist_control,
+       S_IRUGO|S_IWUSR,
+       edac_pci_list_string_show,
+       edac_pci_list_string_store);
+
+EDAC_PCI_STRING_ATTR(pci_parity_blacklist,
+       &pci_blacklist_control,
+       S_IRUGO|S_IWUSR,
+       edac_pci_list_string_show,
+       edac_pci_list_string_store);
+
+/* PCI Parity control files */
+EDAC_PCI_ATTR(check_pci_parity,S_IRUGO|S_IWUSR,edac_pci_int_show,edac_pci_int_store);
+EDAC_PCI_ATTR(panic_on_pci_parity,S_IRUGO|S_IWUSR,edac_pci_int_show,edac_pci_int_store);
+EDAC_PCI_ATTR(pci_parity_count,S_IRUGO,edac_pci_int_show,NULL);
+
+/* Base attributes of the PCI parity object */
+static struct edac_pci_dev_attribute *edac_pci_attr[] = {
+       &edac_pci_attr_check_pci_parity,
+       &edac_pci_attr_panic_on_pci_parity,
+       &edac_pci_attr_pci_parity_count,
+       &edac_pci_attr_pci_parity_whitelist,
+       &edac_pci_attr_pci_parity_blacklist,
+       NULL,
+};
+
+/* No memory to release */
+static void edac_pci_release(struct kobject *kobj)
+{
+       debugf1("EDAC PCI: " __FILE__ ": %s()\n", __func__);
+}
+
+static struct kobj_type ktype_edac_pci = {
+       .release        = edac_pci_release,
+       .sysfs_ops      = &edac_pci_sysfs_ops,
+       .default_attrs  = (struct attribute **) edac_pci_attr,
+};
+
+/**
+ * edac_sysfs_pci_setup()
+ *
+ */
+static int edac_sysfs_pci_setup(void)
+{
+       int err;
+
+       debugf1("MC: " __FILE__ ": %s()\n", __func__);
+
+       memset(&edac_pci_kobj, 0, sizeof(edac_pci_kobj));
+
+       kobject_init(&edac_pci_kobj);
+       edac_pci_kobj.parent = &edac_class.kset.kobj;
+       edac_pci_kobj.ktype = &ktype_edac_pci;
+
+       err = kobject_set_name(&edac_pci_kobj, "pci");
+       if (!err) {
+               /* Instantiate the pci kobject */
+               /* FIXME: maybe new sysdev_create_subdir() */
+               err = kobject_register(&edac_pci_kobj);
+               if (err)
+                       debugf1("Failed to register '.../edac/pci'\n");
+               else
+                       debugf1("Registered '.../edac/pci' kobject\n");
+       }
+       return err;
+}
+
+
+static void edac_sysfs_pci_teardown(void)
+{
+       debugf0("MC: " __FILE__ ": %s()\n", __func__);
+
+       kobject_unregister(&edac_pci_kobj);
+       kobject_put(&edac_pci_kobj);
+}
+
+/* EDAC sysfs CSROW data structures and methods */
+
+/* Set of more detailed csrow<id> attribute show/store functions */
+static ssize_t csrow_ch0_dimm_label_show(struct csrow_info *csrow, char *data)
+{
+       ssize_t size = 0;
+
+       if (csrow->nr_channels > 0) {
+               size = snprintf(data, EDAC_MC_LABEL_LEN,"%s\n",
+                       csrow->channels[0].label);
+       }
+       return size;
+}
+
+static ssize_t csrow_ch1_dimm_label_show(struct csrow_info *csrow, char *data)
+{
+       ssize_t size = 0;
+
+       if (csrow->nr_channels > 1) {
+               size = snprintf(data, EDAC_MC_LABEL_LEN, "%s\n",
+                       csrow->channels[1].label);
+       }
+       return size;
+}
+
+static ssize_t csrow_ch0_dimm_label_store(struct csrow_info *csrow,
+                       const char *data, size_t size)
+{
+       ssize_t max_size = 0;
+
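+       /* copy at most EDAC_MC_LABEL_LEN - 1 bytes and NUL-terminate the label */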
+       if (csrow->nr_channels > 0) {
+               max_size = min((ssize_t)size,(ssize_t)EDAC_MC_LABEL_LEN-1);
+               strncpy(csrow->channels[0].label, data, max_size);
+               csrow->channels[0].label[max_size] = '\0';
+       }
+       return size;
+}
+
+static ssize_t csrow_ch1_dimm_label_store(struct csrow_info *csrow,
+                       const char *data, size_t size)
+{
+       ssize_t max_size = 0;
+
+       if (csrow->nr_channels > 1) {
+               max_size = min((ssize_t)size,(ssize_t)EDAC_MC_LABEL_LEN-1);
+               strncpy(csrow->channels[1].label, data, max_size);
+               csrow->channels[1].label[max_size] = '\0';
+       }
+       return max_size;
+}
+
+static ssize_t csrow_ue_count_show(struct csrow_info *csrow, char *data)
+{
+       return sprintf(data,"%u\n", csrow->ue_count);
+}
+
+static ssize_t csrow_ce_count_show(struct csrow_info *csrow, char *data)
+{
+       return sprintf(data,"%u\n", csrow->ce_count);
+}
+
+static ssize_t csrow_ch0_ce_count_show(struct csrow_info *csrow, char *data)
+{
+       ssize_t size = 0;
+
+       if (csrow->nr_channels > 0) {
+               size = sprintf(data,"%u\n", csrow->channels[0].ce_count);
+       }
+       return size;
+}
+
+static ssize_t csrow_ch1_ce_count_show(struct csrow_info *csrow, char *data)
+{
+       ssize_t size = 0;
+
+       if (csrow->nr_channels > 1) {
+               size = sprintf(data,"%u\n", csrow->channels[1].ce_count);
+       }
+       return size;
+}
+
+static ssize_t csrow_size_show(struct csrow_info *csrow, char *data)
+{
+       return sprintf(data,"%u\n", PAGES_TO_MiB(csrow->nr_pages));
+}
+
+static ssize_t csrow_mem_type_show(struct csrow_info *csrow, char *data)
+{
+       return sprintf(data,"%s\n", mem_types[csrow->mtype]);
+}
+
+static ssize_t csrow_dev_type_show(struct csrow_info *csrow, char *data)
+{
+       return sprintf(data,"%s\n", dev_types[csrow->dtype]);
+}
+
+static ssize_t csrow_edac_mode_show(struct csrow_info *csrow, char *data)
+{
+       return sprintf(data,"%s\n", edac_caps[csrow->edac_mode]);
+}
+
+struct csrowdev_attribute {
+       struct attribute        attr;
+       ssize_t (*show)(struct csrow_info *,char *);
+       ssize_t (*store)(struct csrow_info *, const char *,size_t);
+};
+
+#define to_csrow(k) container_of(k, struct csrow_info, kobj)
+#define to_csrowdev_attr(a) container_of(a, struct csrowdev_attribute, attr)
+
+/* Set of show/store higher level functions for csrow objects */
+static ssize_t csrowdev_show(struct kobject *kobj, struct attribute *attr,
+                               char *buffer)
+{
+       struct csrow_info *csrow = to_csrow(kobj);
+       struct csrowdev_attribute *csrowdev_attr = to_csrowdev_attr(attr);
+
+       if (csrowdev_attr->show)
+               return csrowdev_attr->show(csrow, buffer);
+       return -EIO;
+}
+
+static ssize_t csrowdev_store(struct kobject *kobj, struct attribute *attr,
+                               const char *buffer, size_t count)
+{
+       struct csrow_info *csrow = to_csrow(kobj);
+       struct csrowdev_attribute * csrowdev_attr = to_csrowdev_attr(attr);
+
+       if (csrowdev_attr->store)
+               return csrowdev_attr->store(csrow, buffer, count);
+       return -EIO;
+}
+
+static struct sysfs_ops csrowfs_ops = {
+       .show   = csrowdev_show,
+       .store  = csrowdev_store
+};
+
+#define CSROWDEV_ATTR(_name,_mode,_show,_store)                        \
+struct csrowdev_attribute attr_##_name = {                     \
+       .attr = {.name = __stringify(_name), .mode = _mode },   \
+       .show   = _show,                                        \
+       .store  = _store,                                       \
+};
+
+/* csrow<id> attribute files */
+CSROWDEV_ATTR(size_mb,S_IRUGO,csrow_size_show,NULL);
+CSROWDEV_ATTR(dev_type,S_IRUGO,csrow_dev_type_show,NULL);
+CSROWDEV_ATTR(mem_type,S_IRUGO,csrow_mem_type_show,NULL);
+CSROWDEV_ATTR(edac_mode,S_IRUGO,csrow_edac_mode_show,NULL);
+CSROWDEV_ATTR(ue_count,S_IRUGO,csrow_ue_count_show,NULL);
+CSROWDEV_ATTR(ce_count,S_IRUGO,csrow_ce_count_show,NULL);
+CSROWDEV_ATTR(ch0_ce_count,S_IRUGO,csrow_ch0_ce_count_show,NULL);
+CSROWDEV_ATTR(ch1_ce_count,S_IRUGO,csrow_ch1_ce_count_show,NULL);
+
+/* control/attribute files */
+CSROWDEV_ATTR(ch0_dimm_label,S_IRUGO|S_IWUSR,
+               csrow_ch0_dimm_label_show,
+               csrow_ch0_dimm_label_store);
+CSROWDEV_ATTR(ch1_dimm_label,S_IRUGO|S_IWUSR,
+               csrow_ch1_dimm_label_show,
+               csrow_ch1_dimm_label_store);
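+
+/*
+ * Illustrative: the dimm_label files let userspace record which physical
+ * DIMM backs each channel of a csrow, e.g. (the exact sysfs mount point is
+ * assumed here):
+ *
+ *     echo "DIMM_A1" > .../edac/mc/mc0/csrow0/ch0_dimm_label
+ *     cat .../edac/mc/mc0/csrow0/ch0_dimm_label
+ */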
+
+
+/* Attributes of the CSROW<id> object */
+static struct csrowdev_attribute *csrow_attr[] = {
+       &attr_dev_type,
+       &attr_mem_type,
+       &attr_edac_mode,
+       &attr_size_mb,
+       &attr_ue_count,
+       &attr_ce_count,
+       &attr_ch0_ce_count,
+       &attr_ch1_ce_count,
+       &attr_ch0_dimm_label,
+       &attr_ch1_dimm_label,
+       NULL,
+};
+
+
+/* No memory to release */
+static void edac_csrow_instance_release(struct kobject *kobj)
+{
+       debugf1("EDAC MC: " __FILE__ ": %s()\n", __func__);
+}
+
+static struct kobj_type ktype_csrow = {
+       .release        = edac_csrow_instance_release,
+       .sysfs_ops      = &csrowfs_ops,
+       .default_attrs  = (struct attribute **) csrow_attr,
+};
+
+/* Create a CSROW object under specified edac_mc_device */
+static int edac_create_csrow_object(struct kobject *edac_mci_kobj,
+                               struct csrow_info *csrow, int index)
+{
+       int err = 0;
+
+       debugf0("MC: " __FILE__ ": %s()\n", __func__);
+
+       memset(&csrow->kobj, 0, sizeof(csrow->kobj));
+
+       /* generate ..../edac/mc/mc<id>/csrow<index>   */
+
+       kobject_init(&csrow->kobj);
+       csrow->kobj.parent = edac_mci_kobj;
+       csrow->kobj.ktype = &ktype_csrow;
+
+       /* name this instance of csrow<id> */
+       err = kobject_set_name(&csrow->kobj,"csrow%d",index);
+       if (!err) {
+               /* Instantiate the csrow object */
+               err = kobject_register(&csrow->kobj);
+               if (err)
+                       debugf0("Failed to register CSROW%d\n",index);
+               else
+                       debugf0("Registered CSROW%d\n",index);
+       }
+
+       return err;
+}
+
+/* sysfs data structures and methods for the MCI kobjects */
+
+static ssize_t mci_reset_counters_store(struct mem_ctl_info  *mci,
+                                       const char *data, size_t count )
+{
+       int row, chan;
+
+       mci->ue_noinfo_count = 0;
+       mci->ce_noinfo_count = 0;
+       mci->ue_count = 0;
+       mci->ce_count = 0;
+       for (row = 0; row < mci->nr_csrows; row++) {
+               struct csrow_info *ri = &mci->csrows[row];
+
+               ri->ue_count = 0;
+               ri->ce_count = 0;
+               for (chan = 0; chan < ri->nr_channels; chan++)
+                       ri->channels[chan].ce_count = 0;
+       }
+       mci->start_time = jiffies;
+
+       return count;
+}
+
+static ssize_t mci_ue_count_show(struct mem_ctl_info *mci, char *data)
+{
+       return sprintf(data,"%d\n", mci->ue_count);
+}
+
+static ssize_t mci_ce_count_show(struct mem_ctl_info *mci, char *data)
+{
+       return sprintf(data,"%d\n", mci->ce_count);
+}
+
+static ssize_t mci_ce_noinfo_show(struct mem_ctl_info *mci, char *data)
+{
+       return sprintf(data,"%d\n", mci->ce_noinfo_count);
+}
+
+static ssize_t mci_ue_noinfo_show(struct mem_ctl_info *mci, char *data)
+{
+       return sprintf(data,"%d\n", mci->ue_noinfo_count);
+}
+
+static ssize_t mci_seconds_show(struct mem_ctl_info *mci, char *data)
+{
+       return sprintf(data,"%ld\n", (jiffies - mci->start_time) / HZ);
+}
+
+static ssize_t mci_mod_name_show(struct mem_ctl_info *mci, char *data)
+{
+       return sprintf(data,"%s %s\n", mci->mod_name, mci->mod_ver);
+}
+
+static ssize_t mci_ctl_name_show(struct mem_ctl_info *mci, char *data)
+{
+       return sprintf(data,"%s\n", mci->ctl_name);
+}
+
+static int mci_output_edac_cap(char *buf, unsigned long edac_cap)
+{
+       char *p = buf;
+       int bit_idx;
+
+       for (bit_idx = 0; bit_idx < 8 * sizeof(edac_cap); bit_idx++) {
+               if ((edac_cap >> bit_idx) & 0x1)
+                       p += sprintf(p, "%s ", edac_caps[bit_idx]);
+       }
+
+       return p - buf;
+}
+
+static ssize_t mci_edac_capability_show(struct mem_ctl_info *mci, char *data)
+{
+       char *p = data;
+
+       p += mci_output_edac_cap(p,mci->edac_ctl_cap);
+       p += sprintf(p, "\n");
+
+       return p - data;
+}
+
+static ssize_t mci_edac_current_capability_show(struct mem_ctl_info *mci,
+                                               char *data)
+{
+       char *p = data;
+
+       p += mci_output_edac_cap(p,mci->edac_cap);
+       p += sprintf(p, "\n");
+
+       return p - data;
+}
+
+static int mci_output_mtype_cap(char *buf, unsigned long mtype_cap)
+{
+       char *p = buf;
+       int bit_idx;
+
+       for (bit_idx = 0; bit_idx < 8 * sizeof(mtype_cap); bit_idx++) {
+               if ((mtype_cap >> bit_idx) & 0x1)
+                       p += sprintf(p, "%s ", mem_types[bit_idx]);
+       }
+
+       return p - buf;
+}
+
+static ssize_t mci_supported_mem_type_show(struct mem_ctl_info *mci, char *data)
+{
+       char *p = data;
+
+       p += mci_output_mtype_cap(p,mci->mtype_cap);
+       p += sprintf(p, "\n");
+
+       return p - data;
+}
+
+static ssize_t mci_size_mb_show(struct mem_ctl_info *mci, char *data)
+{
+       int total_pages, csrow_idx;
+
+       for (total_pages = csrow_idx = 0; csrow_idx < mci->nr_csrows;
+                       csrow_idx++) {
+               struct csrow_info *csrow = &mci->csrows[csrow_idx];
+
+               if (!csrow->nr_pages)
+                       continue;
+               total_pages += csrow->nr_pages;
+       }
+
+       return sprintf(data,"%u\n", PAGES_TO_MiB(total_pages));
+}
+
+struct mcidev_attribute {
+       struct attribute        attr;
+       ssize_t (*show)(struct mem_ctl_info *,char *);
+       ssize_t (*store)(struct mem_ctl_info *, const char *,size_t);
+};
+
+#define to_mci(k) container_of(k, struct mem_ctl_info, edac_mci_kobj)
+#define to_mcidev_attr(a) container_of(a, struct mcidev_attribute, attr)
+
+static ssize_t mcidev_show(struct kobject *kobj, struct attribute *attr,
+                       char *buffer)
+{
+       struct mem_ctl_info *mem_ctl_info = to_mci(kobj);
+       struct mcidev_attribute * mcidev_attr = to_mcidev_attr(attr);
+
+       if (mcidev_attr->show)
+               return mcidev_attr->show(mem_ctl_info, buffer);
+       return -EIO;
+}
+
+static ssize_t mcidev_store(struct kobject *kobj, struct attribute *attr,
+                               const char *buffer, size_t count)
+{
+       struct mem_ctl_info *mem_ctl_info = to_mci(kobj);
+       struct mcidev_attribute * mcidev_attr = to_mcidev_attr(attr);
+
+       if (mcidev_attr->store)
+               return mcidev_attr->store(mem_ctl_info, buffer, count);
+       return -EIO;
+}
+
+static struct sysfs_ops mci_ops = {
+       .show   = mcidev_show,
+       .store  = mcidev_store
+};
+
+#define MCIDEV_ATTR(_name,_mode,_show,_store)                  \
+struct mcidev_attribute mci_attr_##_name = {                   \
+       .attr = {.name = __stringify(_name), .mode = _mode },   \
+       .show   = _show,                                        \
+       .store  = _store,                                       \
+};
+
+/* Control file */
+MCIDEV_ATTR(reset_counters,S_IWUSR,NULL,mci_reset_counters_store);
+
+/* Attribute files */
+MCIDEV_ATTR(mc_name,S_IRUGO,mci_ctl_name_show,NULL);
+MCIDEV_ATTR(module_name,S_IRUGO,mci_mod_name_show,NULL);
+MCIDEV_ATTR(edac_capability,S_IRUGO,mci_edac_capability_show,NULL);
+MCIDEV_ATTR(size_mb,S_IRUGO,mci_size_mb_show,NULL);
+MCIDEV_ATTR(seconds_since_reset,S_IRUGO,mci_seconds_show,NULL);
+MCIDEV_ATTR(ue_noinfo_count,S_IRUGO,mci_ue_noinfo_show,NULL);
+MCIDEV_ATTR(ce_noinfo_count,S_IRUGO,mci_ce_noinfo_show,NULL);
+MCIDEV_ATTR(ue_count,S_IRUGO,mci_ue_count_show,NULL);
+MCIDEV_ATTR(ce_count,S_IRUGO,mci_ce_count_show,NULL);
+MCIDEV_ATTR(edac_current_capability,S_IRUGO,
+       mci_edac_current_capability_show,NULL);
+MCIDEV_ATTR(supported_mem_type,S_IRUGO,
+       mci_supported_mem_type_show,NULL);
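+
+/*
+ * Illustrative: with the mc<id> kobject registered below, the attributes
+ * above appear as files in the per-controller directory (the exact sysfs
+ * mount point is assumed here), e.g.
+ *
+ *     cat .../edac/mc/mc0/ce_count
+ *     echo 1 > .../edac/mc/mc0/reset_counters
+ */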
+
+
+static struct mcidev_attribute *mci_attr[] = {
+       &mci_attr_reset_counters,
+       &mci_attr_module_name,
+       &mci_attr_mc_name,
+       &mci_attr_edac_capability,
+       &mci_attr_edac_current_capability,
+       &mci_attr_supported_mem_type,
+       &mci_attr_size_mb,
+       &mci_attr_seconds_since_reset,
+       &mci_attr_ue_noinfo_count,
+       &mci_attr_ce_noinfo_count,
+       &mci_attr_ue_count,
+       &mci_attr_ce_count,
+       NULL
+};
+
+
+/*
+ * Release of a MC controlling instance
+ */
+static void edac_mci_instance_release(struct kobject *kobj)
+{
+       struct mem_ctl_info *mci;
+       mci = container_of(kobj,struct mem_ctl_info,edac_mci_kobj);
+
+       debugf0("MC: " __FILE__ ": %s() idx=%d calling kfree\n",
+               __func__, mci->mc_idx);
+
+       kfree(mci);
+}
+
+static struct kobj_type ktype_mci = {
+       .release        = edac_mci_instance_release,
+       .sysfs_ops      = &mci_ops,
+       .default_attrs  = (struct attribute **) mci_attr,
+};
+
+#define EDAC_DEVICE_SYMLINK    "device"
+
+/*
+ * Create a new Memory Controller kobject instance,
+ *     mc<id> under the 'mc' directory
+ *
+ * Return:
+ *     0       Success
+ *     !0      Failure
+ */
+static int edac_create_sysfs_mci_device(struct mem_ctl_info *mci)
+{
+       int i;
+       int err;
+       struct csrow_info *csrow;
+       struct kobject *edac_mci_kobj=&mci->edac_mci_kobj;
+
+       debugf0("MC: " __FILE__ ": %s() idx=%d\n", __func__, mci->mc_idx);
+
+       memset(edac_mci_kobj, 0, sizeof(*edac_mci_kobj));
+       kobject_init(edac_mci_kobj);
+
+       /* set the name of the mc<id> object */
+       err = kobject_set_name(edac_mci_kobj,"mc%d",mci->mc_idx);
+       if (err)
+               return err;
+
+       /* link to our parent the '..../edac/mc' object */
+       edac_mci_kobj->parent = &edac_memctrl_kobj;
+       edac_mci_kobj->ktype = &ktype_mci;
+
+       /* register the mc<id> kobject */
+       err = kobject_register(edac_mci_kobj);
+       if (err)
+               return err;
+
+       /* create a symlink for the device */
+       err = sysfs_create_link(edac_mci_kobj, &mci->pdev->dev.kobj,
+                               EDAC_DEVICE_SYMLINK);
+       if (err) {
+               kobject_unregister(edac_mci_kobj);
+               return err;
+       }
+
+       /* Make directories for each CSROW object
+        * under the mc<id> kobject
+        */
+       for (i = 0; i < mci->nr_csrows; i++) {
+
+               csrow = &mci->csrows[i];
+
+               /* Only expose populated CSROWs */
+               if (csrow->nr_pages > 0) {
+                       err = edac_create_csrow_object(edac_mci_kobj,csrow,i);
+                       if (err)
+                               goto fail;
+               }
+       }
+
+       /* Mark this MCI instance as having sysfs entries */
+       mci->sysfs_active = MCI_SYSFS_ACTIVE;
+
+       return 0;
+
+
+       /* CSROW error: back out what has already been registered */
+fail:
+       for (i--; i >= 0; i--) {
+               if (mci->csrows[i].nr_pages > 0) {
+                       kobject_unregister(&mci->csrows[i].kobj);
+                       kobject_put(&mci->csrows[i].kobj);
+               }
+       }
+
+       kobject_unregister(edac_mci_kobj);
+       kobject_put(edac_mci_kobj);
+
+       return err;
+}
+
+/*
+ * remove a Memory Controller instance
+ */
+static void edac_remove_sysfs_mci_device(struct mem_ctl_info *mci)
+{
+       int i;
+
+       debugf0("MC: " __FILE__ ": %s()\n", __func__);
+
+       /* remove all csrow kobjects */
+       for (i = 0; i < mci->nr_csrows; i++) {
+               if (mci->csrows[i].nr_pages > 0)  {
+                       kobject_unregister(&mci->csrows[i].kobj);
+                       kobject_put(&mci->csrows[i].kobj);
+               }
+       }
+
+       sysfs_remove_link(&mci->edac_mci_kobj, EDAC_DEVICE_SYMLINK);
+
+       kobject_unregister(&mci->edac_mci_kobj);
+       kobject_put(&mci->edac_mci_kobj);
+}
+
+/* END OF sysfs data and methods */
+
+#ifdef CONFIG_EDAC_DEBUG
+
+EXPORT_SYMBOL(edac_mc_dump_channel);
+
+void edac_mc_dump_channel(struct channel_info *chan)
+{
+       debugf4("\tchannel = %p\n", chan);
+       debugf4("\tchannel->chan_idx = %d\n", chan->chan_idx);
+       debugf4("\tchannel->ce_count = %d\n", chan->ce_count);
+       debugf4("\tchannel->label = '%s'\n", chan->label);
+       debugf4("\tchannel->csrow = %p\n\n", chan->csrow);
+}
+
+
+EXPORT_SYMBOL(edac_mc_dump_csrow);
+
+void edac_mc_dump_csrow(struct csrow_info *csrow)
+{
+       debugf4("\tcsrow = %p\n", csrow);
+       debugf4("\tcsrow->csrow_idx = %d\n", csrow->csrow_idx);
+       debugf4("\tcsrow->first_page = 0x%lx\n",
+               csrow->first_page);
+       debugf4("\tcsrow->last_page = 0x%lx\n", csrow->last_page);
+       debugf4("\tcsrow->page_mask = 0x%lx\n", csrow->page_mask);
+       debugf4("\tcsrow->nr_pages = 0x%x\n", csrow->nr_pages);
+       debugf4("\tcsrow->nr_channels = %d\n",
+               csrow->nr_channels);
+       debugf4("\tcsrow->channels = %p\n", csrow->channels);
+       debugf4("\tcsrow->mci = %p\n\n", csrow->mci);
+}
+
+
+EXPORT_SYMBOL(edac_mc_dump_mci);
+
+void edac_mc_dump_mci(struct mem_ctl_info *mci)
+{
+       debugf3("\tmci = %p\n", mci);
+       debugf3("\tmci->mtype_cap = %lx\n", mci->mtype_cap);
+       debugf3("\tmci->edac_ctl_cap = %lx\n", mci->edac_ctl_cap);
+       debugf3("\tmci->edac_cap = %lx\n", mci->edac_cap);
+       debugf4("\tmci->edac_check = %p\n", mci->edac_check);
+       debugf3("\tmci->nr_csrows = %d, csrows = %p\n",
+               mci->nr_csrows, mci->csrows);
+       debugf3("\tpdev = %p\n", mci->pdev);
+       debugf3("\tmod_name:ctl_name = %s:%s\n",
+               mci->mod_name, mci->ctl_name);
+       debugf3("\tpvt_info = %p\n\n", mci->pvt_info);
+}
+
+
+#endif                         /* CONFIG_EDAC_DEBUG */
+
+/* 'ptr' points to a possibly unaligned item X such that sizeof(X) is 'size'.
+ * Adjust 'ptr' so that its alignment is at least as stringent as what the
+ * compiler would provide for X and return the aligned result.
+ *
+ * If 'size' is a constant, the compiler will optimize this whole function
+ * down to either a no-op or the addition of a constant to the value of 'ptr'.
+ */
+static inline char * align_ptr (void *ptr, unsigned size)
+{
+       unsigned align, r;
+
+       /* Here we assume that the alignment of a "long long" is the most
+        * stringent alignment that the compiler will ever provide by default.
+        * As far as I know, this is a reasonable assumption.
+        */
+       if (size > sizeof(long))
+               align = sizeof(long long);
+       else if (size > sizeof(int))
+               align = sizeof(long);
+       else if (size > sizeof(short))
+               align = sizeof(int);
+       else if (size > sizeof(char))
+               align = sizeof(short);
+       else
+               return (char *) ptr;
+
+       r = size % align;
+
+       if (r == 0)
+               return (char *) ptr;
+
+       return (char *) (((unsigned long) ptr) + align - r);
+}
+
+
+EXPORT_SYMBOL(edac_mc_alloc);
+
+/**
+ * edac_mc_alloc: Allocate a struct mem_ctl_info structure
+ * @size_pvt:  size of private storage needed
+ * @nr_csrows: Number of CSROWS needed for this MC
+ * @nr_chans:  Number of channels for the MC
+ *
+ * Everything is kmalloc'ed as one big chunk - more efficient.
+ * It can only be used if all structures have the same lifetime - otherwise
+ * you have to allocate and initialize your own structures.
+ *
+ * Use edac_mc_free() to free mc structures allocated by this function.
+ *
+ * Returns:
+ *     NULL allocation failed
+ *     struct mem_ctl_info pointer
+ */
+struct mem_ctl_info *edac_mc_alloc(unsigned sz_pvt, unsigned nr_csrows,
+                                       unsigned nr_chans)
+{
+       struct mem_ctl_info *mci;
+       struct csrow_info *csi, *csrow;
+       struct channel_info *chi, *chp, *chan;
+       void *pvt;
+       unsigned size;
+       int row, chn;
+
+       /* Figure out the offsets of the various items from the start of an mc
+        * structure.  We want the alignment of each item to be at least as
+        * stringent as what the compiler would provide if we could simply
+        * hardcode everything into a single struct.
+        */
+       mci = (struct mem_ctl_info *) 0;
+       csi = (struct csrow_info *)align_ptr(&mci[1], sizeof(*csi));
+       chi = (struct channel_info *)
+                       align_ptr(&csi[nr_csrows], sizeof(*chi));
+       pvt = align_ptr(&chi[nr_chans * nr_csrows], sz_pvt);
+       size = ((unsigned long) pvt) + sz_pvt;
+
+       if ((mci = kmalloc(size, GFP_KERNEL)) == NULL)
+               return NULL;
+
+       /* Adjust pointers so they point within the memory we just allocated
+        * rather than an imaginary chunk of memory located at address 0.
+        */
+       csi = (struct csrow_info *) (((char *) mci) + ((unsigned long) csi));
+       chi = (struct channel_info *) (((char *) mci) + ((unsigned long) chi));
+       pvt = sz_pvt ? (((char *) mci) + ((unsigned long) pvt)) : NULL;
+
+       memset(mci, 0, size);   /* clear all fields */
+
+       mci->csrows = csi;
+       mci->pvt_info = pvt;
+       mci->nr_csrows = nr_csrows;
+
+       for (row = 0; row < nr_csrows; row++) {
+               csrow = &csi[row];
+               csrow->csrow_idx = row;
+               csrow->mci = mci;
+               csrow->nr_channels = nr_chans;
+               chp = &chi[row * nr_chans];
+               csrow->channels = chp;
+
+               for (chn = 0; chn < nr_chans; chn++) {
+                       chan = &chp[chn];
+                       chan->chan_idx = chn;
+                       chan->csrow = csrow;
+               }
+       }
+
+       return mci;
+}
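+
+/*
+ * Illustrative use from a memory-controller driver's probe routine (a
+ * minimal sketch; the driver-side names "struct my_pvt" and "my_check"
+ * are hypothetical):
+ *
+ *     mci = edac_mc_alloc(sizeof(struct my_pvt), nr_csrows, nr_chans);
+ *     if (mci == NULL)
+ *             return -ENOMEM;
+ *     mci->pdev = pdev;
+ *     mci->mod_name = "my_edac_driver";
+ *     mci->ctl_name = "my_chipset";
+ *     mci->edac_check = my_check;     (polled from check_mc_devices())
+ *     ... fill each mci->csrows[i]: first_page, nr_pages, grain, mtype ...
+ *     if (edac_mc_add_mc(mci))
+ *             edac_mc_free(mci);      (not yet committed, so plain kfree)
+ */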
+
+
+EXPORT_SYMBOL(edac_mc_free);
+
+/**
+ * edac_mc_free:  Free a previously allocated 'mci' structure
+ * @mci: pointer to a struct mem_ctl_info structure
+ *
+ * Free up a previously allocated mci structure.
+ * An MCI structure can be in one of two states after being allocated
+ * by edac_mc_alloc():
+ *     1) Allocated in an MC driver's probe, but not yet committed
+ *     2) Allocated and committed, by a call to edac_mc_add_mc()
+ * edac_mc_add_mc() is the function that adds the sysfs entries;
+ * thus, this free function must determine which state the 'mci'
+ * structure is in and then either free it directly or
+ * perform kobject cleanup by calling edac_remove_sysfs_mci_device().
+ *
+ * VOID Return
+ */
+void edac_mc_free(struct mem_ctl_info *mci)
+{
+       /* only if sysfs entries for this mci instance exist
+        * do we remove them and defer the actual kfree via
+        * the kobject 'release()' callback.
+        *
+        * Otherwise, do a straight kfree now.
+        */
+       if (mci->sysfs_active == MCI_SYSFS_ACTIVE)
+               edac_remove_sysfs_mci_device(mci);
+       else
+               kfree(mci);
+}
+
+
+
+EXPORT_SYMBOL(edac_mc_find_mci_by_pdev);
+
+struct mem_ctl_info *edac_mc_find_mci_by_pdev(struct pci_dev *pdev)
+{
+       struct mem_ctl_info *mci;
+       struct list_head *item;
+
+       debugf3("MC: " __FILE__ ": %s()\n", __func__);
+
+       list_for_each(item, &mc_devices) {
+               mci = list_entry(item, struct mem_ctl_info, link);
+
+               if (mci->pdev == pdev)
+                       return mci;
+       }
+
+       return NULL;
+}
+
+static int add_mc_to_global_list (struct mem_ctl_info *mci)
+{
+       struct list_head *item, *insert_before;
+       struct mem_ctl_info *p;
+       int i;
+
+       if (list_empty(&mc_devices)) {
+               mci->mc_idx = 0;
+               insert_before = &mc_devices;
+       } else {
+               if (edac_mc_find_mci_by_pdev(mci->pdev)) {
+                       printk(KERN_WARNING
+                               "EDAC MC: %s (%s) %s %s already assigned %d\n",
+                               mci->pdev->dev.bus_id, pci_name(mci->pdev),
+                               mci->mod_name, mci->ctl_name, mci->mc_idx);
+                       return 1;
+               }
+
+               insert_before = NULL;
+               i = 0;
+
+               list_for_each(item, &mc_devices) {
+                       p = list_entry(item, struct mem_ctl_info, link);
+
+                       if (p->mc_idx != i) {
+                               insert_before = item;
+                               break;
+                       }
+
+                       i++;
+               }
+
+               mci->mc_idx = i;
+
+               if (insert_before == NULL)
+                       insert_before = &mc_devices;
+       }
+
+       list_add_tail_rcu(&mci->link, insert_before);
+       return 0;
+}
+
+
+
+EXPORT_SYMBOL(edac_mc_add_mc);
+
+/**
+ * edac_mc_add_mc: Insert the 'mci' structure into the mci global list
+ * @mci: pointer to the mci structure to be added to the list
+ *
+ * Return:
+ *     0       Success
+ *     !0      Failure
+ */
+
+/* FIXME - should a warning be printed if no error detection? correction? */
+int edac_mc_add_mc(struct mem_ctl_info *mci)
+{
+       int rc = 1;
+
+       debugf0("MC: " __FILE__ ": %s()\n", __func__);
+#ifdef CONFIG_EDAC_DEBUG
+       if (edac_debug_level >= 3)
+               edac_mc_dump_mci(mci);
+       if (edac_debug_level >= 4) {
+               int i;
+
+               for (i = 0; i < mci->nr_csrows; i++) {
+                       int j;
+                       edac_mc_dump_csrow(&mci->csrows[i]);
+                       for (j = 0; j < mci->csrows[i].nr_channels; j++)
+                               edac_mc_dump_channel(&mci->csrows[i].
+                                                         channels[j]);
+               }
+       }
+#endif
+       down(&mem_ctls_mutex);
+
+       if (add_mc_to_global_list(mci))
+               goto finish;
+
+       /* set load time so that error rate can be tracked */
+       mci->start_time = jiffies;
+
+       if (edac_create_sysfs_mci_device(mci)) {
+               printk(KERN_WARNING
+                       "EDAC MC%d: failed to create sysfs device\n",
+                       mci->mc_idx);
+               /* FIXME - should there be an error code and unwind? */
+               goto finish;
+       }
+
+       /* Report action taken */
+       printk(KERN_INFO
+              "EDAC MC%d: Giving out device to %s %s: PCI %s\n",
+              mci->mc_idx, mci->mod_name, mci->ctl_name,
+              pci_name(mci->pdev));
+
+
+       rc = 0;
+
+finish:
+       up(&mem_ctls_mutex);
+       return rc;
+}
+
+
+
+static void complete_mc_list_del (struct rcu_head *head)
+{
+       struct mem_ctl_info *mci;
+
+       mci = container_of(head, struct mem_ctl_info, rcu);
+       INIT_LIST_HEAD(&mci->link);
+       complete(&mci->complete);
+}
+
+static void del_mc_from_global_list (struct mem_ctl_info *mci)
+{
+       list_del_rcu(&mci->link);
+       init_completion(&mci->complete);
+       call_rcu(&mci->rcu, complete_mc_list_del);
+       wait_for_completion(&mci->complete);
+}
+
+EXPORT_SYMBOL(edac_mc_del_mc);
+
+/**
+ * edac_mc_del_mc:  Remove the specified mci structure from global list
+ * @mci:       Pointer to struct mem_ctl_info structure
+ *
+ * Returns:
+ *     0       Success
+ *     1       Failure
+ */
+int edac_mc_del_mc(struct mem_ctl_info *mci)
+{
+       int rc = 1;
+
+       debugf0("MC%d: " __FILE__ ": %s()\n", mci->mc_idx, __func__);
+       down(&mem_ctls_mutex);
+       del_mc_from_global_list(mci);
+       printk(KERN_INFO
+              "EDAC MC%d: Removed device %d for %s %s: PCI %s\n",
+              mci->mc_idx, mci->mc_idx, mci->mod_name, mci->ctl_name,
+              pci_name(mci->pdev));
+       rc = 0;
+       up(&mem_ctls_mutex);
+
+       return rc;
+}
+
+
+EXPORT_SYMBOL(edac_mc_scrub_block);
+
+void edac_mc_scrub_block(unsigned long page, unsigned long offset,
+                             u32 size)
+{
+       struct page *pg;
+       void *virt_addr;
+       unsigned long flags = 0;
+
+       debugf3("MC: " __FILE__ ": %s()\n", __func__);
+
+       /* ECC error page was not in our memory. Ignore it. */
+       if(!pfn_valid(page))
+               return;
+
+       /* Find the actual page structure then map it and fix */
+       pg = pfn_to_page(page);
+
+       if (PageHighMem(pg))
+               local_irq_save(flags);
+
+       virt_addr = kmap_atomic(pg, KM_BOUNCE_READ);
+
+       /* Perform architecture specific atomic scrub operation */
+       atomic_scrub(virt_addr + offset, size);
+
+       /* Unmap and complete */
+       kunmap_atomic(virt_addr, KM_BOUNCE_READ);
+
+       if (PageHighMem(pg))
+               local_irq_restore(flags);
+}
+
+
+/* FIXME - should return -1 */
+EXPORT_SYMBOL(edac_mc_find_csrow_by_page);
+
+int edac_mc_find_csrow_by_page(struct mem_ctl_info *mci,
+                                   unsigned long page)
+{
+       struct csrow_info *csrows = mci->csrows;
+       int row, i;
+
+       debugf1("MC%d: " __FILE__ ": %s(): 0x%lx\n", mci->mc_idx, __func__,
+               page);
+       row = -1;
+
+       for (i = 0; i < mci->nr_csrows; i++) {
+               struct csrow_info *csrow = &csrows[i];
+
+               if (csrow->nr_pages == 0)
+                       continue;
+
+               debugf3("MC%d: " __FILE__
+                       ": %s(): first(0x%lx) page(0x%lx)"
+                       " last(0x%lx) mask(0x%lx)\n", mci->mc_idx,
+                       __func__, csrow->first_page, page,
+                       csrow->last_page, csrow->page_mask);
+
+               if ((page >= csrow->first_page) &&
+                   (page <= csrow->last_page) &&
+                   ((page & csrow->page_mask) ==
+                    (csrow->first_page & csrow->page_mask))) {
+                       row = i;
+                       break;
+               }
+       }
+
+       if (row == -1)
+               printk(KERN_ERR
+                      "EDAC MC%d: could not look up page error address %lx\n",
+                      mci->mc_idx, (unsigned long) page);
+
+       return row;
+}
+
+
+EXPORT_SYMBOL(edac_mc_handle_ce);
+
+/* FIXME - settable log (warning/emerg) levels */
+/* FIXME - integrate with evlog: http://evlog.sourceforge.net/ */
+void edac_mc_handle_ce(struct mem_ctl_info *mci,
+                           unsigned long page_frame_number,
+                           unsigned long offset_in_page,
+                           unsigned long syndrome, int row, int channel,
+                           const char *msg)
+{
+       unsigned long remapped_page;
+
+       debugf3("MC%d: " __FILE__ ": %s()\n", mci->mc_idx, __func__);
+
+       /* FIXME - maybe make panic on INTERNAL ERROR an option */
+       if (row >= mci->nr_csrows || row < 0) {
+               /* something is wrong */
+               printk(KERN_ERR
+                      "EDAC MC%d: INTERNAL ERROR: row out of range (%d >= %d)\n",
+                      mci->mc_idx, row, mci->nr_csrows);
+               edac_mc_handle_ce_no_info(mci, "INTERNAL ERROR");
+               return;
+       }
+       if (channel >= mci->csrows[row].nr_channels || channel < 0) {
+               /* something is wrong */
+               printk(KERN_ERR
+                      "EDAC MC%d: INTERNAL ERROR: channel out of range "
+                      "(%d >= %d)\n",
+                      mci->mc_idx, channel, mci->csrows[row].nr_channels);
+               edac_mc_handle_ce_no_info(mci, "INTERNAL ERROR");
+               return;
+       }
+
+       if (log_ce)
+               /* FIXME - put in DIMM location */
+               printk(KERN_WARNING
+                      "EDAC MC%d: CE page 0x%lx, offset 0x%lx,"
+                      " grain %d, syndrome 0x%lx, row %d, channel %d,"
+                      " label \"%s\": %s\n", mci->mc_idx,
+                      page_frame_number, offset_in_page,
+                      mci->csrows[row].grain, syndrome, row, channel,
+                      mci->csrows[row].channels[channel].label, msg);
+
+       mci->ce_count++;
+       mci->csrows[row].ce_count++;
+       mci->csrows[row].channels[channel].ce_count++;
+
+       if (mci->scrub_mode & SCRUB_SW_SRC) {
+               /*
+                * Some MC's can remap memory so that it is still available
+                * at a different address when PCI devices map into memory.
+                * MC's that can't do this lose the memory where PCI devices
+                * are mapped.  This mapping is MC dependant and so we call
+                * back into the MC driver for it to map the MC page to
+                * a physical (CPU) page which can then be mapped to a virtual
+                * page - which can then be scrubbed.
+                */
+               remapped_page = mci->ctl_page_to_phys ?
+                   mci->ctl_page_to_phys(mci, page_frame_number) :
+                   page_frame_number;
+
+               edac_mc_scrub_block(remapped_page, offset_in_page,
+                                        mci->csrows[row].grain);
+       }
+}
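+
+/*
+ * Illustrative call from a driver's edac_check() callback once its error
+ * registers have been decoded (a sketch; the decoded values shown are
+ * hypothetical):
+ *
+ *     row = edac_mc_find_csrow_by_page(mci, page_frame_number);
+ *     if (row >= 0)
+ *             edac_mc_handle_ce(mci, page_frame_number, offset_in_page,
+ *                               syndrome, row, channel, "single-bit ECC");
+ *     else
+ *             edac_mc_handle_ce_no_info(mci, "CE with unknown csrow");
+ */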
+
+
+EXPORT_SYMBOL(edac_mc_handle_ce_no_info);
+
+void edac_mc_handle_ce_no_info(struct mem_ctl_info *mci,
+                                   const char *msg)
+{
+       if (log_ce)
+               printk(KERN_WARNING
+                      "EDAC MC%d: CE - no information available: %s\n",
+                      mci->mc_idx, msg);
+       mci->ce_noinfo_count++;
+       mci->ce_count++;
+}
+
+
+EXPORT_SYMBOL(edac_mc_handle_ue);
+
+void edac_mc_handle_ue(struct mem_ctl_info *mci,
+                           unsigned long page_frame_number,
+                           unsigned long offset_in_page, int row,
+                           const char *msg)
+{
+       int len = EDAC_MC_LABEL_LEN * 4;
+       char labels[len + 1];
+       char *pos = labels;
+       int chan;
+       int chars;
+
+       debugf3("MC%d: " __FILE__ ": %s()\n", mci->mc_idx, __func__);
+
+       /* FIXME - maybe make panic on INTERNAL ERROR an option */
+       if (row >= mci->nr_csrows || row < 0) {
+               /* something is wrong */
+               printk(KERN_ERR
+                      "EDAC MC%d: INTERNAL ERROR: row out of range (%d >= %d)\n",
+                      mci->mc_idx, row, mci->nr_csrows);
+               edac_mc_handle_ue_no_info(mci, "INTERNAL ERROR");
+               return;
+       }
+
+       chars = snprintf(pos, len + 1, "%s",
+                        mci->csrows[row].channels[0].label);
+       len -= chars;
+       pos += chars;
+       for (chan = 1; (chan < mci->csrows[row].nr_channels) && (len > 0);
+            chan++) {
+               chars = snprintf(pos, len + 1, ":%s",
+                                mci->csrows[row].channels[chan].label);
+               len -= chars;
+               pos += chars;
+       }
+
+       if (log_ue)
+               printk(KERN_EMERG
+                      "EDAC MC%d: UE page 0x%lx, offset 0x%lx, grain %d, row %d,"
+                      " labels \"%s\": %s\n", mci->mc_idx,
+                      page_frame_number, offset_in_page,
+                      mci->csrows[row].grain, row, labels, msg);
+
+       if (panic_on_ue)
+               panic
+                   ("EDAC MC%d: UE page 0x%lx, offset 0x%lx, grain %d, row %d,"
+                    " labels \"%s\": %s\n", mci->mc_idx,
+                    page_frame_number, offset_in_page,
+                    mci->csrows[row].grain, row, labels, msg);
+
+       mci->ue_count++;
+       mci->csrows[row].ue_count++;
+}
+
+
+EXPORT_SYMBOL(edac_mc_handle_ue_no_info);
+
+void edac_mc_handle_ue_no_info(struct mem_ctl_info *mci,
+                                   const char *msg)
+{
+       if (panic_on_ue)
+               panic("EDAC MC%d: Uncorrected Error", mci->mc_idx);
+
+       if (log_ue)
+               printk(KERN_WARNING
+                      "EDAC MC%d: UE - no information available: %s\n",
+                      mci->mc_idx, msg);
+       mci->ue_noinfo_count++;
+       mci->ue_count++;
+}
+
+
+#ifdef CONFIG_PCI
+
+static u16 get_pci_parity_status(struct pci_dev *dev, int secondary)
+{
+       int where;
+       u16 status;
+
+       where = secondary ? PCI_SEC_STATUS : PCI_STATUS;
+       pci_read_config_word(dev, where, &status);
+
+       /* If we get back 0xFFFF then we must suspect that the card has been
+        * pulled but the Linux PCI layer has not yet finished cleaning up.
+        * We don't want to report on such devices.
+        */
+
+       if (status == 0xFFFF) {
+               u32 sanity;
+               pci_read_config_dword(dev, 0, &sanity);
+               if (sanity == 0xFFFFFFFF)
+                       return 0;
+       }
+       status &= PCI_STATUS_DETECTED_PARITY | PCI_STATUS_SIG_SYSTEM_ERROR |
+                 PCI_STATUS_PARITY;
+
+       if (status)
+               /* reset only the bits we are interested in */
+               pci_write_config_word(dev, where, status);
+
+       return status;
+}
+
+typedef void (*pci_parity_check_fn_t) (struct pci_dev *dev);
+
+/* Clear any PCI parity errors logged by this device. */
+static void edac_pci_dev_parity_clear( struct pci_dev *dev )
+{
+       u8 header_type;
+
+       get_pci_parity_status(dev, 0);
+
+       /* read the device TYPE, looking for bridges */
+       pci_read_config_byte(dev, PCI_HEADER_TYPE, &header_type);
+
+       if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE)
+               get_pci_parity_status(dev, 1);
+}
+
+/*
+ *  PCI Parity polling
+ *
+ */
+static void edac_pci_dev_parity_test(struct pci_dev *dev)
+{
+       u16 status;
+       u8  header_type;
+
+       /* read the STATUS register on this device
+        */
+       status = get_pci_parity_status(dev, 0);
+
+       debugf2("PCI STATUS= 0x%04x %s\n", status, dev->dev.bus_id );
+
+       /* check the status reg for errors */
+       if (status) {
+               if (status & (PCI_STATUS_SIG_SYSTEM_ERROR))
+                       printk(KERN_CRIT
+                               "EDAC PCI- "
+                               "Signaled System Error on %s\n",
+                               pci_name (dev));
+
+               if (status & (PCI_STATUS_PARITY)) {
+                       printk(KERN_CRIT
+                               "EDAC PCI- "
+                               "Master Data Parity Error on %s\n",
+                               pci_name (dev));
+
+                       atomic_inc(&pci_parity_count);
+               }
+
+               if (status & (PCI_STATUS_DETECTED_PARITY)) {
+                       printk(KERN_CRIT
+                               "EDAC PCI- "
+                               "Detected Parity Error on %s\n",
+                               pci_name (dev));
+
+                       atomic_inc(&pci_parity_count);
+               }
+       }
+
+       /* read the device TYPE, looking for bridges */
+       pci_read_config_byte(dev, PCI_HEADER_TYPE, &header_type);
+
+       debugf2("PCI HEADER TYPE= 0x%02x %s\n", header_type, dev->dev.bus_id );
+
+       if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE) {
+               /* On bridges, need to examine secondary status register  */
+               status = get_pci_parity_status(dev, 1);
+
+               debugf2("PCI SEC_STATUS= 0x%04x %s\n",
+                               status, dev->dev.bus_id );
+
+               /* check the secondary status reg for errors */
+               if (status) {
+                       if (status & (PCI_STATUS_SIG_SYSTEM_ERROR))
+                               printk(KERN_CRIT
+                                       "EDAC PCI-Bridge- "
+                                       "Signaled System Error on %s\n",
+                                       pci_name (dev));
+
+                       if (status & (PCI_STATUS_PARITY)) {
+                               printk(KERN_CRIT
+                                       "EDAC PCI-Bridge- "
+                                       "Master Data Parity Error on %s\n",
+                                       pci_name (dev));
+
+                               atomic_inc(&pci_parity_count);
+                       }
+
+                       if (status & (PCI_STATUS_DETECTED_PARITY)) {
+                               printk(KERN_CRIT
+                                       "EDAC PCI-Bridge- "
+                                       "Detected Parity Error on %s\n",
+                                       pci_name (dev));
+
+                               atomic_inc(&pci_parity_count);
+                       }
+               }
+       }
+}
+
+/*
+ * check_dev_on_list: Scan for a PCI device on a white/black list
+ * @list:      an EDAC  &edac_pci_device_list  white/black list pointer
+ * @free_index:        index of next free entry on the list
+ * @pci_dev:   PCI Device pointer
+ *
+ * see if list contains the device.
+ *
+ * Returns:    0 not found
+ *             1 found on list
+ */
+static int check_dev_on_list(struct edac_pci_device_list *list, int free_index,
+                               struct pci_dev *dev)
+{
+       int i;
+       int rc = 0;     /* Assume not found */
+       unsigned short vendor = dev->vendor;
+       unsigned short device = dev->device;
+
+       /* Scan the list, looking for a vendor/device match */
+       for (i = 0; i < free_index; i++, list++) {
+               if ((list->vendor == vendor) &&
+                   (list->device == device)) {
+                       rc = 1;
+                       break;
+               }
+       }
+
+       return rc;
+}
+
+/*
+ * pci_dev parity list iterator
+ *     Scan the PCI device list for one iteration, looking for SERRORs
+ *     Master Parity ERRORS or Parity ERRORs on primary or secondary devices
+ */
+static inline void edac_pci_dev_parity_iterator(pci_parity_check_fn_t fn)
+{
+       struct pci_dev *dev=NULL;
+
+       /* request for kernel access to the next PCI device, if any,
+        * and while we are looking at it have its reference count
+        * bumped until we are done with it
+        */
+       while((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
+
+               /* if a whitelist exists then it has priority, so only scan
+                * those devices on the whitelist
+                */
+               if (pci_whitelist_count > 0) {
+                       if (check_dev_on_list(pci_whitelist,
+                                       pci_whitelist_count, dev))
+                               fn(dev);
+               } else {
+                       /* if no whitelist, then check if this device is
+                        * blacklisted
+                        */
+                       if (!check_dev_on_list(pci_blacklist,
+                                       pci_blacklist_count, dev))
+                               fn(dev);
+               }
+       }
+}
+
+static void do_pci_parity_check(void)
+{
+       unsigned long flags;
+       int before_count;
+
+       debugf3("MC: " __FILE__ ": %s()\n", __func__);
+
+       if (!check_pci_parity)
+               return;
+
+       before_count = atomic_read(&pci_parity_count);
+
+       /* scan all PCI devices looking for a Parity Error on devices and
+        * bridges
+        */
+       local_irq_save(flags);
+       edac_pci_dev_parity_iterator(edac_pci_dev_parity_test);
+       local_irq_restore(flags);
+
+       /* Only if operator has selected panic on PCI Error */
+       if (panic_on_pci_parity) {
+               /* If the count is different 'after' from 'before' */
+               if (before_count != atomic_read(&pci_parity_count))
+                       panic("EDAC: PCI Parity Error");
+       }
+}
+
+
+static inline void clear_pci_parity_errors(void)
+{
+       /* Clear any PCI bus parity errors that devices initially have logged
+        * in their registers.
+        */
+       edac_pci_dev_parity_iterator(edac_pci_dev_parity_clear);
+}
+
+
+#else  /* CONFIG_PCI */
+
+
+static inline void do_pci_parity_check(void)
+{
+       /* no-op */
+}
+
+
+static inline void clear_pci_parity_errors(void)
+{
+       /* no-op */
+}
+
+
+#endif  /* CONFIG_PCI */
+
+/*
+ * Iterate over all MC instances and check for ECC, et al, errors
+ */
+static inline void check_mc_devices (void)
+{
+       unsigned long flags;
+       struct list_head *item;
+       struct mem_ctl_info *mci;
+
+       debugf3("MC: " __FILE__ ": %s()\n", __func__);
+
+       /* during poll, have interrupts off */
+       local_irq_save(flags);
+
+       list_for_each(item, &mc_devices) {
+               mci = list_entry(item, struct mem_ctl_info, link);
+
+               if (mci->edac_check != NULL)
+                       mci->edac_check(mci);
+       }
+
+       local_irq_restore(flags);
+}
+
+
+/*
+ * Check MC status every poll_msec.
+ * Check PCI status every poll_msec as well.
+ *
+ * This where the work gets done for edac.
+ *
+ * SMP safe, doesn't use NMI, and auto-rate-limits.
+ */
+static void do_edac_check(void)
+{
+
+       debugf3("MC: " __FILE__ ": %s()\n", __func__);
+
+       check_mc_devices();
+
+       do_pci_parity_check();
+}
+
+
+/*
+ * EDAC thread state information
+ */
+struct bs_thread_info
+{
+       struct task_struct *task;
+       struct completion *event;
+       char *name;
+       void (*run)(void);
+};
+
+static struct bs_thread_info bs_thread;
+
+/*
+ *  edac_kernel_thread
+ *      This is the kernel thread that processes edac operations
+ *      in a normal thread environment
+ */
+static int edac_kernel_thread(void *arg)
+{
+       struct bs_thread_info *thread = (struct bs_thread_info *) arg;
+
+       /* detach thread */
+       daemonize(thread->name);
+
+       current->exit_signal = SIGCHLD;
+       allow_signal(SIGKILL);
+       thread->task = current;
+
+       /* indicate to starting task we have started */
+       complete(thread->event);
+
+       /* loop forever, until we are told to stop */
+       while(thread->run != NULL) {
+               void (*run)(void);
+
+               /* call the function to check the memory controllers */
+               run = thread->run;
+               if (run)
+                       run();
+
+               if (signal_pending(current))
+                       flush_signals(current);
+
+               /* ensure we are interruptible */
+               set_current_state(TASK_INTERRUPTIBLE);
+
+               /* go to sleep for the interval */
+               schedule_timeout((HZ * poll_msec) / 1000);
+               try_to_freeze();
+       }
+
+       /* notify waiter that we are exiting */
+       complete(thread->event);
+
+       return 0;
+}
+
+/*
+ * edac_mc_init
+ *      module initialization entry point
+ */
+static int __init edac_mc_init(void)
+{
+       int ret;
+       struct completion event;
+
+       printk(KERN_INFO "MC: " __FILE__ " version " EDAC_MC_VERSION "\n");
+
+       /*
+        * Harvest and clear any boot/initialization PCI parity errors
+        *
+        * FIXME: This only clears errors logged by devices present at time of
+        *      module initialization.  We should also do an initial clear
+        *      of each newly hotplugged device.
+        */
+       clear_pci_parity_errors();
+
+       /* perform check for first time to harvest boot leftovers */
+       do_edac_check();
+
+       /* Create the MC sysfs entries */
+       if (edac_sysfs_memctrl_setup()) {
+               printk(KERN_ERR "EDAC MC: Error initializing sysfs code\n");
+               return -ENODEV;
+       }
+
+       /* Create the PCI parity sysfs entries */
+       if (edac_sysfs_pci_setup()) {
+               edac_sysfs_memctrl_teardown();
+               printk(KERN_ERR "EDAC PCI: Error initializing sysfs code\n");
+               return -ENODEV;
+       }
+
+       /* Create our kernel thread */
+       init_completion(&event);
+       bs_thread.event = &event;
+       bs_thread.name = "kedac";
+       bs_thread.run = do_edac_check;
+
+       /* create our kernel thread */
+       ret = kernel_thread(edac_kernel_thread, &bs_thread, CLONE_KERNEL);
+       if (ret < 0) {
+               /* remove the sysfs entries */
+               edac_sysfs_memctrl_teardown();
+               edac_sysfs_pci_teardown();
+               return -ENOMEM;
+       }
+
+       /* wait for our kernel thread to ack that it is up and running */
+       wait_for_completion(&event);
+
+       return 0;
+}
+
+
+/*
+ * edac_mc_exit()
+ *      module exit/termination function
+ */
+static void __exit edac_mc_exit(void)
+{
+       struct completion event;
+
+       debugf0("MC: " __FILE__ ": %s()\n", __func__);
+
+       init_completion(&event);
+       bs_thread.event = &event;
+
+       /* As soon as ->run is set to NULL, the task could disappear,
+        * so we need to hold tasklist_lock until we have sent the signal
+        */
+       read_lock(&tasklist_lock);
+       bs_thread.run = NULL;
+       send_sig(SIGKILL, bs_thread.task, 1);
+       read_unlock(&tasklist_lock);
+       wait_for_completion(&event);
+
+        /* tear down the sysfs device */
+       edac_sysfs_memctrl_teardown();
+       edac_sysfs_pci_teardown();
+}
+
+
+
+
+module_init(edac_mc_init);
+module_exit(edac_mc_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Linux Networx (http://lnxi.com) Thayne Harbaugh et al\n"
+             "Based on work by Dan Hollis et al");
+MODULE_DESCRIPTION("Core library routines for MC reporting");
+
+module_param(panic_on_ue, int, 0644);
+MODULE_PARM_DESC(panic_on_ue, "Panic on uncorrected error: 0=off 1=on");
+module_param(check_pci_parity, int, 0644);
+MODULE_PARM_DESC(check_pci_parity, "Check for PCI bus parity errors: 0=off 1=on");
+module_param(panic_on_pci_parity, int, 0644);
+MODULE_PARM_DESC(panic_on_pci_parity, "Panic on PCI Bus Parity error: 0=off 1=on");
+module_param(log_ue, int, 0644);
+MODULE_PARM_DESC(log_ue, "Log uncorrectable error to console: 0=off 1=on");
+module_param(log_ce, int, 0644);
+MODULE_PARM_DESC(log_ce, "Log correctable error to console: 0=off 1=on");
+module_param(poll_msec, int, 0644);
+MODULE_PARM_DESC(poll_msec, "Polling period in milliseconds");
+#ifdef CONFIG_EDAC_DEBUG
+module_param(edac_debug_level, int, 0644);
+MODULE_PARM_DESC(edac_debug_level, "Debug level");
+#endif
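+
+/*
+ * Illustrative usage of the parameters above (the module name is assumed to
+ * follow the source file name, and the sysfs parameter path is assumed):
+ *
+ *     modprobe edac_mc panic_on_ue=1 poll_msec=500
+ *     echo 1 > /sys/module/edac_mc/parameters/check_pci_parity
+ */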
diff --git a/drivers/edac/edac_mc.h b/drivers/edac/edac_mc.h
new file mode 100644 (file)
index 0000000..75ecf48
--- /dev/null
@@ -0,0 +1,448 @@
+/*
+ * MC kernel module
+ * (C) 2003 Linux Networx (http://lnxi.com)
+ * This file may be distributed under the terms of the
+ * GNU General Public License.
+ *
+ * Written by Thayne Harbaugh
+ * Based on work by Dan Hollis <goemon at anime dot net> and others.
+ *     http://www.anime.net/~goemon/linux-ecc/
+ *
+ * NMI handling support added by
+ *     Dave Peterson <dsp@llnl.gov> <dave_peterson@pobox.com>
+ *
+ * $Id: edac_mc.h,v 1.4.2.10 2005/10/05 00:43:44 dsp_llnl Exp $
+ *
+ */
+
+
+#ifndef _EDAC_MC_H_
+#define _EDAC_MC_H_
+
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <linux/smp.h>
+#include <linux/pci.h>
+#include <linux/time.h>
+#include <linux/nmi.h>
+#include <linux/rcupdate.h>
+#include <linux/completion.h>
+#include <linux/kobject.h>
+
+
+#define EDAC_MC_LABEL_LEN      31
+#define MC_PROC_NAME_MAX_LEN 7
+
+#if PAGE_SHIFT < 20
+#define PAGES_TO_MiB( pages )  ( ( pages ) >> ( 20 - PAGE_SHIFT ) )
+#else                          /* PAGE_SHIFT >= 20 */
+#define PAGES_TO_MiB( pages )  ( ( pages ) << ( PAGE_SHIFT - 20 ) )
+#endif
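+
+/* For example, with 4 KiB pages (PAGE_SHIFT == 12), PAGES_TO_MiB(262144)
+ * yields 262144 >> 8 == 1024, i.e. 1 GiB worth of pages reports as 1024 MiB.
+ */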
+
+#ifdef CONFIG_EDAC_DEBUG
+extern int edac_debug_level;
+#define edac_debug_printk(level, fmt, args...) \
+do { if (level <= edac_debug_level) printk(KERN_DEBUG fmt, ##args); } while(0)
+#define debugf0( ... ) edac_debug_printk(0, __VA_ARGS__ )
+#define debugf1( ... ) edac_debug_printk(1, __VA_ARGS__ )
+#define debugf2( ... ) edac_debug_printk(2, __VA_ARGS__ )
+#define debugf3( ... ) edac_debug_printk(3, __VA_ARGS__ )
+#define debugf4( ... ) edac_debug_printk(4, __VA_ARGS__ )
+#else                          /* !CONFIG_EDAC_DEBUG */
+#define debugf0( ... )
+#define debugf1( ... )
+#define debugf2( ... )
+#define debugf3( ... )
+#define debugf4( ... )
+#endif                         /* !CONFIG_EDAC_DEBUG */
+
+
+#define bs_xstr(s) bs_str(s)
+#define bs_str(s) #s
+#define BS_MOD_STR bs_xstr(KBUILD_BASENAME)
+
+#define BIT(x) (1 << (x))
+
+#define PCI_VEND_DEV(vend, dev) PCI_VENDOR_ID_ ## vend, PCI_DEVICE_ID_ ## vend ## _ ## dev
+
+/* memory devices */
+enum dev_type {
+       DEV_UNKNOWN = 0,
+       DEV_X1,
+       DEV_X2,
+       DEV_X4,
+       DEV_X8,
+       DEV_X16,
+       DEV_X32,                /* Do these parts exist? */
+       DEV_X64                 /* Do these parts exist? */
+};
+
+#define DEV_FLAG_UNKNOWN       BIT(DEV_UNKNOWN)
+#define DEV_FLAG_X1            BIT(DEV_X1)
+#define DEV_FLAG_X2            BIT(DEV_X2)
+#define DEV_FLAG_X4            BIT(DEV_X4)
+#define DEV_FLAG_X8            BIT(DEV_X8)
+#define DEV_FLAG_X16           BIT(DEV_X16)
+#define DEV_FLAG_X32           BIT(DEV_X32)
+#define DEV_FLAG_X64           BIT(DEV_X64)
+
+/* memory types */
+enum mem_type {
+       MEM_EMPTY = 0,          /* Empty csrow */
+       MEM_RESERVED,           /* Reserved csrow type */
+       MEM_UNKNOWN,            /* Unknown csrow type */
+       MEM_FPM,                /* Fast page mode */
+       MEM_EDO,                /* Extended data out */
+       MEM_BEDO,               /* Burst Extended data out */
+       MEM_SDR,                /* Single data rate SDRAM */
+       MEM_RDR,                /* Registered single data rate SDRAM */
+       MEM_DDR,                /* Double data rate SDRAM */
+       MEM_RDDR,               /* Registered Double data rate SDRAM */
+       MEM_RMBS                /* Rambus DRAM */
+};
+
+#define MEM_FLAG_EMPTY         BIT(MEM_EMPTY)
+#define MEM_FLAG_RESERVED      BIT(MEM_RESERVED)
+#define MEM_FLAG_UNKNOWN       BIT(MEM_UNKNOWN)
+#define MEM_FLAG_FPM           BIT(MEM_FPM)
+#define MEM_FLAG_EDO           BIT(MEM_EDO)
+#define MEM_FLAG_BEDO          BIT(MEM_BEDO)
+#define MEM_FLAG_SDR           BIT(MEM_SDR)
+#define MEM_FLAG_RDR           BIT(MEM_RDR)
+#define MEM_FLAG_DDR           BIT(MEM_DDR)
+#define MEM_FLAG_RDDR          BIT(MEM_RDDR)
+#define MEM_FLAG_RMBS          BIT(MEM_RMBS)
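+
+/*
+ * Illustrative: a driver advertises which memory types its controller
+ * handles by OR-ing these flags into mci->mtype_cap, e.g.
+ *
+ *     mci->mtype_cap = MEM_FLAG_DDR | MEM_FLAG_RDDR;
+ *
+ * mci_supported_mem_type_show() then walks the set bits to print the names.
+ */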
+
+
+/* chipset Error Detection and Correction capabilities and mode */
+enum edac_type {
+       EDAC_UNKNOWN = 0,       /* Unknown if ECC is available */
+       EDAC_NONE,              /* Doesn't support ECC */
+       EDAC_RESERVED,          /* Reserved ECC type */
+       EDAC_PARITY,            /* Detects parity errors */
+       EDAC_EC,                /* Error Checking - no correction */
+       EDAC_SECDED,            /* Single bit error correction, Double detection */
+       EDAC_S2ECD2ED,          /* Chipkill x2 devices - do these exist? */
+       EDAC_S4ECD4ED,          /* Chipkill x4 devices */
+       EDAC_S8ECD8ED,          /* Chipkill x8 devices */
+       EDAC_S16ECD16ED,        /* Chipkill x16 devices */
+};
+
+#define EDAC_FLAG_UNKNOWN      BIT(EDAC_UNKNOWN)
+#define EDAC_FLAG_NONE         BIT(EDAC_NONE)
+#define EDAC_FLAG_PARITY       BIT(EDAC_PARITY)
+#define EDAC_FLAG_EC           BIT(EDAC_EC)
+#define EDAC_FLAG_SECDED       BIT(EDAC_SECDED)
+#define EDAC_FLAG_S2ECD2ED     BIT(EDAC_S2ECD2ED)
+#define EDAC_FLAG_S4ECD4ED     BIT(EDAC_S4ECD4ED)
+#define EDAC_FLAG_S8ECD8ED     BIT(EDAC_S8ECD8ED)
+#define EDAC_FLAG_S16ECD16ED   BIT(EDAC_S16ECD16ED)
+
+
+/* scrubbing capabilities */
+enum scrub_type {
+       SCRUB_UNKNOWN = 0,      /* Unknown if scrubber is available */
+       SCRUB_NONE,             /* No scrubber */
+       SCRUB_SW_PROG,          /* SW progressive (sequential) scrubbing */
+       SCRUB_SW_SRC,           /* Software scrub only errors */
+       SCRUB_SW_PROG_SRC,      /* Progressive software scrub from an error */
+       SCRUB_SW_TUNABLE,       /* Software scrub frequency is tunable */
+       SCRUB_HW_PROG,          /* HW progressive (sequential) scrubbing */
+       SCRUB_HW_SRC,           /* Hardware scrub only errors */
+       SCRUB_HW_PROG_SRC,      /* Progressive hardware scrub from an error */
+       SCRUB_HW_TUNABLE        /* Hardware scrub frequency is tunable */
+};
+
+#define SCRUB_FLAG_SW_PROG     BIT(SCRUB_SW_PROG)
+#define SCRUB_FLAG_SW_SRC      BIT(SCRUB_SW_SRC)
+#define SCRUB_FLAG_SW_PROG_SRC BIT(SCRUB_SW_PROG_SRC)
+#define SCRUB_FLAG_SW_TUN      BIT(SCRUB_SW_TUNABLE)
+#define SCRUB_FLAG_HW_PROG     BIT(SCRUB_HW_PROG)
+#define SCRUB_FLAG_HW_SRC      BIT(SCRUB_HW_SRC)
+#define SCRUB_FLAG_HW_PROG_SRC BIT(SCRUB_HW_PROG_SRC)
+#define SCRUB_FLAG_HW_TUN      BIT(SCRUB_HW_TUNABLE)
+
+enum mci_sysfs_status {
+       MCI_SYSFS_INACTIVE = 0, /* sysfs entries NOT registered */
+       MCI_SYSFS_ACTIVE        /* sysfs entries ARE registered */
+};
+
+/* FIXME - should have notify capabilities: NMI, LOG, PROC, etc */
+
+/*
+ * There are several things to be aware of that aren't at all obvious:
+ *
+ *
+ * SOCKETS, SOCKET SETS, BANKS, ROWS, CHIP-SELECT ROWS, CHANNELS, etc..
+ *
+ * These are some of the many terms that are thrown about that don't always
+ * mean what people think they mean (Inconceivable!).  In the interest of
+ * creating a common ground for discussion, terms and their definitions
+ * will be established.
+ *
+ * Memory devices:     The individual chip on a memory stick.  These devices
+ *                     commonly output 4 and 8 bits each.  Grouping several
+ *                     of these in parallel provides 64 bits which is common
+ *                     for a memory stick.
+ *
+ * Memory Stick:       A printed circuit board that aggregates multiple
+ *                     memory devices in parallel.  This is the atomic
+ *                     memory component that is purchasable by Joe consumer
+ *                     and loaded into a memory socket.
+ *
+ * Socket:             A physical connector on the motherboard that accepts
+ *                     a single memory stick.
+ *
+ * Channel:            Set of memory devices on a memory stick that must be
+ *                     grouped in parallel with one or more additional
+ *                     channels from other memory sticks.  This parallel
+ *                     grouping of the output from multiple channels is
+ *                     necessary for the smallest granularity of memory access.
+ *                     Some memory controllers are capable of single channel -
+ *                     which means that memory sticks can be loaded
+ *                     individually.  Other memory controllers are only
+ *                     capable of dual channel - which means that memory
+ *                     sticks must be loaded as pairs (see "socket set").
+ *
+ * Chip-select row:    All of the memory devices that are selected together
+ *                     for a single, minimum grain of memory access.
+ *                     This selects all of the parallel memory devices across
+ *                     all of the parallel channels.  Common chip-select rows
+ *                     for single channel are 64 bits, for dual channel 128
+ *                     bits.
+ *
+ * Single-Ranked stick:        A Single-ranked stick has 1 chip-select row of memory.
+ *                     Motherboards commonly drive two chip-select pins to
+ *                     a memory stick. A single-ranked stick will occupy
+ *                     only one of those rows. The other will be unused.
+ *
+ * Double-Ranked stick:        A double-ranked stick has two chip-select rows which
+ *                     access different sets of memory devices.  The two
+ *                     rows cannot be accessed concurrently.
+ *
+ * Double-sided stick: DEPRECATED TERM, see Double-Ranked stick.
+ *                     A double-sided stick has two chip-select rows which
+ *                     access different sets of memory devices.  The two
+ *                     rows cannot be accessed concurrently.  "Double-sided"
+ *                     is irrespective of the memory devices being mounted
+ *                     on both sides of the memory stick.
+ *
+ * Socket set:         All of the memory sticks that are required for
+ *                     a single memory access or all of the memory sticks
+ *                     spanned by a chip-select row.  A single socket set
+ *                     has two chip-select rows and if double-sided sticks
+ *                     are used these will occupy those chip-select rows.
+ *
+ * Bank:               This term is avoided because it is ambiguous when
+ *                     one needs to distinguish between chip-select rows and
+ *                     socket sets.
+ *
+ * Controller pages:
+ *
+ * Physical pages:
+ *
+ * Virtual pages:
+ *
+ *
+ * STRUCTURE ORGANIZATION AND CHOICES
+ *
+ *
+ *
+ * PS - I enjoyed writing all that about as much as you enjoyed reading it.
+ */
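+
+/*
+ * A concrete (hypothetical) example of the terms above:  a dual channel
+ * memory controller loaded with two double-ranked sticks forms one socket
+ * set.  Each stick contributes one channel; the matching ranks of the two
+ * sticks are selected together, so the socket set provides two chip-select
+ * rows, each 128 bits wide and each spanning both channels.
+ */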
+
+
+struct channel_info {
+       int chan_idx;           /* channel index */
+       u32 ce_count;           /* Correctable Errors for this CHANNEL */
+       char label[EDAC_MC_LABEL_LEN + 1];      /* DIMM label on motherboard */
+       struct csrow_info *csrow;       /* the parent */
+};
+
+
+struct csrow_info {
+       unsigned long first_page;       /* first page number in dimm */
+       unsigned long last_page;        /* last page number in dimm */
+       unsigned long page_mask;        /* used for interleaving -
+                                          0UL for non intlv */
+       u32 nr_pages;           /* number of pages in csrow */
+       u32 grain;              /* granularity of reported error in bytes */
+       int csrow_idx;          /* the chip-select row */
+       enum dev_type dtype;    /* memory device type */
+       u32 ue_count;           /* Uncorrectable Errors for this csrow */
+       u32 ce_count;           /* Correctable Errors for this csrow */
+       enum mem_type mtype;    /* memory csrow type */
+       enum edac_type edac_mode;       /* EDAC mode for this csrow */
+       struct mem_ctl_info *mci;       /* the parent */
+
+       struct kobject kobj;    /* sysfs kobject for this csrow */
+
+       /* FIXME the number of CHANNELs might need to become dynamic */
+       u32 nr_channels;
+       struct channel_info *channels;
+};
+
+
+struct mem_ctl_info {
+       struct list_head link;  /* for global list of mem_ctl_info structs */
+       unsigned long mtype_cap;        /* memory types supported by mc */
+       unsigned long edac_ctl_cap;     /* Mem controller EDAC capabilities */
+       unsigned long edac_cap; /* configuration capabilities - this is
+                                  closely related to edac_ctl_cap.  The
+                                  difference is that the controller
+                                  may be capable of s4ecd4ed which would
+                                  be listed in edac_ctl_cap, but if
+                                  channels aren't capable of s4ecd4ed then the
+                                  edac_cap would not have that capability. */
+       unsigned long scrub_cap;        /* chipset scrub capabilities */
+       enum scrub_type scrub_mode;     /* current scrub mode */
+
+       enum mci_sysfs_status sysfs_active;     /* status of sysfs */
+
+       /* pointer to edac checking routine */
+       void (*edac_check) (struct mem_ctl_info * mci);
+       /*
+        * Remaps memory pages: controller pages to physical pages.
+        * For most MC's, this will be NULL.
+        */
+       /* FIXME - why not send the phys page to begin with? */
+       unsigned long (*ctl_page_to_phys) (struct mem_ctl_info * mci,
+                                          unsigned long page);
+       int mc_idx;
+       int nr_csrows;
+       struct csrow_info *csrows;
+       /*
+        * FIXME - what about controllers on other busses? - IDs must be
+        * unique.  pdev pointer should be sufficiently unique, but
+        * BUS:SLOT.FUNC numbers may not be unique.
+        */
+       struct pci_dev *pdev;
+       const char *mod_name;
+       const char *mod_ver;
+       const char *ctl_name;
+       char proc_name[MC_PROC_NAME_MAX_LEN + 1];
+       void *pvt_info;
+       u32 ue_noinfo_count;    /* Uncorrectable Errors w/o info */
+       u32 ce_noinfo_count;    /* Correctable Errors w/o info */
+       u32 ue_count;           /* Total Uncorrectable Errors for this MC */
+       u32 ce_count;           /* Total Correctable Errors for this MC */
+       unsigned long start_time;       /* mci load start time (in jiffies) */
+
+       /* this stuff is for safe removal of mc devices from global list while
+        * NMI handlers may be traversing list
+        */
+       struct rcu_head rcu;
+       struct completion complete;
+
+       /* edac sysfs device control */
+       struct kobject edac_mci_kobj;
+};
+
+
+
+/* write all or some bits in a byte-register*/
+static inline void pci_write_bits8(struct pci_dev *pdev, int offset,
+                                  u8 value, u8 mask)
+{
+       if (mask != 0xff) {
+               u8 buf;
+               pci_read_config_byte(pdev, offset, &buf);
+               value &= mask;
+               buf &= ~mask;
+               value |= buf;
+       }
+       pci_write_config_byte(pdev, offset, value);
+}
+
+
+/* write all or some bits in a word-register*/
+static inline void pci_write_bits16(struct pci_dev *pdev, int offset,
+                                   u16 value, u16 mask)
+{
+       if (mask != 0xffff) {
+               u16 buf;
+               pci_read_config_word(pdev, offset, &buf);
+               value &= mask;
+               buf &= ~mask;
+               value |= buf;
+       }
+       pci_write_config_word(pdev, offset, value);
+}
+
+
+/* write all or some bits in a dword-register*/
+static inline void pci_write_bits32(struct pci_dev *pdev, int offset,
+                                   u32 value, u32 mask)
+{
+       if (mask != 0xffffffff) {
+               u32 buf;
+               pci_read_config_dword(pdev, offset, &buf);
+               value &= mask;
+               buf &= ~mask;
+               value |= buf;
+       }
+       pci_write_config_dword(pdev, offset, value);
+}
+
+
+#ifdef CONFIG_EDAC_DEBUG
+void edac_mc_dump_channel(struct channel_info *chan);
+void edac_mc_dump_mci(struct mem_ctl_info *mci);
+void edac_mc_dump_csrow(struct csrow_info *csrow);
+#endif                         /* CONFIG_EDAC_DEBUG */
+
+extern int edac_mc_add_mc(struct mem_ctl_info *mci);
+extern int edac_mc_del_mc(struct mem_ctl_info *mci);
+
+extern int edac_mc_find_csrow_by_page(struct mem_ctl_info *mci,
+                                          unsigned long page);
+
+extern struct mem_ctl_info *edac_mc_find_mci_by_pdev(struct pci_dev
+                                                         *pdev);
+
+extern void edac_mc_scrub_block(unsigned long page,
+                                    unsigned long offset, u32 size);
+
+/*
+ * The no info errors are used when error overflows are reported.
+ * There are a limited number of error logging registers that can
+ * be exhausted.  When all registers are exhausted and an additional
+ * error occurs then an error overflow register records that an
+ * error occurred and the type of error, but doesn't have any
+ * further information.  The ce/ue versions make for cleaner
+ * reporting logic and function interface - reduces conditional
+ * statement clutter and extra function arguments.
+ */
+extern void edac_mc_handle_ce(struct mem_ctl_info *mci,
+                                  unsigned long page_frame_number,
+                                  unsigned long offset_in_page,
+                                  unsigned long syndrome,
+                                  int row, int channel, const char *msg);
+
+extern void edac_mc_handle_ce_no_info(struct mem_ctl_info *mci,
+                                          const char *msg);
+
+extern void edac_mc_handle_ue(struct mem_ctl_info *mci,
+                                  unsigned long page_frame_number,
+                                  unsigned long offset_in_page,
+                                  int row, const char *msg);
+
+extern void edac_mc_handle_ue_no_info(struct mem_ctl_info *mci,
+                                          const char *msg);
+
+/*
+ * This kmallocs and initializes all the structures.
+ * It can't be used if the structures don't all share the same lifetime.
+ */
+extern struct mem_ctl_info *edac_mc_alloc(unsigned sz_pvt,
+               unsigned nr_csrows, unsigned nr_chans);
+
+/* Free an mc previously allocated by edac_mc_alloc() */
+extern void edac_mc_free(struct mem_ctl_info *mci);
+
+
+#endif                         /* _EDAC_MC_H_ */
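
To make the interface above concrete, here is a minimal sketch of how a chipset driver is expected to drive it: allocate the control structure, describe each populated chip-select row, register the controller, and report errors from the polled check callback.  The example_* names and the row sizes are hypothetical; only the edac_mc_* calls and the struct fields come from edac_mc.h.

#include <linux/pci.h>
#include "edac_mc.h"

static void example_check(struct mem_ctl_info *mci)
{
        /* Poll the chipset's error registers here and report what was found. */
        edac_mc_handle_ce_no_info(mci, "example CE (no address available)");
}

static int example_probe1(struct pci_dev *pdev)
{
        struct mem_ctl_info *mci;
        int index;

        /* no private data, 4 chip-select rows, 1 channel per row */
        mci = edac_mc_alloc(0, 4, 1);
        if (mci == NULL)
                return -ENOMEM;

        mci->pdev = pdev;
        mci->mtype_cap = MEM_FLAG_DDR;
        mci->edac_ctl_cap = EDAC_FLAG_SECDED;
        mci->edac_cap = EDAC_FLAG_SECDED;
        mci->mod_name = "example_edac";
        mci->ctl_name = "example";
        mci->edac_check = example_check;

        for (index = 0; index < mci->nr_csrows; index++) {
                struct csrow_info *csrow = &mci->csrows[index];

                /* page ranges normally come from chipset row-boundary regs */
                csrow->first_page = index * 0x8000;
                csrow->last_page = csrow->first_page + 0x8000 - 1;
                csrow->nr_pages = 0x8000;
                csrow->grain = 1 << 12; /* 4KiB */
                csrow->mtype = MEM_DDR;
                csrow->dtype = DEV_UNKNOWN;
                csrow->edac_mode = EDAC_SECDED;
        }

        if (edac_mc_add_mc(mci)) {
                edac_mc_free(mci);
                return -ENODEV;
        }
        return 0;
}
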
diff --git a/drivers/edac/i82860_edac.c b/drivers/edac/i82860_edac.c
new file mode 100644 (file)
index 0000000..52596e7
--- /dev/null
@@ -0,0 +1,299 @@
+/*
+ * Intel 82860 Memory Controller kernel module
+ * (C) 2005 Red Hat (http://www.redhat.com)
+ * This file may be distributed under the terms of the
+ * GNU General Public License.
+ *
+ * Written by Ben Woodard <woodard@redhat.com>
+ * shamelessly copied from and based upon the edac_i82875 driver
+ * by Thayne Harbaugh of Linux Networx. (http://lnxi.com)
+ */
+
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/pci_ids.h>
+#include <linux/slab.h>
+#include "edac_mc.h"
+
+
+#ifndef PCI_DEVICE_ID_INTEL_82860_0
+#define PCI_DEVICE_ID_INTEL_82860_0    0x2531
+#endif                         /* PCI_DEVICE_ID_INTEL_82860_0 */
+
+#define I82860_MCHCFG 0x50
+#define I82860_GBA 0x60
+#define I82860_GBA_MASK 0x7FF
+#define I82860_GBA_SHIFT 24
+#define I82860_ERRSTS 0xC8
+#define I82860_EAP 0xE4
+#define I82860_DERRCTL_STS 0xE2
+
+enum i82860_chips {
+       I82860 = 0,
+};
+
+struct i82860_dev_info {
+       const char *ctl_name;
+};
+
+struct i82860_error_info {
+       u16 errsts;
+       u32 eap;
+       u16 derrsyn;
+       u16 errsts2;
+};
+
+static const struct i82860_dev_info i82860_devs[] = {
+       [I82860] = {
+                   .ctl_name = "i82860"},
+};
+
+static struct pci_dev *mci_pdev = NULL;        /* init dev: in case the AGP code
+                                          has already registered a driver */
+
+static int i82860_registered = 1;
+
+static void i82860_get_error_info (struct mem_ctl_info *mci,
+               struct i82860_error_info *info)
+{
+       /*
+        * This is a mess because there is no atomic way to read all the
+        * registers at once, and a CE can be overwritten by a UE between
+        * the reads.
+        */
+       pci_read_config_word(mci->pdev, I82860_ERRSTS, &info->errsts);
+       pci_read_config_dword(mci->pdev, I82860_EAP, &info->eap);
+       pci_read_config_word(mci->pdev, I82860_DERRCTL_STS, &info->derrsyn);
+       pci_read_config_word(mci->pdev, I82860_ERRSTS, &info->errsts2);
+
+       pci_write_bits16(mci->pdev, I82860_ERRSTS, 0x0003, 0x0003);
+
+       /*
+        * If the error is the same for both reads then the first set of reads
+        * is valid.  If there is a change then there is a CE no info and the
+        * second set of reads is valid and should be UE info.
+        */
+       if (!(info->errsts2 & 0x0003))
+               return;
+       if ((info->errsts ^ info->errsts2) & 0x0003) {
+               pci_read_config_dword(mci->pdev, I82860_EAP, &info->eap);
+               pci_read_config_word(mci->pdev, I82860_DERRCTL_STS,
+                   &info->derrsyn);
+       }
+}
+
+static int i82860_process_error_info (struct mem_ctl_info *mci,
+               struct i82860_error_info *info, int handle_errors)
+{
+       int row;
+
+       if (!(info->errsts2 & 0x0003))
+               return 0;
+
+       if (!handle_errors)
+               return 1;
+
+       if ((info->errsts ^ info->errsts2) & 0x0003) {
+               edac_mc_handle_ce_no_info(mci, "UE overwrote CE");
+               info->errsts = info->errsts2;
+       }
+
+       info->eap >>= PAGE_SHIFT;
+       row = edac_mc_find_csrow_by_page(mci, info->eap);
+
+       if (info->errsts & 0x0002)
+               edac_mc_handle_ue(mci, info->eap, 0, row, "i82860 UE");
+       else
+               edac_mc_handle_ce(mci, info->eap, 0, info->derrsyn, row,
+                                      0, "i82860 CE");
+
+       return 1;
+}
+
+static void i82860_check(struct mem_ctl_info *mci)
+{
+       struct i82860_error_info info;
+
+       debugf1("MC%d: " __FILE__ ": %s()\n", mci->mc_idx, __func__);
+       i82860_get_error_info(mci, &info);
+       i82860_process_error_info(mci, &info, 1);
+}
+
+static int i82860_probe1(struct pci_dev *pdev, int dev_idx)
+{
+       int rc = -ENODEV;
+       int index;
+       struct mem_ctl_info *mci = NULL;
+       unsigned long last_cumul_size;
+
+       u16 mchcfg_ddim;        /* DRAM Data Integrity Mode 0=none,2=edac */
+
+       /* RDRAM has channels but these don't map onto the abstractions that
+          edac uses.
+          The device groups from the GRA registers seem to map reasonably
+          well onto the notion of a chip-select row.
+          There are 16 GRA registers; since the name is associated with
+          the channel and the GRA registers map to physical devices, we
+          create one channel per group.
+        */
+       mci = edac_mc_alloc(0, 16, 1);
+       if (!mci)
+               return -ENOMEM;
+
+       debugf3("MC: " __FILE__ ": %s(): init mci\n", __func__);
+
+       mci->pdev = pdev;
+       mci->mtype_cap = MEM_FLAG_DDR;
+
+
+       mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
+       /* I'm not sure about this but I think that all RDRAM is SECDED */
+       mci->edac_cap = EDAC_FLAG_SECDED;
+       /* adjust FLAGS */
+
+       mci->mod_name = BS_MOD_STR;
+       mci->mod_ver = "$Revision: 1.1.2.6 $";
+       mci->ctl_name = i82860_devs[dev_idx].ctl_name;
+       mci->edac_check = i82860_check;
+       mci->ctl_page_to_phys = NULL;
+
+       pci_read_config_word(mci->pdev, I82860_MCHCFG, &mchcfg_ddim);
+       mchcfg_ddim = mchcfg_ddim & 0x180;
+
+       /*
+        * The group row boundary (GRA) reg values are boundary address
+        * The group row boundary (GRA) reg values are the boundary addresses
+        * for each DRAM row with a granularity of 16MB.  GRA regs are
+        * cumulative; therefore GRA15 will contain the total memory
+        * contained in all of the rows.
+       for (last_cumul_size = index = 0; index < mci->nr_csrows; index++) {
+               u16 value;
+               u32 cumul_size;
+               struct csrow_info *csrow = &mci->csrows[index];
+
+               pci_read_config_word(mci->pdev, I82860_GBA + index * 2,
+                                    &value);
+
+               cumul_size = (value & I82860_GBA_MASK) <<
+                   (I82860_GBA_SHIFT - PAGE_SHIFT);
+               debugf3("MC: " __FILE__ ": %s(): (%d) cumul_size 0x%x\n",
+                       __func__, index, cumul_size);
+               if (cumul_size == last_cumul_size)
+                       continue;       /* not populated */
+
+               csrow->first_page = last_cumul_size;
+               csrow->last_page = cumul_size - 1;
+               csrow->nr_pages = cumul_size - last_cumul_size;
+               last_cumul_size = cumul_size;
+               csrow->grain = 1 << 12; /* I82860_EAP has 4KiB resolution */
+               csrow->mtype = MEM_RMBS;
+               csrow->dtype = DEV_UNKNOWN;
+               csrow->edac_mode = mchcfg_ddim ? EDAC_SECDED : EDAC_NONE;
+       }
+
+       /* clear counters */
+       pci_write_bits16(mci->pdev, I82860_ERRSTS, 0x0003, 0x0003);
+
+       if (edac_mc_add_mc(mci)) {
+               debugf3("MC: " __FILE__
+                       ": %s(): failed edac_mc_add_mc()\n",
+                       __func__);
+               edac_mc_free(mci);
+       } else {
+               /* get this far and it's successful */
+               debugf3("MC: " __FILE__ ": %s(): success\n", __func__);
+               rc = 0;
+       }
+       return rc;
+}
+
+/* returns count (>= 0), or negative on error */
+static int __devinit i82860_init_one(struct pci_dev *pdev,
+                                    const struct pci_device_id *ent)
+{
+       int rc;
+
+       debugf0("MC: " __FILE__ ": %s()\n", __func__);
+
+       printk(KERN_INFO "i82860 init one\n");
+       if(pci_enable_device(pdev) < 0)
+               return -EIO;
+       rc = i82860_probe1(pdev, ent->driver_data);
+       if(rc == 0)
+               mci_pdev = pci_dev_get(pdev);
+       return rc;
+}
+
+static void __devexit i82860_remove_one(struct pci_dev *pdev)
+{
+       struct mem_ctl_info *mci;
+
+       debugf0(__FILE__ ": %s()\n", __func__);
+
+       mci = edac_mc_find_mci_by_pdev(pdev);
+       if ((mci != NULL) && (edac_mc_del_mc(mci) == 0))
+               edac_mc_free(mci);
+}
+
+static const struct pci_device_id i82860_pci_tbl[] __devinitdata = {
+       {PCI_VEND_DEV(INTEL, 82860_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+        I82860},
+       {0,}                    /* 0 terminated list. */
+};
+
+MODULE_DEVICE_TABLE(pci, i82860_pci_tbl);
+
+static struct pci_driver i82860_driver = {
+       .name = BS_MOD_STR,
+       .probe = i82860_init_one,
+       .remove = __devexit_p(i82860_remove_one),
+       .id_table = i82860_pci_tbl,
+};
+
+static int __init i82860_init(void)
+{
+       int pci_rc;
+
+       debugf3("MC: " __FILE__ ": %s()\n", __func__);
+       if ((pci_rc = pci_register_driver(&i82860_driver)) < 0)
+               return pci_rc;
+
+       if (!mci_pdev) {
+               i82860_registered = 0;
+               mci_pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
+                                         PCI_DEVICE_ID_INTEL_82860_0, NULL);
+               if (mci_pdev == NULL) {
+                       debugf0("860 pci_get_device fail\n");
+                       return -ENODEV;
+               }
+               pci_rc = i82860_init_one(mci_pdev, i82860_pci_tbl);
+               if (pci_rc < 0) {
+                       debugf0("860 init fail\n");
+                       pci_dev_put(mci_pdev);
+                       return -ENODEV;
+               }
+       }
+       return 0;
+}
+
+static void __exit i82860_exit(void)
+{
+       debugf3("MC: " __FILE__ ": %s()\n", __func__);
+
+       pci_unregister_driver(&i82860_driver);
+       if (!i82860_registered) {
+               i82860_remove_one(mci_pdev);
+               pci_dev_put(mci_pdev);
+       }
+}
+
+module_init(i82860_init);
+module_exit(i82860_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR
+    ("Red Hat Inc. (http://www.redhat.com) Ben Woodard <woodard@redhat.com>");
+MODULE_DESCRIPTION("ECC support for Intel 82860 memory hub controllers");
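
A note on the error-status handling above: ERRSTS is read twice because a UE can land between the reads and overwrite a logged CE; i82860_process_error_info() detects this with the XOR test.  The same decision logic in isolation, as a standalone sketch (the classify() helper and the sample values are invented for illustration):

#include <stdio.h>
#include <stdint.h>

/* Bit 1 = UE, bit 0 = CE, i.e. the 0x0003 mask the driver uses. */
static const char *classify(uint16_t errsts, uint16_t errsts2)
{
        if (!(errsts2 & 0x0003))
                return "no error logged";
        if ((errsts ^ errsts2) & 0x0003)
                return "CE with no info (a UE overwrote it); second read describes the UE";
        return (errsts & 0x0002) ? "UE" : "CE";
}

int main(void)
{
        printf("%s\n", classify(0x0000, 0x0000));       /* nothing pending */
        printf("%s\n", classify(0x0001, 0x0001));       /* stable CE */
        printf("%s\n", classify(0x0001, 0x0003));       /* UE arrived between the reads */
        return 0;
}
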
diff --git a/drivers/edac/i82875p_edac.c b/drivers/edac/i82875p_edac.c
new file mode 100644 (file)
index 0000000..009c08f
--- /dev/null
@@ -0,0 +1,532 @@
+/*
+ * Intel D82875P Memory Controller kernel module
+ * (C) 2003 Linux Networx (http://lnxi.com)
+ * This file may be distributed under the terms of the
+ * GNU General Public License.
+ *
+ * Written by Thayne Harbaugh
+ * Contributors:
+ *     Wang Zhenyu at intel.com
+ *
+ * $Id: edac_i82875p.c,v 1.5.2.11 2005/10/05 00:43:44 dsp_llnl Exp $
+ *
+ * Note: E7210 appears same as D82875P - zhenyu.z.wang at intel.com
+ */
+
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/init.h>
+
+#include <linux/pci.h>
+#include <linux/pci_ids.h>
+
+#include <linux/slab.h>
+
+#include "edac_mc.h"
+
+
+#ifndef PCI_DEVICE_ID_INTEL_82875_0
+#define PCI_DEVICE_ID_INTEL_82875_0    0x2578
+#endif                         /* PCI_DEVICE_ID_INTEL_82875_0 */
+
+#ifndef PCI_DEVICE_ID_INTEL_82875_6
+#define PCI_DEVICE_ID_INTEL_82875_6    0x257e
+#endif                         /* PCI_DEVICE_ID_INTEL_82875_6 */
+
+
+/* four csrows in dual channel, eight in single channel */
+#define I82875P_NR_CSROWS(nr_chans) (8/(nr_chans))
+
+
+/* Intel 82875p register addresses - device 0 function 0 - DRAM Controller */
+#define I82875P_EAP            0x58    /* Error Address Pointer (32b)
+                                        *
+                                        * 31:12 block address
+                                        * 11:0  reserved
+                                        */
+
+#define I82875P_DERRSYN                0x5c    /* DRAM Error Syndrome (8b)
+                                        *
+                                        *  7:0  DRAM ECC Syndrome
+                                        */
+
+#define I82875P_DES            0x5d    /* DRAM Error Status (8b)
+                                        *
+                                        *  7:1  reserved
+                                        *  0    Error channel 0/1
+                                        */
+
+#define I82875P_ERRSTS         0xc8    /* Error Status Register (16b)
+                                        *
+                                        * 15:10 reserved
+                                        *  9    non-DRAM lock error (ndlock)
+                                        *  8    Sftwr Generated SMI
+                                        *  7    ECC UE
+                                        *  6    reserved
+                                        *  5    MCH detects unimplemented cycle
+                                        *  4    AGP access outside GA
+                                        *  3    Invalid AGP access
+                                        *  2    Invalid GA translation table
+                                        *  1    Unsupported AGP command
+                                        *  0    ECC CE
+                                        */
+
+#define I82875P_ERRCMD         0xca    /* Error Command (16b)
+                                        *
+                                        * 15:10 reserved
+                                        *  9    SERR on non-DRAM lock
+                                        *  8    SERR on ECC UE
+                                        *  7    SERR on ECC CE
+                                        *  6    target abort on high exception
+                                        *  5    detect unimplemented cyc
+                                        *  4    AGP access outside of GA
+                                        *  3    SERR on invalid AGP access
+                                        *  2    invalid translation table
+                                        *  1    SERR on unsupported AGP command
+                                        *  0    reserved
+                                        */
+
+
+/* Intel 82875p register addresses - device 6 function 0 - DRAM Controller */
+#define I82875P_PCICMD6                0x04    /* PCI Command Register (16b)
+                                        *
+                                        * 15:10 reserved
+                                        *  9    fast back-to-back - ro 0
+                                        *  8    SERR enable - ro 0
+                                        *  7    addr/data stepping - ro 0
+                                        *  6    parity err enable - ro 0
+                                        *  5    VGA palette snoop - ro 0
+                                        *  4    mem wr & invalidate - ro 0
+                                        *  3    special cycle - ro 0
+                                        *  2    bus master - ro 0
+                                        *  1    mem access dev6 - 0(dis),1(en)
+                                        *  0    IO access dev3 - 0(dis),1(en)
+                                        */
+
+#define I82875P_BAR6           0x10    /* Mem Delays Base ADDR Reg (32b)
+                                        *
+                                        * 31:12 mem base addr [31:12]
+                                        * 11:4  address mask - ro 0
+                                        *  3    prefetchable - ro 0(non),1(pre)
+                                        *  2:1  mem type - ro 0
+                                        *  0    mem space - ro 0
+                                        */
+
+/* Intel 82875p MMIO register space - device 0 function 0 - MMR space */
+
+#define I82875P_DRB_SHIFT 26   /* 64MiB grain */
+#define I82875P_DRB            0x00    /* DRAM Row Boundary (8b x 8)
+                                        *
+                                        *  7    reserved
+                                        *  6:0  64MiB row boundary addr
+                                        */
+
+#define I82875P_DRA            0x10    /* DRAM Row Attribute (4b x 8)
+                                        *
+                                        *  7    reserved
+                                        *  6:4  row attr row 1
+                                        *  3    reserved
+                                        *  2:0  row attr row 0
+                                        *
+                                        * 000 =  4KiB
+                                        * 001 =  8KiB
+                                        * 010 = 16KiB
+                                        * 011 = 32KiB
+                                        */
+
+#define I82875P_DRC            0x68    /* DRAM Controller Mode (32b)
+                                        *
+                                        * 31:30 reserved
+                                        * 29    init complete
+                                        * 28:23 reserved
+                                        * 22:21 nr chan 00=1,01=2
+                                        * 20    reserved
+                                        * 19:18 Data Integ Mode 00=none,01=ecc
+                                        * 17:11 reserved
+                                        * 10:8  refresh mode
+                                        *  7    reserved
+                                        *  6:4  mode select
+                                        *  3:2  reserved
+                                        *  1:0  DRAM type 01=DDR
+                                        */
+
+
+enum i82875p_chips {
+       I82875P = 0,
+};
+
+
+struct i82875p_pvt {
+       struct pci_dev *ovrfl_pdev;
+       void *ovrfl_window;
+};
+
+
+struct i82875p_dev_info {
+       const char *ctl_name;
+};
+
+
+struct i82875p_error_info {
+       u16 errsts;
+       u32 eap;
+       u8 des;
+       u8 derrsyn;
+       u16 errsts2;
+};
+
+
+static const struct i82875p_dev_info i82875p_devs[] = {
+       [I82875P] = {
+                    .ctl_name = "i82875p"},
+};
+
+static struct pci_dev *mci_pdev = NULL;        /* init dev: in case the AGP code
+                                          has already registered a driver */
+static int i82875p_registered = 1;
+
+static void i82875p_get_error_info (struct mem_ctl_info *mci,
+               struct i82875p_error_info *info)
+{
+       /*
+        * This is a mess because there is no atomic way to read all the
+        * registers at once, and a CE can be overwritten by a UE between
+        * the reads.
+        */
+       pci_read_config_word(mci->pdev, I82875P_ERRSTS, &info->errsts);
+       pci_read_config_dword(mci->pdev, I82875P_EAP, &info->eap);
+       pci_read_config_byte(mci->pdev, I82875P_DES, &info->des);
+       pci_read_config_byte(mci->pdev, I82875P_DERRSYN, &info->derrsyn);
+       pci_read_config_word(mci->pdev, I82875P_ERRSTS, &info->errsts2);
+
+       pci_write_bits16(mci->pdev, I82875P_ERRSTS, 0x0081, 0x0081);
+
+       /*
+        * If the error is the same for both reads then the first set
+        * of reads is valid.  If there is a change then
+        * there is a CE no info and the second set of reads is valid
+        * and should be UE info.
+        */
+       if (!(info->errsts2 & 0x0081))
+               return;
+       if ((info->errsts ^ info->errsts2) & 0x0081) {
+               pci_read_config_dword(mci->pdev, I82875P_EAP, &info->eap);
+               pci_read_config_byte(mci->pdev, I82875P_DES, &info->des);
+               pci_read_config_byte(mci->pdev, I82875P_DERRSYN,
+                   &info->derrsyn);
+       }
+}
+
+static int i82875p_process_error_info (struct mem_ctl_info *mci,
+               struct i82875p_error_info *info, int handle_errors)
+{
+       int row, multi_chan;
+
+       multi_chan = mci->csrows[0].nr_channels - 1;
+
+       if (!(info->errsts2 & 0x0081))
+               return 0;
+
+       if (!handle_errors)
+               return 1;
+
+       if ((info->errsts ^ info->errsts2) & 0x0081) {
+               edac_mc_handle_ce_no_info(mci, "UE overwrote CE");
+               info->errsts = info->errsts2;
+       }
+
+       info->eap >>= PAGE_SHIFT;
+       row = edac_mc_find_csrow_by_page(mci, info->eap);
+
+       if (info->errsts & 0x0080)
+               edac_mc_handle_ue(mci, info->eap, 0, row, "i82875p UE");
+       else
+               edac_mc_handle_ce(mci, info->eap, 0, info->derrsyn, row,
+                                      multi_chan ? (info->des & 0x1) : 0,
+                                      "i82875p CE");
+
+       return 1;
+}
+
+
+static void i82875p_check(struct mem_ctl_info *mci)
+{
+       struct i82875p_error_info info;
+
+       debugf1("MC%d: " __FILE__ ": %s()\n", mci->mc_idx, __func__);
+       i82875p_get_error_info(mci, &info);
+       i82875p_process_error_info(mci, &info, 1);
+}
+
+
+#ifdef CONFIG_PROC_FS
+extern int pci_proc_attach_device(struct pci_dev *);
+#endif
+
+static int i82875p_probe1(struct pci_dev *pdev, int dev_idx)
+{
+       int rc = -ENODEV;
+       int index;
+       struct mem_ctl_info *mci = NULL;
+       struct i82875p_pvt *pvt = NULL;
+       unsigned long last_cumul_size;
+       struct pci_dev *ovrfl_pdev;
+       void __iomem *ovrfl_window = NULL;
+
+       u32 drc;
+       u32 drc_chan;           /* Number of channels 0=1chan,1=2chan */
+       u32 nr_chans;
+       u32 drc_ddim;           /* DRAM Data Integrity Mode 0=none,2=edac */
+
+       debugf0("MC: " __FILE__ ": %s()\n", __func__);
+
+       ovrfl_pdev = pci_find_device(PCI_VEND_DEV(INTEL, 82875_6), NULL);
+
+       if (!ovrfl_pdev) {
+               /*
+                * Intel tells BIOS developers to hide device 6 which
+                * configures the overflow device access containing
+                * the DRBs - this is where we expose device 6.
+                * http://www.x86-secret.com/articles/tweak/pat/patsecrets-2.htm
+                */
+               pci_write_bits8(pdev, 0xf4, 0x2, 0x2);
+               ovrfl_pdev =
+                   pci_scan_single_device(pdev->bus, PCI_DEVFN(6, 0));
+               if (!ovrfl_pdev)
+                       goto fail;
+       }
+#ifdef CONFIG_PROC_FS
+       if (!ovrfl_pdev->procent && pci_proc_attach_device(ovrfl_pdev)) {
+               printk(KERN_ERR "MC: " __FILE__
+                      ": %s(): Failed to attach overflow device\n",
+                      __func__);
+               goto fail;
+       }
+#endif                         /* CONFIG_PROC_FS */
+       if (pci_enable_device(ovrfl_pdev)) {
+               printk(KERN_ERR "MC: " __FILE__
+                      ": %s(): Failed to enable overflow device\n",
+                      __func__);
+               goto fail;
+       }
+
+       if (pci_request_regions(ovrfl_pdev, pci_name(ovrfl_pdev))) {
+#ifdef CORRECT_BIOS
+               goto fail;
+#endif
+       }
+       /* cache is irrelevant for PCI bus reads/writes */
+       ovrfl_window = ioremap_nocache(pci_resource_start(ovrfl_pdev, 0),
+                                      pci_resource_len(ovrfl_pdev, 0));
+
+       if (!ovrfl_window) {
+               printk(KERN_ERR "MC: " __FILE__
+                      ": %s(): Failed to ioremap bar6\n", __func__);
+               goto fail;
+       }
+
+       /* need to find out the number of channels */
+       drc = readl(ovrfl_window + I82875P_DRC);
+       drc_chan = ((drc >> 21) & 0x1);
+       nr_chans = drc_chan + 1;
+       drc_ddim = (drc >> 18) & 0x1;
+
+       mci = edac_mc_alloc(sizeof(*pvt), I82875P_NR_CSROWS(nr_chans),
+                                nr_chans);
+
+       if (!mci) {
+               rc = -ENOMEM;
+               goto fail;
+       }
+
+       debugf3("MC: " __FILE__ ": %s(): init mci\n", __func__);
+
+       mci->pdev = pdev;
+       mci->mtype_cap = MEM_FLAG_DDR;
+
+       mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
+       mci->edac_cap = EDAC_FLAG_UNKNOWN;
+       /* adjust FLAGS */
+
+       mci->mod_name = BS_MOD_STR;
+       mci->mod_ver = "$Revision: 1.5.2.11 $";
+       mci->ctl_name = i82875p_devs[dev_idx].ctl_name;
+       mci->edac_check = i82875p_check;
+       mci->ctl_page_to_phys = NULL;
+
+       debugf3("MC: " __FILE__ ": %s(): init pvt\n", __func__);
+
+       pvt = (struct i82875p_pvt *) mci->pvt_info;
+       pvt->ovrfl_pdev = ovrfl_pdev;
+       pvt->ovrfl_window = ovrfl_window;
+
+       /*
+        * The dram row boundary (DRB) reg values are the boundary addresses
+        * for each DRAM row with a granularity of 32 or 64MB (single/dual
+        * channel operation).  DRB regs are cumulative; therefore DRB7 will
+        * contain the total memory contained in all eight rows.
+        */
+       for (last_cumul_size = index = 0; index < mci->nr_csrows; index++) {
+               u8 value;
+               u32 cumul_size;
+               struct csrow_info *csrow = &mci->csrows[index];
+
+               value = readb(ovrfl_window + I82875P_DRB + index);
+               cumul_size = value << (I82875P_DRB_SHIFT - PAGE_SHIFT);
+               debugf3("MC: " __FILE__ ": %s(): (%d) cumul_size 0x%x\n",
+                       __func__, index, cumul_size);
+               if (cumul_size == last_cumul_size)
+                       continue;       /* not populated */
+
+               csrow->first_page = last_cumul_size;
+               csrow->last_page = cumul_size - 1;
+               csrow->nr_pages = cumul_size - last_cumul_size;
+               last_cumul_size = cumul_size;
+               csrow->grain = 1 << 12; /* I82875P_EAP has 4KiB resolution */
+               csrow->mtype = MEM_DDR;
+               csrow->dtype = DEV_UNKNOWN;
+               csrow->edac_mode = drc_ddim ? EDAC_SECDED : EDAC_NONE;
+       }
+
+       /* clear counters */
+       pci_write_bits16(mci->pdev, I82875P_ERRSTS, 0x0081, 0x0081);
+
+       if (edac_mc_add_mc(mci)) {
+               debugf3("MC: " __FILE__
+                       ": %s(): failed edac_mc_add_mc()\n", __func__);
+               goto fail;
+       }
+
+       /* get this far and it's successful */
+       debugf3("MC: " __FILE__ ": %s(): success\n", __func__);
+       return 0;
+
+      fail:
+       if (mci)
+               edac_mc_free(mci);
+
+       if (ovrfl_window)
+               iounmap(ovrfl_window);
+
+       if (ovrfl_pdev) {
+               pci_release_regions(ovrfl_pdev);
+               pci_disable_device(ovrfl_pdev);
+       }
+
+       /* NOTE: the ovrfl proc entry and pci_dev are intentionally left */
+       return rc;
+}
+
+
+/* returns count (>= 0), or negative on error */
+static int __devinit i82875p_init_one(struct pci_dev *pdev,
+                                     const struct pci_device_id *ent)
+{
+       int rc;
+
+       debugf0("MC: " __FILE__ ": %s()\n", __func__);
+
+       printk(KERN_INFO "i82875p init one\n");
+       if(pci_enable_device(pdev) < 0)
+               return -EIO;
+       rc = i82875p_probe1(pdev, ent->driver_data);
+       if (mci_pdev == NULL)
+               mci_pdev = pci_dev_get(pdev);
+       return rc;
+}
+
+
+static void __devexit i82875p_remove_one(struct pci_dev *pdev)
+{
+       struct mem_ctl_info *mci;
+       struct i82875p_pvt *pvt = NULL;
+
+       debugf0(__FILE__ ": %s()\n", __func__);
+
+       if ((mci = edac_mc_find_mci_by_pdev(pdev)) == NULL)
+               return;
+
+       pvt = (struct i82875p_pvt *) mci->pvt_info;
+       if (pvt->ovrfl_window)
+               iounmap(pvt->ovrfl_window);
+
+       if (pvt->ovrfl_pdev) {
+#ifdef CORRECT_BIOS
+               pci_release_regions(pvt->ovrfl_pdev);
+#endif                         /*CORRECT_BIOS */
+               pci_disable_device(pvt->ovrfl_pdev);
+               pci_dev_put(pvt->ovrfl_pdev);
+       }
+
+       if (edac_mc_del_mc(mci))
+               return;
+
+       edac_mc_free(mci);
+}
+
+
+static const struct pci_device_id i82875p_pci_tbl[] __devinitdata = {
+       {PCI_VEND_DEV(INTEL, 82875_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+        I82875P},
+       {0,}                    /* 0 terminated list. */
+};
+
+MODULE_DEVICE_TABLE(pci, i82875p_pci_tbl);
+
+
+static struct pci_driver i82875p_driver = {
+       .name = BS_MOD_STR,
+       .probe = i82875p_init_one,
+       .remove = __devexit_p(i82875p_remove_one),
+       .id_table = i82875p_pci_tbl,
+};
+
+
+static int __init i82875p_init(void)
+{
+       int pci_rc;
+
+       debugf3("MC: " __FILE__ ": %s()\n", __func__);
+       pci_rc = pci_register_driver(&i82875p_driver);
+       if (pci_rc < 0)
+               return pci_rc;
+       if (mci_pdev == NULL) {
+               i82875p_registered = 0;
+               mci_pdev =
+                   pci_get_device(PCI_VENDOR_ID_INTEL,
+                                  PCI_DEVICE_ID_INTEL_82875_0, NULL);
+               if (!mci_pdev) {
+                       debugf0("875p pci_get_device fail\n");
+                       return -ENODEV;
+               }
+               pci_rc = i82875p_init_one(mci_pdev, i82875p_pci_tbl);
+               if (pci_rc < 0) {
+                       debugf0("875p init fail\n");
+                       pci_dev_put(mci_pdev);
+                       return -ENODEV;
+               }
+       }
+       return 0;
+}
+
+
+static void __exit i82875p_exit(void)
+{
+       debugf3("MC: " __FILE__ ": %s()\n", __func__);
+
+       pci_unregister_driver(&i82875p_driver);
+       if (!i82875p_registered) {
+               i82875p_remove_one(mci_pdev);
+               pci_dev_put(mci_pdev);
+       }
+}
+
+
+module_init(i82875p_init);
+module_exit(i82875p_exit);
+
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Linux Networx (http://lnxi.com) Thayne Harbaugh");
+MODULE_DESCRIPTION("MC support for Intel 82875 memory hub controllers");
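
The cumulative row-boundary arithmetic in i82875p_probe1() (the GRA loop in i82860_probe1() works the same way) is easier to follow with concrete numbers.  A standalone sketch with invented DRB readings, 64MiB boundary granularity and 4KiB pages:

#include <stdio.h>

#define EXAMPLE_PAGE_SHIFT 12   /* 4KiB pages */
#define EXAMPLE_DRB_SHIFT  26   /* 64MiB row-boundary granularity */

int main(void)
{
        /* Hypothetical DRB values: rows 0 and 1 hold 512MiB each, the
         * remaining rows are empty so the boundary stops increasing. */
        unsigned char drb[8] = { 8, 16, 16, 16, 16, 16, 16, 16 };
        unsigned long last_cumul_size = 0;
        int index;

        for (index = 0; index < 8; index++) {
                unsigned long cumul_size = (unsigned long)drb[index]
                    << (EXAMPLE_DRB_SHIFT - EXAMPLE_PAGE_SHIFT);

                if (cumul_size == last_cumul_size)
                        continue;       /* row not populated */

                printf("csrow %d: first_page %lu, last_page %lu, nr_pages %lu\n",
                       index, last_cumul_size, cumul_size - 1,
                       cumul_size - last_cumul_size);
                last_cumul_size = cumul_size;
        }
        return 0;
}
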
diff --git a/drivers/edac/r82600_edac.c b/drivers/edac/r82600_edac.c
new file mode 100644 (file)
index 0000000..e908928
--- /dev/null
@@ -0,0 +1,407 @@
+/*
+ * Radisys 82600 Embedded chipset Memory Controller kernel module
+ * (C) 2005 EADS Astrium
+ * This file may be distributed under the terms of the
+ * GNU General Public License.
+ *
+ * Written by Tim Small <tim@buttersideup.com>, based on work by Thayne
+ * Harbaugh, Dan Hollis <goemon at anime dot net> and others.
+ *
+ * $Id: edac_r82600.c,v 1.1.2.6 2005/10/05 00:43:44 dsp_llnl Exp $
+ *
+ * Written with reference to 82600 High Integration Dual PCI System
+ * Controller Data Book:
+ * http://www.radisys.com/files/support_downloads/007-01277-0002.82600DataBook.pdf
+ * references to this document given in []
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/init.h>
+
+#include <linux/pci.h>
+#include <linux/pci_ids.h>
+
+#include <linux/slab.h>
+
+#include "edac_mc.h"
+
+/* Radisys say "The 82600 integrates a main memory SDRAM controller that
+ * supports up to four banks of memory. The four banks can support a mix of
+ * sizes of 64 bit wide (72 bits with ECC) Synchronous DRAM (SDRAM) DIMMs,
+ * each of which can be any size from 16MB to 512MB. Both registered (control
+ * signals buffered) and unbuffered DIMM types are supported. Mixing of
+ * registered and unbuffered DIMMs as well as mixing of ECC and non-ECC DIMMs
+ * is not allowed. The 82600 SDRAM interface operates at the same frequency as
+ * the CPU bus, 66MHz, 100MHz or 133MHz."
+ */
+
+#define R82600_NR_CSROWS 4
+#define R82600_NR_CHANS  1
+#define R82600_NR_DIMMS  4
+
+#define R82600_BRIDGE_ID  0x8200
+
+/* Radisys 82600 register addresses - device 0 function 0 - PCI bridge */
+#define R82600_DRAMC   0x57    /* Various SDRAM related control bits
+                                * all bits are R/W
+                                *
+                                * 7    SDRAM ISA Hole Enable
+                                * 6    Flash Page Mode Enable
+                                * 5    ECC Enable: 1=ECC 0=noECC
+                                * 4    DRAM DIMM Type: 1=
+                                * 3    BIOS Alias Disable
+                                * 2    SDRAM BIOS Flash Write Enable
+                                * 1:0  SDRAM Refresh Rate: 00=Disabled
+                                *          01=7.8usec (256Mbit SDRAMs)
+                                *          10=15.6us 11=125usec
+                                */
+
+#define R82600_SDRAMC  0x76    /* "SDRAM Control Register"
+                                * More SDRAM related control bits
+                                * all bits are R/W
+                                *
+                                * 15:8 Reserved.
+                                *
+                                * 7:5  Special SDRAM Mode Select
+                                *
+                                * 4    Force ECC
+                                *
+                                *        1=Drive ECC bits to 0 during
+                                *          write cycles (i.e. ECC test mode)
+                                *
+                                *        0=Normal ECC functioning
+                                *
+                                * 3    Enhanced Paging Enable
+                                *
+                                * 2    CAS# Latency 0=3clks 1=2clks
+                                *
+                                * 1    RAS# to CAS# Delay 0=3 1=2
+                                *
+                                * 0    RAS# Precharge     0=3 1=2
+                                */
+
+#define R82600_EAP     0x80    /* ECC Error Address Pointer Register
+                                *
+                                * 31    Disable Hardware Scrubbing (RW)
+                                *        0=Scrub on corrected read
+                                *        1=Don't scrub on corrected read
+                                *
+                                * 30:12 Error Address Pointer (RO)
+                                *        Upper 19 bits of error address
+                                *
+                                * 11:4  Syndrome Bits (RO)
+                                *
+                                * 3     BSERR# on multibit error (RW)
+                                *        1=enable 0=disable
+                                *
+                                * 2     NMI on Single Bit Error (RW)
+                                *        1=NMI triggered by SBE n.b. other
+                                *          prerequisites
+                                *        0=NMI not triggered
+                                *
+                                * 1     MBE (R/WC)
+                                *        read 1=MBE at EAP (see above)
+                                *        read 0=no MBE, or SBE occurred first
+                                *        write 1=Clear MBE status (must also
+                                *          clear SBE)
+                                *        write 0=NOP
+                                *
+                                * 0     SBE (R/WC)
+                                *        read 1=SBE at EAP (see above)
+                                *        read 0=no SBE, or MBE occurred first
+                                *        write 1=Clear SBE status (must also
+                                *          clear MBE)
+                                *        write 0=NOP
+                                */
+
+#define R82600_DRBA    0x60    /* + 0x60..0x63 SDRAM Row Boundary Address
+                                *  Registers
+                                *
+                                * 7:0  Address lines 30:24 - upper limit of
+                                * each row [p57]
+                                */
+
+struct r82600_error_info {
+       u32 eapr;
+};
+
+
+static unsigned int disable_hardware_scrub = 0;
+
+
+static void r82600_get_error_info (struct mem_ctl_info *mci,
+               struct r82600_error_info *info)
+{
+       pci_read_config_dword(mci->pdev, R82600_EAP, &info->eapr);
+
+       if (info->eapr & BIT(0))
+               /* Clear error to allow next error to be reported [p.62] */
+               pci_write_bits32(mci->pdev, R82600_EAP,
+                                  ((u32) BIT(0) | (u32) BIT(1)),
+                                  ((u32) BIT(0) | (u32) BIT(1)));
+
+       if (info->eapr & BIT(1))
+               /* Clear error to allow next error to be reported [p.62] */
+               pci_write_bits32(mci->pdev, R82600_EAP,
+                                  ((u32) BIT(0) | (u32) BIT(1)),
+                                  ((u32) BIT(0) | (u32) BIT(1)));
+}
+
+
+static int r82600_process_error_info (struct mem_ctl_info *mci,
+               struct r82600_error_info *info, int handle_errors)
+{
+       int error_found;
+       u32 eapaddr, page;
+       u32 syndrome;
+
+       error_found = 0;
+
+       /* bits 30:12 store the upper 19 bits of the 32 bit error address */
+       eapaddr = ((info->eapr >> 12) & 0x7FFF) << 13;
+       /* Syndrome in bits 11:4 [p.62]       */
+       syndrome = (info->eapr >> 4) & 0xFF;
+
+       /* the R82600 reports at less than page *
+        * granularity (upper 19 bits only)     */
+       page = eapaddr >> PAGE_SHIFT;
+
+       if (info->eapr & BIT(0)) {      /* CE? */
+               error_found = 1;
+
+               if (handle_errors)
+                       edac_mc_handle_ce(
+                           mci, page, 0,       /* not avail */
+                           syndrome,
+                           edac_mc_find_csrow_by_page(mci, page),
+                           0,  /* channel */
+                           mci->ctl_name);
+       }
+
+       if (info->eapr & BIT(1)) {      /* UE? */
+               error_found = 1;
+
+               if (handle_errors)
+                       /* 82600 doesn't give enough info */
+                       edac_mc_handle_ue(mci, page, 0,
+                           edac_mc_find_csrow_by_page(mci, page),
+                           mci->ctl_name);
+       }
+
+       return error_found;
+}
+
+static void r82600_check(struct mem_ctl_info *mci)
+{
+       struct r82600_error_info info;
+
+       debugf1("MC%d: " __FILE__ ": %s()\n", mci->mc_idx, __func__);
+       r82600_get_error_info(mci, &info);
+       r82600_process_error_info(mci, &info, 1);
+}
+
+static int r82600_probe1(struct pci_dev *pdev, int dev_idx)
+{
+       int rc = -ENODEV;
+       int index;
+       struct mem_ctl_info *mci = NULL;
+       u8 dramcr;
+       u32 ecc_on;
+       u32 reg_sdram;
+       u32 eapr;
+       u32 scrub_disabled;
+       u32 sdram_refresh_rate;
+       u32 row_high_limit_last = 0;
+       u32 eap_init_bits;
+
+       debugf0("MC: " __FILE__ ": %s()\n", __func__);
+
+
+       pci_read_config_byte(pdev, R82600_DRAMC, &dramcr);
+       pci_read_config_dword(pdev, R82600_EAP, &eapr);
+
+       ecc_on = dramcr & BIT(5);
+       reg_sdram = dramcr & BIT(4);
+       scrub_disabled = eapr & BIT(31);
+       sdram_refresh_rate = dramcr & (BIT(0) | BIT(1));
+
+       debugf2("MC: " __FILE__ ": %s(): sdram refresh rate = %#0x\n",
+               __func__, sdram_refresh_rate);
+
+       debugf2("MC: " __FILE__ ": %s(): DRAMC register = %#0x\n", __func__,
+               dramcr);
+
+       mci = edac_mc_alloc(0, R82600_NR_CSROWS, R82600_NR_CHANS);
+
+       if (mci == NULL) {
+               rc = -ENOMEM;
+               goto fail;
+       }
+
+       debugf0("MC: " __FILE__ ": %s(): mci = %p\n", __func__, mci);
+
+       mci->pdev = pdev;
+       mci->mtype_cap = MEM_FLAG_RDDR | MEM_FLAG_DDR;
+
+       mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_EC | EDAC_FLAG_SECDED;
+       /* FIXME try to work out if the chip leads have been                 *
+        * used for COM2 instead on this board? [MA6?]       MAYBE:          */
+
+       /* On the R82600, the pins for memory bits 72:65 - i.e. the   *
+        * EC bits are shared with the pins for COM2 (!), so if COM2  *
+        * is enabled, we assume COM2 is wired up, and thus no EDAC   *
+        * is possible.                                               */
+       mci->edac_cap = EDAC_FLAG_NONE | EDAC_FLAG_EC | EDAC_FLAG_SECDED;
+       if (ecc_on) {
+               if (scrub_disabled)
+                       debugf3("MC: " __FILE__ ": %s(): mci = %p - "
+                               "Scrubbing disabled! EAP: %#0x\n", __func__,
+                               mci, eapr);
+       } else
+               mci->edac_cap = EDAC_FLAG_NONE;
+
+       mci->mod_name = BS_MOD_STR;
+       mci->mod_ver = "$Revision: 1.1.2.6 $";
+       mci->ctl_name = "R82600";
+       mci->edac_check = r82600_check;
+       mci->ctl_page_to_phys = NULL;
+
+       for (index = 0; index < mci->nr_csrows; index++) {
+               struct csrow_info *csrow = &mci->csrows[index];
+               u8 drbar;       /* SDRAM Row Boundary Address Register */
+               u32 row_high_limit;
+               u32 row_base;
+
+               /* find the DRAM Chip Select Base address and mask */
+               pci_read_config_byte(mci->pdev, R82600_DRBA + index, &drbar);
+
+               debugf1("MC%d: " __FILE__ ": %s() Row=%d DRBA = %#0x\n",
+                       mci->mc_idx, __func__, index, drbar);
+
+               row_high_limit = ((u32) drbar << 24);
+/*             row_high_limit = ((u32)drbar << 24) | 0xffffffUL; */
+
+               debugf1("MC%d: " __FILE__ ": %s() Row=%d, "
+                       "Boundary Address=%#0x, Last = %#0x\n",
+                       mci->mc_idx, __func__, index, row_high_limit,
+                       row_high_limit_last);
+
+               /* Empty row [p.57] */
+               if (row_high_limit == row_high_limit_last)
+                       continue;
+
+               row_base = row_high_limit_last;
+
+               csrow->first_page = row_base >> PAGE_SHIFT;
+               csrow->last_page = (row_high_limit >> PAGE_SHIFT) - 1;
+               csrow->nr_pages = csrow->last_page - csrow->first_page + 1;
+               /* Error address is top 19 bits - so granularity is      *
+                * 14 bits                                               */
+               csrow->grain = 1 << 14;
+               csrow->mtype = reg_sdram ? MEM_RDDR : MEM_DDR;
+               /* FIXME - check that this is unknowable with this chipset */
+               csrow->dtype = DEV_UNKNOWN;
+
+               /* Mode is global on 82600 */
+               csrow->edac_mode = ecc_on ? EDAC_SECDED : EDAC_NONE;
+               row_high_limit_last = row_high_limit;
+       }
+
+       /* clear counters */
+       /* FIXME should we? */
+
+       if (edac_mc_add_mc(mci)) {
+               debugf3("MC: " __FILE__
+                       ": %s(): failed edac_mc_add_mc()\n", __func__);
+               goto fail;
+       }
+
+       /* get this far and it's successful */
+
+       /* Clear error flags to allow next error to be reported [p.62] */
+       /* Test systems seem to always have the UE flag raised on boot */
+
+       eap_init_bits = BIT(0) | BIT(1);
+       if (disable_hardware_scrub) {
+               eap_init_bits |= BIT(31);
+               debugf3("MC: " __FILE__ ": %s(): Disabling Hardware Scrub "
+                       "(scrub on error)\n", __func__);
+       }
+
+       pci_write_bits32(mci->pdev, R82600_EAP, eap_init_bits,
+                        eap_init_bits);
+
+       debugf3("MC: " __FILE__ ": %s(): success\n", __func__);
+       return 0;
+
+fail:
+       if (mci)
+               edac_mc_free(mci);
+
+       return rc;
+}
+
+/* returns count (>= 0), or negative on error */
+static int __devinit r82600_init_one(struct pci_dev *pdev,
+                                    const struct pci_device_id *ent)
+{
+       debugf0("MC: " __FILE__ ": %s()\n", __func__);
+
+       /* don't need to call pci_enable_device() */
+       return r82600_probe1(pdev, ent->driver_data);
+}
+
+
+static void __devexit r82600_remove_one(struct pci_dev *pdev)
+{
+       struct mem_ctl_info *mci;
+
+       debugf0(__FILE__ ": %s()\n", __func__);
+
+       if (((mci = edac_mc_find_mci_by_pdev(pdev)) != NULL) &&
+           !edac_mc_del_mc(mci))
+               edac_mc_free(mci);
+}
+
+
+static const struct pci_device_id r82600_pci_tbl[] __devinitdata = {
+       {PCI_DEVICE(PCI_VENDOR_ID_RADISYS, R82600_BRIDGE_ID)},
+       {0,}                    /* 0 terminated list. */
+};
+
+MODULE_DEVICE_TABLE(pci, r82600_pci_tbl);
+
+
+static struct pci_driver r82600_driver = {
+       .name = BS_MOD_STR,
+       .probe = r82600_init_one,
+       .remove = __devexit_p(r82600_remove_one),
+       .id_table = r82600_pci_tbl,
+};
+
+
+static int __init r82600_init(void)
+{
+       return pci_register_driver(&r82600_driver);
+}
+
+
+static void __exit r82600_exit(void)
+{
+       pci_unregister_driver(&r82600_driver);
+}
+
+
+module_init(r82600_init);
+module_exit(r82600_exit);
+
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Tim Small <tim@buttersideup.com> - WPAD Ltd. "
+             "on behalf of EADS Astrium");
+MODULE_DESCRIPTION("MC support for Radisys 82600 memory controllers");
+
+module_param(disable_hardware_scrub, bool, 0644);
+MODULE_PARM_DESC(disable_hardware_scrub,
+                "If set, disable the chipset's automatic scrub for CEs");
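
r82600_get_error_info() clears the EAP status flags through pci_write_bits32() from edac_mc.h.  A standalone sketch of the read-modify-write arithmetic that helper performs (the register value is invented, and real clearing goes through PCI config space rather than a plain variable):

#include <stdio.h>
#include <stdint.h>

#define BIT(n) (1U << (n))

/* Same masking as pci_write_bits32(): bits selected by mask come from
 * value, all other bits keep their previous contents. */
static uint32_t write_bits32(uint32_t old, uint32_t value, uint32_t mask)
{
        return (old & ~mask) | (value & mask);
}

int main(void)
{
        uint32_t eapr = 0x80123463;             /* made up: scrub disabled, CE + UE flagged */
        uint32_t flags = BIT(0) | BIT(1);       /* SBE + MBE status bits */

        /* The status bits are write-one-to-clear, so the driver writes them
         * back as ones while leaving the address/syndrome fields untouched. */
        printf("value written back: 0x%08x\n",
               (unsigned int)write_bits32(eapr, flags, flags));
        return 0;
}
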
index dd8c6a9ffc762a2bf8e071ced39cfa467b6920a9..b45a45ca7cc961c6152cb702cde8ae768b8b156e 100644 (file)
@@ -29,9 +29,6 @@
 #ifdef CONFIG_ARCH_OMAP
 #include <asm/arch/gpio.h>
 #endif
-
-#else
-#define        set_irq_type(irq,type)  do{}while(0)
 #endif
 
 
@@ -509,14 +506,14 @@ static int __devinit ads7846_probe(struct spi_device *spi)
        ts->msg.complete = ads7846_rx;
        ts->msg.context = ts;
 
-       if (request_irq(spi->irq, ads7846_irq, SA_SAMPLE_RANDOM,
-                               spi->dev.bus_id, ts)) {
+       if (request_irq(spi->irq, ads7846_irq,
+                       SA_SAMPLE_RANDOM | SA_TRIGGER_FALLING,
+                       spi->dev.bus_id, ts)) {
                dev_dbg(&spi->dev, "irq %d busy?\n", spi->irq);
                input_unregister_device(&ts->input);
                kfree(ts);
                return -EBUSY;
        }
-       set_irq_type(spi->irq, IRQT_FALLING);
 
        dev_info(&spi->dev, "touchscreen, irq %d\n", spi->irq);
 
index ca99979c868acbc38d752f83ee37a5553f12e2e6..8b3515f394a6ef93c115f00073bea02c071707ac 100644 (file)
@@ -8,6 +8,7 @@
  * completion notification.
  */
 
+#include <asm/types.h>
 #include <asm/atomic.h>
 
 #include <linux/blkdev.h>
index 8a2e2657f4c28c0c304cbe1859861c3f2190d5f8..33ace373241cb4b80997b60bb52b29dc46c87f8b 100644 (file)
@@ -29,6 +29,8 @@
 #  For mptctl:
 #CFLAGS_mptctl.o += -DMPT_DEBUG_IOCTL
 #
+#  For mptfc:
+#CFLAGS_mptfc.o += -DMPT_DEBUG_FC
 
 #=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-} LSI_LOGIC
 
index d890b2b8a93e94973f345068f95b75e9121b679d..9a2c7605d49c930a45c10aaad9b3cc772dd2805e 100644 (file)
@@ -81,6 +81,10 @@ MODULE_LICENSE("GPL");
 /*
  *  cmd line parameters
  */
+static int mpt_msi_enable;
+module_param(mpt_msi_enable, int, 0);
+MODULE_PARM_DESC(mpt_msi_enable, " MSI Support Enable (default=0)");
+
 #ifdef MFCNT
 static int mfcounter = 0;
 #define PRINT_MF_COUNT 20000
@@ -174,7 +178,7 @@ static void mpt_get_fw_exp_ver(char *buf, MPT_ADAPTER *ioc);
 static int     ProcessEventNotification(MPT_ADAPTER *ioc, EventNotificationReply_t *evReply, int *evHandlers);
 static void    mpt_sp_ioc_info(MPT_ADAPTER *ioc, u32 ioc_status, MPT_FRAME_HDR *mf);
 static void    mpt_fc_log_info(MPT_ADAPTER *ioc, u32 log_info);
-static void    mpt_sp_log_info(MPT_ADAPTER *ioc, u32 log_info);
+static void    mpt_spi_log_info(MPT_ADAPTER *ioc, u32 log_info);
 static void    mpt_sas_log_info(MPT_ADAPTER *ioc, u32 log_info);
 
 /* module entry point */
@@ -313,7 +317,7 @@ mpt_reply(MPT_ADAPTER *ioc, u32 pa)
                if (ioc->bus_type == FC)
                        mpt_fc_log_info(ioc, log_info);
                else if (ioc->bus_type == SPI)
-                       mpt_sp_log_info(ioc, log_info);
+                       mpt_spi_log_info(ioc, log_info);
                else if (ioc->bus_type == SAS)
                        mpt_sas_log_info(ioc, log_info);
        }
@@ -1444,6 +1448,9 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
 
        ioc->pci_irq = -1;
        if (pdev->irq) {
+               if (mpt_msi_enable && !pci_enable_msi(pdev))
+                       printk(MYIOC_s_INFO_FMT "PCI-MSI enabled\n", ioc->name);
+
                r = request_irq(pdev->irq, mpt_interrupt, SA_SHIRQ, ioc->name, ioc);
 
                if (r < 0) {
@@ -1483,6 +1490,10 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
 
                list_del(&ioc->list);
                free_irq(ioc->pci_irq, ioc);
+               if (mpt_msi_enable)
+                       pci_disable_msi(pdev);
+               if (ioc->alt_ioc)
+                       ioc->alt_ioc->alt_ioc = NULL;
                iounmap(mem);
                kfree(ioc);
                pci_set_drvdata(pdev, NULL);
@@ -2136,6 +2147,8 @@ mpt_adapter_dispose(MPT_ADAPTER *ioc)
 
        if (ioc->pci_irq != -1) {
                free_irq(ioc->pci_irq, ioc);
+               if (mpt_msi_enable)
+                       pci_disable_msi(ioc->pcidev);
                ioc->pci_irq = -1;
        }
 
@@ -2157,6 +2170,10 @@ mpt_adapter_dispose(MPT_ADAPTER *ioc)
        sz_last = ioc->alloc_total;
        dprintk((KERN_INFO MYNAM ": %s: free'd %d of %d bytes\n",
                        ioc->name, sz_first-sz_last+(int)sizeof(*ioc), sz_first));
+
+       if (ioc->alt_ioc)
+               ioc->alt_ioc->alt_ioc = NULL;
+
        kfree(ioc);
 }
 
@@ -2770,13 +2787,16 @@ SendPortEnable(MPT_ADAPTER *ioc, int portnum, int sleepFlag)
 
        /* RAID FW may take a long time to enable
         */
-       if ( (ioc->facts.ProductID & MPI_FW_HEADER_PID_PROD_MASK)
-                       > MPI_FW_HEADER_PID_PROD_TARGET_SCSI ) {
-               rc = mpt_handshake_req_reply_wait(ioc, req_sz, (u32*)&port_enable,
-                               reply_sz, (u16*)&reply_buf, 300 /*seconds*/, sleepFlag);
+       if (((ioc->facts.ProductID & MPI_FW_HEADER_PID_PROD_MASK)
+           > MPI_FW_HEADER_PID_PROD_TARGET_SCSI) ||
+           (ioc->bus_type == SAS)) {
+               rc = mpt_handshake_req_reply_wait(ioc, req_sz,
+               (u32*)&port_enable, reply_sz, (u16*)&reply_buf,
+               300 /*seconds*/, sleepFlag);
        } else {
-               rc = mpt_handshake_req_reply_wait(ioc, req_sz, (u32*)&port_enable,
-                               reply_sz, (u16*)&reply_buf, 30 /*seconds*/, sleepFlag);
+               rc = mpt_handshake_req_reply_wait(ioc, req_sz,
+               (u32*)&port_enable, reply_sz, (u16*)&reply_buf,
+               30 /*seconds*/, sleepFlag);
        }
        return rc;
 }
@@ -4386,6 +4406,138 @@ mptbase_sas_persist_operation(MPT_ADAPTER *ioc, u8 persist_opcode)
        return 0;
 }
 
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+
+static void
+mptbase_raid_process_event_data(MPT_ADAPTER *ioc,
+    MpiEventDataRaid_t * pRaidEventData)
+{
+       int     volume;
+       int     reason;
+       int     disk;
+       int     status;
+       int     flags;
+       int     state;
+
+       volume  = pRaidEventData->VolumeID;
+       reason  = pRaidEventData->ReasonCode;
+       disk    = pRaidEventData->PhysDiskNum;
+       status  = le32_to_cpu(pRaidEventData->SettingsStatus);
+       flags   = (status >> 0) & 0xff;
+       state   = (status >> 8) & 0xff;
+
+       if (reason == MPI_EVENT_RAID_RC_DOMAIN_VAL_NEEDED) {
+               return;
+       }
+
+       if ((reason >= MPI_EVENT_RAID_RC_PHYSDISK_CREATED &&
+            reason <= MPI_EVENT_RAID_RC_PHYSDISK_STATUS_CHANGED) ||
+           (reason == MPI_EVENT_RAID_RC_SMART_DATA)) {
+               printk(MYIOC_s_INFO_FMT "RAID STATUS CHANGE for PhysDisk %d\n",
+                       ioc->name, disk);
+       } else {
+               printk(MYIOC_s_INFO_FMT "RAID STATUS CHANGE for VolumeID %d\n",
+                       ioc->name, volume);
+       }
+
+       switch(reason) {
+       case MPI_EVENT_RAID_RC_VOLUME_CREATED:
+               printk(MYIOC_s_INFO_FMT "  volume has been created\n",
+                       ioc->name);
+               break;
+
+       case MPI_EVENT_RAID_RC_VOLUME_DELETED:
+
+               printk(MYIOC_s_INFO_FMT "  volume has been deleted\n",
+                       ioc->name);
+               break;
+
+       case MPI_EVENT_RAID_RC_VOLUME_SETTINGS_CHANGED:
+               printk(MYIOC_s_INFO_FMT "  volume settings have been changed\n",
+                       ioc->name);
+               break;
+
+       case MPI_EVENT_RAID_RC_VOLUME_STATUS_CHANGED:
+               printk(MYIOC_s_INFO_FMT "  volume is now %s%s%s%s\n",
+                       ioc->name,
+                       state == MPI_RAIDVOL0_STATUS_STATE_OPTIMAL
+                        ? "optimal"
+                        : state == MPI_RAIDVOL0_STATUS_STATE_DEGRADED
+                         ? "degraded"
+                         : state == MPI_RAIDVOL0_STATUS_STATE_FAILED
+                          ? "failed"
+                          : "state unknown",
+                       flags & MPI_RAIDVOL0_STATUS_FLAG_ENABLED
+                        ? ", enabled" : "",
+                       flags & MPI_RAIDVOL0_STATUS_FLAG_QUIESCED
+                        ? ", quiesced" : "",
+                       flags & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS
+                        ? ", resync in progress" : "" );
+               break;
+
+       case MPI_EVENT_RAID_RC_VOLUME_PHYSDISK_CHANGED:
+               printk(MYIOC_s_INFO_FMT "  volume membership of PhysDisk %d has changed\n",
+                       ioc->name, disk);
+               break;
+
+       case MPI_EVENT_RAID_RC_PHYSDISK_CREATED:
+               printk(MYIOC_s_INFO_FMT "  PhysDisk has been created\n",
+                       ioc->name);
+               break;
+
+       case MPI_EVENT_RAID_RC_PHYSDISK_DELETED:
+               printk(MYIOC_s_INFO_FMT "  PhysDisk has been deleted\n",
+                       ioc->name);
+               break;
+
+       case MPI_EVENT_RAID_RC_PHYSDISK_SETTINGS_CHANGED:
+               printk(MYIOC_s_INFO_FMT "  PhysDisk settings have been changed\n",
+                       ioc->name);
+               break;
+
+       case MPI_EVENT_RAID_RC_PHYSDISK_STATUS_CHANGED:
+               printk(MYIOC_s_INFO_FMT "  PhysDisk is now %s%s%s\n",
+                       ioc->name,
+                       state == MPI_PHYSDISK0_STATUS_ONLINE
+                        ? "online"
+                        : state == MPI_PHYSDISK0_STATUS_MISSING
+                         ? "missing"
+                         : state == MPI_PHYSDISK0_STATUS_NOT_COMPATIBLE
+                          ? "not compatible"
+                          : state == MPI_PHYSDISK0_STATUS_FAILED
+                           ? "failed"
+                           : state == MPI_PHYSDISK0_STATUS_INITIALIZING
+                            ? "initializing"
+                            : state == MPI_PHYSDISK0_STATUS_OFFLINE_REQUESTED
+                             ? "offline requested"
+                             : state == MPI_PHYSDISK0_STATUS_FAILED_REQUESTED
+                              ? "failed requested"
+                              : state == MPI_PHYSDISK0_STATUS_OTHER_OFFLINE
+                               ? "offline"
+                               : "state unknown",
+                       flags & MPI_PHYSDISK0_STATUS_FLAG_OUT_OF_SYNC
+                        ? ", out of sync" : "",
+                       flags & MPI_PHYSDISK0_STATUS_FLAG_QUIESCED
+                        ? ", quiesced" : "" );
+               break;
+
+       case MPI_EVENT_RAID_RC_DOMAIN_VAL_NEEDED:
+               printk(MYIOC_s_INFO_FMT "  Domain Validation needed for PhysDisk %d\n",
+                       ioc->name, disk);
+               break;
+
+       case MPI_EVENT_RAID_RC_SMART_DATA:
+               printk(MYIOC_s_INFO_FMT "  SMART data received, ASC/ASCQ = %02xh/%02xh\n",
+                       ioc->name, pRaidEventData->ASC, pRaidEventData->ASCQ);
+               break;
+
+       case MPI_EVENT_RAID_RC_REPLACE_ACTION_STARTED:
+               printk(MYIOC_s_INFO_FMT "  replacement of PhysDisk %d has started\n",
+                       ioc->name, disk);
+               break;
+       }
+}
+
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
 /*
  *     GetIoUnitPage2 - Retrieve BIOS version and boot order information.
@@ -4598,6 +4750,14 @@ mpt_GetScsiPortSettings(MPT_ADAPTER *ioc, int portnum)
                                SCSIPortPage2_t *pPP2 = (SCSIPortPage2_t  *) pbuf;
                                MpiDeviceInfo_t *pdevice = NULL;
 
+                               /*
+                                * Save "Set to Avoid SCSI Bus Resets" flag
+                                */
+                               ioc->spi_data.bus_reset =
+                                   (le32_to_cpu(pPP2->PortFlags) &
+                               MPI_SCSIPORTPAGE2_PORT_FLAGS_AVOID_SCSI_RESET) ?
+                                   0 : 1 ;
+
                                /* Save the Port Page 2 data
                                 * (reformat into a 32bit quantity)
                                 */
@@ -5967,6 +6127,10 @@ ProcessEventNotification(MPT_ADAPTER *ioc, EventNotificationReply_t *pEventReply
                        }
                }
                break;
+       case MPI_EVENT_INTEGRATED_RAID:
+               mptbase_raid_process_event_data(ioc,
+                   (MpiEventDataRaid_t *)pEventReply->Data);
+               break;
        default:
                break;
        }
@@ -6046,7 +6210,7 @@ mpt_fc_log_info(MPT_ADAPTER *ioc, u32 log_info)
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
 /*
- *     mpt_sp_log_info - Log information returned from SCSI Parallel IOC.
+ *     mpt_spi_log_info - Log information returned from SCSI Parallel IOC.
  *     @ioc: Pointer to MPT_ADAPTER structure
  *     @mr: Pointer to MPT reply frame
  *     @log_info: U32 LogInfo word from the IOC
@@ -6054,7 +6218,7 @@ mpt_fc_log_info(MPT_ADAPTER *ioc, u32 log_info)
  *     Refer to lsi/sp_log.h.
  */
 static void
-mpt_sp_log_info(MPT_ADAPTER *ioc, u32 log_info)
+mpt_spi_log_info(MPT_ADAPTER *ioc, u32 log_info)
 {
        u32 info = log_info & 0x00FF0000;
        char *desc = "unknown";
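The mpt_msi_enable changes above interleave pci_enable_msi()/pci_disable_msi() with request_irq()/free_irq() at several points in mptbase.c. The sketch below only restates that ordering in one place, using the 2.6.16-era handler signature seen throughout this merge; every example_* name is hypothetical and not taken from the driver.

#include <linux/pci.h>
#include <linux/interrupt.h>

/* Hypothetical handler; the real work happens in the driver's own ISR. */
static irqreturn_t example_isr(int irq, void *dev_id, struct pt_regs *regs)
{
	return IRQ_HANDLED;
}

static int example_setup_irq(struct pci_dev *pdev, void *ctx, int want_msi)
{
	int r;

	/* Switch to MSI first; silently fall back to INTx if it fails. */
	if (want_msi && !pci_enable_msi(pdev))
		dev_info(&pdev->dev, "PCI-MSI enabled\n");

	r = request_irq(pdev->irq, example_isr, SA_SHIRQ, "example", ctx);
	if (r < 0 && want_msi)
		pci_disable_msi(pdev);	/* undo on failure, in reverse order */
	return r;
}

static void example_teardown_irq(struct pci_dev *pdev, void *ctx, int want_msi)
{
	free_irq(pdev->irq, ctx);	/* always release the vector first */
	if (want_msi)
		pci_disable_msi(pdev);
}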
index 47053ac65068566fa6675fa2a5ed08eb8199684e..ea2649ecad1fcccb9f19e33cc8bf1cce9276bd96 100644 (file)
@@ -76,8 +76,8 @@
 #define COPYRIGHT      "Copyright (c) 1999-2005 " MODULEAUTHOR
 #endif
 
-#define MPT_LINUX_VERSION_COMMON       "3.03.06"
-#define MPT_LINUX_PACKAGE_NAME         "@(#)mptlinux-3.03.06"
+#define MPT_LINUX_VERSION_COMMON       "3.03.07"
+#define MPT_LINUX_PACKAGE_NAME         "@(#)mptlinux-3.03.07"
 #define WHAT_MAGIC_STRING              "@" "(" "#" ")"
 
 #define show_mptmod_ver(s,ver)  \
 #define  MPT_MAX_FRAME_SIZE            128
 #define  MPT_DEFAULT_FRAME_SIZE                128
 
-#define  MPT_REPLY_FRAME_SIZE          0x40  /* Must be a multiple of 8 */
+#define  MPT_REPLY_FRAME_SIZE          0x50  /* Must be a multiple of 8 */
 
 #define  MPT_SG_REQ_128_SCALE          1
 #define  MPT_SG_REQ_96_SCALE           2
@@ -510,9 +510,10 @@ struct mptfc_rport_info
 {
        struct list_head list;
        struct fc_rport *rport;
-       VirtDevice      *vdev;
+       struct scsi_target *starget;
        FCDevicePage0_t pg0;
        u8              flags;
+       u8              remap_needed;
 };
 
 /*
@@ -631,6 +632,7 @@ typedef struct _MPT_ADAPTER
        struct mutex             sas_topology_mutex;
        MPT_SAS_MGMT             sas_mgmt;
        int                      num_ports;
+       struct work_struct       mptscsih_persistTask;
 
        struct list_head         fc_rports;
        spinlock_t               fc_rport_lock; /* list and ri flags */
@@ -803,6 +805,12 @@ typedef struct _mpt_sge {
 #define dreplyprintk(x)
 #endif
 
+#ifdef DMPT_DEBUG_FC
+#define dfcprintk(x) printk x
+#else
+#define dfcprintk(x)
+#endif
+
 #ifdef MPT_DEBUG_TM
 #define dtmprintk(x) printk x
 #define DBG_DUMP_TM_REQUEST_FRAME(mfp) \
index b102c7666d0efcaf44f8a6e534ff340352b23489..c3a3499bce2ae8c3ece665ce11f98b6027a1cd83 100644 (file)
@@ -93,10 +93,11 @@ static int  mptfcDoneCtx = -1;
 static int     mptfcTaskCtx = -1;
 static int     mptfcInternalCtx = -1; /* Used only for internal commands */
 
-int mptfc_slave_alloc(struct scsi_device *device);
+static int mptfc_target_alloc(struct scsi_target *starget);
+static int mptfc_slave_alloc(struct scsi_device *sdev);
 static int mptfc_qcmd(struct scsi_cmnd *SCpnt,
-    void (*done)(struct scsi_cmnd *));
-
+                     void (*done)(struct scsi_cmnd *));
+static void mptfc_target_destroy(struct scsi_target *starget);
 static void mptfc_set_rport_loss_tmo(struct fc_rport *rport, uint32_t timeout);
 static void __devexit mptfc_remove(struct pci_dev *pdev);
 
@@ -107,10 +108,10 @@ static struct scsi_host_template mptfc_driver_template = {
        .name                           = "MPT FC Host",
        .info                           = mptscsih_info,
        .queuecommand                   = mptfc_qcmd,
-       .target_alloc                   = mptscsih_target_alloc,
+       .target_alloc                   = mptfc_target_alloc,
        .slave_alloc                    = mptfc_slave_alloc,
        .slave_configure                = mptscsih_slave_configure,
-       .target_destroy                 = mptscsih_target_destroy,
+       .target_destroy                 = mptfc_target_destroy,
        .slave_destroy                  = mptscsih_slave_destroy,
        .change_queue_depth             = mptscsih_change_queue_depth,
        .eh_abort_handler               = mptscsih_abort,
@@ -347,15 +348,34 @@ mptfc_generate_rport_ids(FCDevicePage0_t *pg0, struct fc_rport_identifiers *rid)
        return 0;
 }
 
+static void
+mptfc_remap_sdev(struct scsi_device *sdev, void *arg)
+{
+       VirtDevice              *vdev;
+       VirtTarget              *vtarget;
+       struct scsi_target      *starget;
+
+       starget = scsi_target(sdev);
+       if (starget->hostdata == arg) {
+               vtarget = arg;
+               vdev = sdev->hostdata;
+               if (vdev) {
+                       vdev->bus_id = vtarget->bus_id;
+                       vdev->target_id = vtarget->target_id;
+               }
+       }
+}
+
 static void
 mptfc_register_dev(MPT_ADAPTER *ioc, int channel, FCDevicePage0_t *pg0)
 {
        struct fc_rport_identifiers rport_ids;
        struct fc_rport         *rport;
        struct mptfc_rport_info *ri;
-       int                     match = 0;
-       u64                     port_name;
+       int                     new_ri = 1;
+       u64                     pn;
        unsigned long           flags;
+       VirtTarget              *vtarget;
 
        if (mptfc_generate_rport_ids(pg0, &rport_ids) < 0)
                return;
@@ -363,14 +383,14 @@ mptfc_register_dev(MPT_ADAPTER *ioc, int channel, FCDevicePage0_t *pg0)
        /* scan list looking for a match */
        spin_lock_irqsave(&ioc->fc_rport_lock, flags);
        list_for_each_entry(ri, &ioc->fc_rports, list) {
-               port_name = (u64)ri->pg0.WWPN.High << 32 | (u64)ri->pg0.WWPN.Low;
-               if (port_name == rport_ids.port_name) { /* match */
+               pn = (u64)ri->pg0.WWPN.High << 32 | (u64)ri->pg0.WWPN.Low;
+               if (pn == rport_ids.port_name) {        /* match */
                        list_move_tail(&ri->list, &ioc->fc_rports);
-                       match = 1;
+                       new_ri = 0;
                        break;
                }
        }
-       if (!match) {   /* allocate one */
+       if (new_ri) {   /* allocate one */
                spin_unlock_irqrestore(&ioc->fc_rport_lock, flags);
                ri = kzalloc(sizeof(struct mptfc_rport_info), GFP_KERNEL);
                if (!ri)
@@ -382,40 +402,43 @@ mptfc_register_dev(MPT_ADAPTER *ioc, int channel, FCDevicePage0_t *pg0)
        ri->pg0 = *pg0; /* add/update pg0 data */
        ri->flags &= ~MPT_RPORT_INFO_FLAGS_MISSING;
 
+       /* MPT_RPORT_INFO_FLAGS_REGISTERED - rport not previously deleted */
        if (!(ri->flags & MPT_RPORT_INFO_FLAGS_REGISTERED)) {
                ri->flags |= MPT_RPORT_INFO_FLAGS_REGISTERED;
                spin_unlock_irqrestore(&ioc->fc_rport_lock, flags);
-               rport = fc_remote_port_add(ioc->sh,channel, &rport_ids);
+               rport = fc_remote_port_add(ioc->sh, channel, &rport_ids);
                spin_lock_irqsave(&ioc->fc_rport_lock, flags);
                if (rport) {
-                       if (*((struct mptfc_rport_info **)rport->dd_data) != ri) {
-                               ri->flags &= ~MPT_RPORT_INFO_FLAGS_MAPPED_VDEV;
-                               ri->vdev = NULL;
-                               ri->rport = rport;
-                               *((struct mptfc_rport_info **)rport->dd_data) = ri;
-                       }
-                       rport->dev_loss_tmo = mptfc_dev_loss_tmo;
+                       ri->rport = rport;
+                       if (new_ri) /* may have been reset by user */
+                               rport->dev_loss_tmo = mptfc_dev_loss_tmo;
+                       *((struct mptfc_rport_info **)rport->dd_data) = ri;
                        /*
                         * if already mapped, remap here.  If not mapped,
-                        * slave_alloc will allocate vdev and map
+                        * target_alloc will allocate vtarget and map,
+                        * slave_alloc will fill in vdev from vtarget.
                         */
-                       if (ri->flags & MPT_RPORT_INFO_FLAGS_MAPPED_VDEV) {
-                               ri->vdev->target_id = ri->pg0.CurrentTargetID;
-                               ri->vdev->bus_id = ri->pg0.CurrentBus;
-                               ri->vdev->vtarget->target_id = ri->vdev->target_id;
-                               ri->vdev->vtarget->bus_id = ri->vdev->bus_id;
+                       if (ri->starget) {
+                               vtarget = ri->starget->hostdata;
+                               if (vtarget) {
+                                       vtarget->target_id = pg0->CurrentTargetID;
+                                       vtarget->bus_id = pg0->CurrentBus;
+                                       starget_for_each_device(ri->starget,
+                                               vtarget,mptfc_remap_sdev);
+                               }
+                               ri->remap_needed = 0;
                        }
-                       #ifdef MPT_DEBUG
-                       printk ("mptfc_reg_dev.%d: %x, %llx / %llx, tid %d, "
+                       dfcprintk ((MYIOC_s_INFO_FMT
+                               "mptfc_reg_dev.%d: %x, %llx / %llx, tid %d, "
                                "rport tid %d, tmo %d\n",
-                                       ioc->sh->host_no,
+                                       ioc->name,
+                                       ioc->sh->host_no,
                                        pg0->PortIdentifier,
                                        pg0->WWNN,
                                        pg0->WWPN,
                                        pg0->CurrentTargetID,
                                        ri->rport->scsi_target_id,
-                                       ri->rport->dev_loss_tmo);
-                       #endif
+                                       ri->rport->dev_loss_tmo));
                } else {
                        list_del(&ri->list);
                        kfree(ri);
@@ -426,6 +449,65 @@ mptfc_register_dev(MPT_ADAPTER *ioc, int channel, FCDevicePage0_t *pg0)
 
 }
 
+/*
+ *     OS entry point to allow the host driver to free allocated memory.
+ *     Called if no device is present or the device is being unloaded.
+ */
+static void
+mptfc_target_destroy(struct scsi_target *starget)
+{
+       struct fc_rport         *rport;
+       struct mptfc_rport_info *ri;
+
+       rport = starget_to_rport(starget);
+       if (rport) {
+               ri = *((struct mptfc_rport_info **)rport->dd_data);
+               if (ri) /* better be! */
+                       ri->starget = NULL;
+       }
+       if (starget->hostdata)
+               kfree(starget->hostdata);
+       starget->hostdata = NULL;
+}
+
+/*
+ *     OS entry point to allow host driver to alloc memory
+ *     for each scsi target. Called once per target during the bus scan.
+ *     Return non-zero if allocation fails.
+ */
+static int
+mptfc_target_alloc(struct scsi_target *starget)
+{
+       VirtTarget              *vtarget;
+       struct fc_rport         *rport;
+       struct mptfc_rport_info *ri;
+       int                     rc;
+
+       vtarget = kzalloc(sizeof(VirtTarget), GFP_KERNEL);
+       if (!vtarget)
+               return -ENOMEM;
+       starget->hostdata = vtarget;
+
+       rc = -ENODEV;
+       rport = starget_to_rport(starget);
+       if (rport) {
+               ri = *((struct mptfc_rport_info **)rport->dd_data);
+               if (ri) {       /* better be! */
+                       vtarget->target_id = ri->pg0.CurrentTargetID;
+                       vtarget->bus_id = ri->pg0.CurrentBus;
+                       ri->starget = starget;
+                       ri->remap_needed = 0;
+                       rc = 0;
+               }
+       }
+       if (rc != 0) {
+               kfree(vtarget);
+               starget->hostdata = NULL;
+       }
+
+       return rc;
+}
+
 /*
  *     OS entry point to allow host driver to alloc memory
  *     for each scsi device. Called once per device the bus scan.
@@ -440,7 +522,6 @@ mptfc_slave_alloc(struct scsi_device *sdev)
        VirtDevice              *vdev;
        struct scsi_target      *starget;
        struct fc_rport         *rport;
-       struct mptfc_rport_info *ri;
        unsigned long           flags;
 
 
@@ -451,55 +532,44 @@ mptfc_slave_alloc(struct scsi_device *sdev)
 
        hd = (MPT_SCSI_HOST *)sdev->host->hostdata;
 
-       vdev = kmalloc(sizeof(VirtDevice), GFP_KERNEL);
+       vdev = kzalloc(sizeof(VirtDevice), GFP_KERNEL);
        if (!vdev) {
                printk(MYIOC_s_ERR_FMT "slave_alloc kmalloc(%zd) FAILED!\n",
                                hd->ioc->name, sizeof(VirtDevice));
                return -ENOMEM;
        }
-       memset(vdev, 0, sizeof(VirtDevice));
 
        spin_lock_irqsave(&hd->ioc->fc_rport_lock,flags);
 
-       if (!(ri = *((struct mptfc_rport_info **)rport->dd_data))) {
-               spin_unlock_irqrestore(&hd->ioc->fc_rport_lock,flags);
-               kfree(vdev);
-               return -ENODEV;
-       }
-
        sdev->hostdata = vdev;
        starget = scsi_target(sdev);
        vtarget = starget->hostdata;
+
        if (vtarget->num_luns == 0) {
+               vtarget->ioc_id = hd->ioc->id;
                vtarget->tflags = MPT_TARGET_FLAGS_Q_YES |
                                  MPT_TARGET_FLAGS_VALID_INQUIRY;
                hd->Targets[sdev->id] = vtarget;
        }
 
-       vtarget->target_id = vdev->target_id;
-       vtarget->bus_id = vdev->bus_id;
-
        vdev->vtarget = vtarget;
        vdev->ioc_id = hd->ioc->id;
        vdev->lun = sdev->lun;
-       vdev->target_id = ri->pg0.CurrentTargetID;
-       vdev->bus_id = ri->pg0.CurrentBus;
-
-       ri->flags |= MPT_RPORT_INFO_FLAGS_MAPPED_VDEV;
-       ri->vdev = vdev;
+       vdev->target_id = vtarget->target_id;
+       vdev->bus_id = vtarget->bus_id;
 
        spin_unlock_irqrestore(&hd->ioc->fc_rport_lock,flags);
 
        vtarget->num_luns++;
 
-#ifdef MPT_DEBUG
-       printk ("mptfc_slv_alloc.%d: num_luns %d, sdev.id %d, "
+       dfcprintk ((MYIOC_s_INFO_FMT
+               "mptfc_slv_alloc.%d: num_luns %d, sdev.id %d, "
                "CurrentTargetID %d, %x %llx %llx\n",
-                       sdev->host->host_no,
-                       vtarget->num_luns,
-                       sdev->id, ri->pg0.CurrentTargetID,
-                       ri->pg0.PortIdentifier, ri->pg0.WWPN, ri->pg0.WWNN);
-#endif
+               hd->ioc->name,
+               sdev->host->host_no,
+               vtarget->num_luns,
+               sdev->id, ri->pg0.CurrentTargetID,
+               ri->pg0.PortIdentifier, ri->pg0.WWPN, ri->pg0.WWNN));
 
        return 0;
 }
@@ -507,6 +577,7 @@ mptfc_slave_alloc(struct scsi_device *sdev)
 static int
 mptfc_qcmd(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
 {
+       struct mptfc_rport_info *ri;
        struct fc_rport *rport = starget_to_rport(scsi_target(SCpnt->device));
        int             err;
 
@@ -516,6 +587,10 @@ mptfc_qcmd(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
                done(SCpnt);
                return 0;
        }
+       ri = *((struct mptfc_rport_info **)rport->dd_data);
+       if (unlikely(ri->remap_needed))
+               return SCSI_MLQUEUE_HOST_BUSY;
+
        return mptscsih_qcmd(SCpnt,done);
 }
 
@@ -591,16 +666,20 @@ mptfc_rescan_devices(void *arg)
 
                                ri->flags &= ~(MPT_RPORT_INFO_FLAGS_REGISTERED|
                                               MPT_RPORT_INFO_FLAGS_MISSING);
+                               ri->remap_needed = 1;
                                fc_remote_port_delete(ri->rport);
                                /*
                                 * remote port not really deleted 'cause
                                 * binding is by WWPN and driver only
-                                * registers FCP_TARGETs
+                                * registers FCP_TARGETs but cannot trust
+                                * data structures.
                                 */
-                               #ifdef MPT_DEBUG
-                               printk ("mptfc_rescan.%d: %llx deleted\n",
-                                       ioc->sh->host_no, ri->pg0.WWPN);
-                               #endif
+                               ri->rport = NULL;
+                               dfcprintk ((MYIOC_s_INFO_FMT
+                                       "mptfc_rescan.%d: %llx deleted\n",
+                                       ioc->name,
+                                       ioc->sh->host_no,
+                                       ri->pg0.WWPN));
                        }
                }
                spin_unlock_irqrestore(&ioc->fc_rport_lock,flags);
@@ -872,9 +951,8 @@ mptfc_init(void)
        }
 
        error = pci_register_driver(&mptfc_driver);
-       if (error) {
+       if (error)
                fc_release_transport(mptfc_transport_template);
-       }
 
        return error;
 }
@@ -885,7 +963,8 @@ mptfc_init(void)
  *     @pdev: Pointer to pci_dev structure
  *
  */
-static void __devexit mptfc_remove(struct pci_dev *pdev)
+static void __devexit
+mptfc_remove(struct pci_dev *pdev)
 {
        MPT_ADAPTER *ioc = pci_get_drvdata(pdev);
        struct mptfc_rport_info *p, *n;
index 5a06d8d8694eb3cff0322e479a089ffb5d7021c1..2512d0e6155ede3b55f1996b13fe69b6b78eda0a 100644 (file)
@@ -89,6 +89,8 @@ static int    mptsasMgmtCtx = -1;
 enum mptsas_hotplug_action {
        MPTSAS_ADD_DEVICE,
        MPTSAS_DEL_DEVICE,
+       MPTSAS_ADD_RAID,
+       MPTSAS_DEL_RAID,
 };
 
 struct mptsas_hotplug_event {
@@ -114,6 +116,7 @@ struct mptsas_hotplug_event {
 
 struct mptsas_devinfo {
        u16     handle;         /* unique id to address this device */
+       u16     handle_parent;  /* unique id to address parent device */
        u8      phy_id;         /* phy number of parent device */
        u8      port_id;        /* sas physical port this device
                                   is assoc'd with */
@@ -301,9 +304,8 @@ mptsas_slave_alloc(struct scsi_device *sdev)
        }
        mutex_unlock(&hd->ioc->sas_topology_mutex);
 
-       printk("No matching SAS device found!!\n");
        kfree(vdev);
-       return -ENODEV;
+       return -ENXIO;
 
  out:
        vtarget->ioc_id = vdev->ioc_id;
@@ -321,6 +323,7 @@ mptsas_slave_destroy(struct scsi_device *sdev)
        struct sas_rphy *rphy;
        struct mptsas_portinfo *p;
        int i;
+       VirtDevice *vdev;
 
        /*
         * Handle hotplug removal case.
@@ -344,8 +347,29 @@ mptsas_slave_destroy(struct scsi_device *sdev)
  out:
        mutex_unlock(&hd->ioc->sas_topology_mutex);
        /*
-        * TODO: Issue target reset to flush firmware outstanding commands.
+        * Issue target reset to flush firmware outstanding commands.
         */
+       vdev = sdev->hostdata;
+       if (vdev->configured_lun) {
+               if (mptscsih_TMHandler(hd,
+                    MPI_SCSITASKMGMT_TASKTYPE_TARGET_RESET,
+                    vdev->bus_id,
+                    vdev->target_id,
+                    0, 0, 5 /* 5 second timeout */)
+                    < 0) {
+
+                       /* The TM request failed!
+                        * Fatal error case.
+                        */
+                       printk(MYIOC_s_WARN_FMT
+                               "Error processing TaskMgmt id=%d TARGET_RESET\n",
+                               hd->ioc->name,
+                               vdev->target_id);
+
+                       hd->tmPending = 0;
+                       hd->tmState = TM_STATE_NONE;
+               }
+       }
        mptscsih_slave_destroy(sdev);
 }
 
@@ -714,6 +738,7 @@ mptsas_sas_device_pg0(MPT_ADAPTER *ioc, struct mptsas_devinfo *device_info,
        mptsas_print_device_pg0(buffer);
 
        device_info->handle = le16_to_cpu(buffer->DevHandle);
+       device_info->handle_parent = le16_to_cpu(buffer->ParentDevHandle);
        device_info->phy_id = buffer->PhyNum;
        device_info->port_id = buffer->PhysicalPort;
        device_info->id = buffer->TargetID;
@@ -863,6 +888,26 @@ mptsas_sas_expander_pg1(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info,
        return error;
 }
 
+/*
+ * Returns true if there is a scsi end device
+ */
+static inline int
+mptsas_is_end_device(struct mptsas_devinfo * attached)
+{
+       if ((attached->handle) &&
+           (attached->device_info &
+           MPI_SAS_DEVICE_INFO_END_DEVICE) &&
+           ((attached->device_info &
+           MPI_SAS_DEVICE_INFO_SSP_TARGET) |
+           (attached->device_info &
+           MPI_SAS_DEVICE_INFO_STP_TARGET) |
+           (attached->device_info &
+           MPI_SAS_DEVICE_INFO_SATA_DEVICE)))
+               return 1;
+       else
+               return 0;
+}
+
 static void
 mptsas_parse_device_info(struct sas_identify *identify,
                struct mptsas_devinfo *device_info)
@@ -1227,7 +1272,7 @@ mptsas_find_phyinfo_by_parent(MPT_ADAPTER *ioc, u16 parent_handle, u8 phy_id)
 }
 
 static struct mptsas_phyinfo *
-mptsas_find_phyinfo_by_handle(MPT_ADAPTER *ioc, u16 handle)
+mptsas_find_phyinfo_by_target(MPT_ADAPTER *ioc, u32 id)
 {
        struct mptsas_portinfo *port_info;
        struct mptsas_phyinfo *phy_info = NULL;
@@ -1239,12 +1284,12 @@ mptsas_find_phyinfo_by_handle(MPT_ADAPTER *ioc, u16 handle)
         */
        mutex_lock(&ioc->sas_topology_mutex);
        list_for_each_entry(port_info, &ioc->sas_topology, list) {
-               for (i = 0; i < port_info->num_phys; i++) {
-                       if (port_info->phy_info[i].attached.handle == handle) {
-                               phy_info = &port_info->phy_info[i];
-                               break;
-                       }
-               }
+               for (i = 0; i < port_info->num_phys; i++)
+                       if (mptsas_is_end_device(&port_info->phy_info[i].attached))
+                               if (port_info->phy_info[i].attached.id == id) {
+                                       phy_info = &port_info->phy_info[i];
+                                       break;
+                               }
        }
        mutex_unlock(&ioc->sas_topology_mutex);
 
@@ -1258,36 +1303,58 @@ mptsas_hotplug_work(void *arg)
        MPT_ADAPTER *ioc = ev->ioc;
        struct mptsas_phyinfo *phy_info;
        struct sas_rphy *rphy;
+       struct scsi_device *sdev;
        char *ds = NULL;
-
-       if (ev->device_info & MPI_SAS_DEVICE_INFO_SSP_TARGET)
-               ds = "ssp";
-       if (ev->device_info & MPI_SAS_DEVICE_INFO_STP_TARGET)
-               ds = "stp";
-       if (ev->device_info & MPI_SAS_DEVICE_INFO_SATA_DEVICE)
-               ds = "sata";
+       struct mptsas_devinfo sas_device;
 
        switch (ev->event_type) {
        case MPTSAS_DEL_DEVICE:
-               printk(MYIOC_s_INFO_FMT
-                      "removing %s device, channel %d, id %d, phy %d\n",
-                      ioc->name, ds, ev->channel, ev->id, ev->phy_id);
 
-               phy_info = mptsas_find_phyinfo_by_handle(ioc, ev->handle);
+               phy_info = mptsas_find_phyinfo_by_target(ioc, ev->id);
                if (!phy_info) {
                        printk("mptsas: remove event for non-existant PHY.\n");
                        break;
                }
 
+               if (phy_info->attached.device_info & MPI_SAS_DEVICE_INFO_SSP_TARGET)
+                       ds = "ssp";
+               if (phy_info->attached.device_info & MPI_SAS_DEVICE_INFO_STP_TARGET)
+                       ds = "stp";
+               if (phy_info->attached.device_info & MPI_SAS_DEVICE_INFO_SATA_DEVICE)
+                       ds = "sata";
+
+               printk(MYIOC_s_INFO_FMT
+                      "removing %s device, channel %d, id %d, phy %d\n",
+                      ioc->name, ds, ev->channel, ev->id, phy_info->phy_id);
+
                if (phy_info->rphy) {
                        sas_rphy_delete(phy_info->rphy);
                        phy_info->rphy = NULL;
                }
                break;
        case MPTSAS_ADD_DEVICE:
-               printk(MYIOC_s_INFO_FMT
-                      "attaching %s device, channel %d, id %d, phy %d\n",
-                      ioc->name, ds, ev->channel, ev->id, ev->phy_id);
+
+               /*
+                * When there is no sas address,
+                * RAID volumes are being deleted
+                * and hidden phys disks are being added.
+                * We don't know the SAS data yet,
+                * so look up the sas device page to get
+                * the pertinent info.
+                */
+               if (!ev->sas_address) {
+                       if (mptsas_sas_device_pg0(ioc,
+                           &sas_device, ev->id,
+                           (MPI_SAS_DEVICE_PGAD_FORM_BUS_TARGET_ID <<
+                            MPI_SAS_DEVICE_PGAD_FORM_SHIFT)))
+                               break;
+                       ev->handle = sas_device.handle;
+                       ev->parent_handle = sas_device.handle_parent;
+                       ev->channel = sas_device.channel;
+                       ev->phy_id = sas_device.phy_id;
+                       ev->sas_address = sas_device.sas_address;
+                       ev->device_info = sas_device.device_info;
+               }
 
                phy_info = mptsas_find_phyinfo_by_parent(ioc,
                                ev->parent_handle, ev->phy_id);
@@ -1310,10 +1377,23 @@ mptsas_hotplug_work(void *arg)
                phy_info->attached.sas_address = ev->sas_address;
                phy_info->attached.device_info = ev->device_info;
 
+               if (phy_info->attached.device_info & MPI_SAS_DEVICE_INFO_SSP_TARGET)
+                       ds = "ssp";
+               if (phy_info->attached.device_info & MPI_SAS_DEVICE_INFO_STP_TARGET)
+                       ds = "stp";
+               if (phy_info->attached.device_info & MPI_SAS_DEVICE_INFO_SATA_DEVICE)
+                       ds = "sata";
+
+               printk(MYIOC_s_INFO_FMT
+                      "attaching %s device, channel %d, id %d, phy %d\n",
+                      ioc->name, ds, ev->channel, ev->id, ev->phy_id);
+
+
                rphy = sas_rphy_alloc(phy_info->phy);
                if (!rphy)
                        break; /* non-fatal: an rphy can be added later */
 
+               rphy->scsi_target_id = phy_info->attached.id;
                mptsas_parse_device_info(&rphy->identify, &phy_info->attached);
                if (sas_rphy_add(rphy)) {
                        sas_rphy_free(rphy);
@@ -1322,6 +1402,40 @@ mptsas_hotplug_work(void *arg)
 
                phy_info->rphy = rphy;
                break;
+       case MPTSAS_ADD_RAID:
+               sdev = scsi_device_lookup(
+                       ioc->sh,
+                       ioc->num_ports,
+                       ev->id,
+                       0);
+               if (sdev) {
+                       scsi_device_put(sdev);
+                       break;
+               }
+               printk(MYIOC_s_INFO_FMT
+                      "attaching device, channel %d, id %d\n",
+                      ioc->name, ioc->num_ports, ev->id);
+               scsi_add_device(ioc->sh,
+                       ioc->num_ports,
+                       ev->id,
+                       0);
+               mpt_findImVolumes(ioc);
+               break;
+       case MPTSAS_DEL_RAID:
+               sdev = scsi_device_lookup(
+                       ioc->sh,
+                       ioc->num_ports,
+                       ev->id,
+                       0);
+               if (!sdev)
+                       break;
+               printk(MYIOC_s_INFO_FMT
+                      "removing device, channel %d, id %d\n",
+                      ioc->name, ioc->num_ports, ev->id);
+               scsi_remove_device(sdev);
+               scsi_device_put(sdev);
+               mpt_findImVolumes(ioc);
+               break;
        }
 
        kfree(ev);
@@ -1372,23 +1486,94 @@ mptscsih_send_sas_event(MPT_ADAPTER *ioc,
        schedule_work(&ev->work);
 }
 
+static void
+mptscsih_send_raid_event(MPT_ADAPTER *ioc,
+               EVENT_DATA_RAID *raid_event_data)
+{
+       struct mptsas_hotplug_event *ev;
+       RAID_VOL0_STATUS * volumeStatus;
+
+       if (ioc->bus_type != SAS)
+               return;
+
+       ev = kmalloc(sizeof(*ev), GFP_ATOMIC);
+       if (!ev) {
+               printk(KERN_WARNING "mptsas: lost hotplug event\n");
+               return;
+       }
+
+       memset(ev,0,sizeof(struct mptsas_hotplug_event));
+       INIT_WORK(&ev->work, mptsas_hotplug_work, ev);
+       ev->ioc = ioc;
+       ev->id = raid_event_data->VolumeID;
+
+       switch (raid_event_data->ReasonCode) {
+       case MPI_EVENT_RAID_RC_PHYSDISK_DELETED:
+               ev->event_type = MPTSAS_ADD_DEVICE;
+               break;
+       case MPI_EVENT_RAID_RC_PHYSDISK_CREATED:
+               ev->event_type = MPTSAS_DEL_DEVICE;
+               break;
+       case MPI_EVENT_RAID_RC_VOLUME_DELETED:
+               ev->event_type = MPTSAS_DEL_RAID;
+               break;
+       case MPI_EVENT_RAID_RC_VOLUME_CREATED:
+               ev->event_type = MPTSAS_ADD_RAID;
+               break;
+       case MPI_EVENT_RAID_RC_VOLUME_STATUS_CHANGED:
+               volumeStatus = (RAID_VOL0_STATUS *) &
+                   raid_event_data->SettingsStatus;
+               ev->event_type = (volumeStatus->State ==
+                   MPI_RAIDVOL0_STATUS_STATE_FAILED) ?
+                   MPTSAS_DEL_RAID : MPTSAS_ADD_RAID;
+               break;
+       default:
+               break;
+       }
+       schedule_work(&ev->work);
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/* work queue thread to clear the persistent table */
+static void
+mptscsih_sas_persist_clear_table(void * arg)
+{
+       MPT_ADAPTER *ioc = (MPT_ADAPTER *)arg;
+
+       mptbase_sas_persist_operation(ioc, MPI_SAS_OP_CLEAR_NOT_PRESENT);
+}
+
 static int
 mptsas_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *reply)
 {
+       int rc = 1;
        u8 event = le32_to_cpu(reply->Event) & 0xFF;
 
        if (!ioc->sh)
-               return 1;
+               goto out;
 
        switch (event) {
        case MPI_EVENT_SAS_DEVICE_STATUS_CHANGE:
                mptscsih_send_sas_event(ioc,
                        (EVENT_DATA_SAS_DEVICE_STATUS_CHANGE *)reply->Data);
-               return 1;               /* currently means nothing really */
-
+               break;
+       case MPI_EVENT_INTEGRATED_RAID:
+               mptscsih_send_raid_event(ioc,
+                       (EVENT_DATA_RAID *)reply->Data);
+               break;
+       case MPI_EVENT_PERSISTENT_TABLE_FULL:
+               INIT_WORK(&ioc->mptscsih_persistTask,
+                   mptscsih_sas_persist_clear_table,
+                   (void *)ioc);
+               schedule_work(&ioc->mptscsih_persistTask);
+               break;
        default:
-               return mptscsih_event_process(ioc, reply);
+               rc = mptscsih_event_process(ioc, reply);
+               break;
        }
+ out:
+
+       return rc;
 }
 
 static int
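The RAID and persistent-table events added to mptsas.c are not handled in the event callback itself; the callback only copies what it needs and defers the real work to a workqueue, where sleeping and device scanning are allowed. Below is a minimal sketch of that deferral, using the three-argument INIT_WORK() of this kernel generation; the example_* names are hypothetical and not part of the driver.

#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/string.h>

struct example_event {
	struct work_struct	work;
	int			id;	/* data copied out of the event frame */
};

/* Runs later in process context, where it is safe to sleep or scan. */
static void example_handle_event(void *arg)
{
	struct example_event *ev = arg;

	/* ... add or remove devices, rescan, etc. ... */
	kfree(ev);
}

static void example_queue_event(int id)
{
	struct example_event *ev = kmalloc(sizeof(*ev), GFP_ATOMIC);

	if (!ev)
		return;			/* lost event, as the driver warns and drops */
	memset(ev, 0, sizeof(*ev));
	INIT_WORK(&ev->work, example_handle_event, ev);
	ev->id = id;
	schedule_work(&ev->work);
}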
index cdac5578fdf220caefb48a3356710393558372ff..05789e50546491df632331c58b6b5ff3e867f4fa 100644 (file)
@@ -144,7 +144,6 @@ static int  mptscsih_tm_pending_wait(MPT_SCSI_HOST * hd);
 static int     mptscsih_tm_wait_for_completion(MPT_SCSI_HOST * hd, ulong timeout );
 static u32     SCPNT_TO_LOOKUP_IDX(struct scsi_cmnd *sc);
 
-static int     mptscsih_TMHandler(MPT_SCSI_HOST *hd, u8 type, u8 channel, u8 target, u8 lun, int ctx2abort, ulong timeout);
 static int     mptscsih_IssueTaskMgmt(MPT_SCSI_HOST *hd, u8 type, u8 channel, u8 target, u8 lun, int ctx2abort, ulong timeout);
 
 int            mptscsih_ioc_reset(MPT_ADAPTER *ioc, int post_reset);
@@ -159,11 +158,9 @@ static int mptscsih_writeIOCPage4(MPT_SCSI_HOST *hd, int target_id, int bus);
 int            mptscsih_scandv_complete(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *r);
 static int     mptscsih_do_cmd(MPT_SCSI_HOST *hd, INTERNAL_CMD *iocmd);
 static void    mptscsih_synchronize_cache(MPT_SCSI_HOST *hd, VirtDevice *vdevice);
-static void    mptscsih_negotiate_to_asyn_narrow(MPT_SCSI_HOST *hd, VirtTarget *vtarget);
+static void    mptscsih_negotiate_to_asyn_narrow(MPT_SCSI_HOST *hd, VirtDevice *vdevice);
 static int     mptscsih_is_phys_disk(MPT_ADAPTER *ioc, int id);
 
-static struct work_struct   mptscsih_persistTask;
-
 #ifdef MPTSCSIH_ENABLE_DOMAIN_VALIDATION
 static int     mptscsih_do_raid(MPT_SCSI_HOST *hd, u8 action, INTERNAL_CMD *io);
 static void    mptscsih_domainValidation(void *hd);
@@ -563,11 +560,24 @@ mptscsih_io_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
        MPT_SCSI_HOST   *hd;
        SCSIIORequest_t *pScsiReq;
        SCSIIOReply_t   *pScsiReply;
-       u16              req_idx;
+       u16              req_idx, req_idx_MR;
 
        hd = (MPT_SCSI_HOST *) ioc->sh->hostdata;
 
        req_idx = le16_to_cpu(mf->u.frame.hwhdr.msgctxu.fld.req_idx);
+       req_idx_MR = (mr != NULL) ?
+           le16_to_cpu(mr->u.frame.hwhdr.msgctxu.fld.req_idx) : req_idx;
+       if ((req_idx != req_idx_MR) ||
+           (mf->u.frame.linkage.arg1 == 0xdeadbeaf)) {
+               printk(MYIOC_s_ERR_FMT "Received a mf that was already freed\n",
+                   ioc->name);
+               printk (MYIOC_s_ERR_FMT
+                   "req_idx=%x req_idx_MR=%x mf=%p mr=%p sc=%p\n",
+                   ioc->name, req_idx, req_idx_MR, mf, mr,
+                   hd->ScsiLookup[req_idx_MR]);
+               return 0;
+       }
+
        sc = hd->ScsiLookup[req_idx];
        if (sc == NULL) {
                MPIHeader_t *hdr = (MPIHeader_t *)mf;
@@ -730,6 +740,8 @@ mptscsih_io_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
 
                        break;
 
+               case MPI_IOCSTATUS_SCSI_DATA_OVERRUN:           /* 0x0044 */
+                       sc->resid=0;
                case MPI_IOCSTATUS_SCSI_RECOVERED_ERROR:        /* 0x0040 */
                case MPI_IOCSTATUS_SUCCESS:                     /* 0x0000 */
                        if (scsi_status == MPI_SCSI_STATUS_BUSY)
@@ -789,7 +801,6 @@ mptscsih_io_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
                case MPI_IOCSTATUS_INSUFFICIENT_RESOURCES:      /* 0x0006 */
                case MPI_IOCSTATUS_INVALID_FIELD:               /* 0x0007 */
                case MPI_IOCSTATUS_INVALID_STATE:               /* 0x0008 */
-               case MPI_IOCSTATUS_SCSI_DATA_OVERRUN:           /* 0x0044 */
                case MPI_IOCSTATUS_SCSI_IO_DATA_ERROR:          /* 0x0046 */
                case MPI_IOCSTATUS_SCSI_TASK_MGMT_FAILED:       /* 0x004A */
                default:
@@ -1530,7 +1541,7 @@ mptscsih_freeChainBuffers(MPT_ADAPTER *ioc, int req_idx)
  *
  *     Returns 0 for SUCCESS or -1 if FAILED.
  */
-static int
+int
 mptscsih_TMHandler(MPT_SCSI_HOST *hd, u8 type, u8 channel, u8 target, u8 lun, int ctx2abort, ulong timeout)
 {
        MPT_ADAPTER     *ioc;
@@ -1721,6 +1732,20 @@ mptscsih_IssueTaskMgmt(MPT_SCSI_HOST *hd, u8 type, u8 channel, u8 target, u8 lun
        return retval;
 }
 
+static int
+mptscsih_get_tm_timeout(MPT_ADAPTER *ioc)
+{
+       switch (ioc->bus_type) {
+       case FC:
+               return 40;
+       case SAS:
+               return 10;
+       case SPI:
+       default:
+               return 2;
+       }
+}
+
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
 /**
  *     mptscsih_abort - Abort linux scsi_cmnd routine, new_eh variant
@@ -1792,7 +1817,7 @@ mptscsih_abort(struct scsi_cmnd * SCpnt)
        vdev = SCpnt->device->hostdata;
        retval = mptscsih_TMHandler(hd, MPI_SCSITASKMGMT_TASKTYPE_ABORT_TASK,
                vdev->bus_id, vdev->target_id, vdev->lun,
-               ctx2abort, 2 /* 2 second timeout */);
+               ctx2abort, mptscsih_get_tm_timeout(ioc));
 
        printk (KERN_WARNING MYNAM ": %s: task abort: %s (sc=%p)\n",
                hd->ioc->name,
@@ -1843,7 +1868,7 @@ mptscsih_dev_reset(struct scsi_cmnd * SCpnt)
        vdev = SCpnt->device->hostdata;
        retval = mptscsih_TMHandler(hd, MPI_SCSITASKMGMT_TASKTYPE_TARGET_RESET,
                vdev->bus_id, vdev->target_id,
-               0, 0, 5 /* 5 second timeout */);
+               0, 0, mptscsih_get_tm_timeout(hd->ioc));
 
        printk (KERN_WARNING MYNAM ": %s: target reset: %s (sc=%p)\n",
                hd->ioc->name,
@@ -1893,7 +1918,7 @@ mptscsih_bus_reset(struct scsi_cmnd * SCpnt)
 
        vdev = SCpnt->device->hostdata;
        retval = mptscsih_TMHandler(hd, MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS,
-               vdev->bus_id, 0, 0, 0, 5 /* 5 second timeout */);
+               vdev->bus_id, 0, 0, 0, mptscsih_get_tm_timeout(hd->ioc));
 
        printk (KERN_WARNING MYNAM ": %s: bus reset: %s (sc=%p)\n",
                hd->ioc->name,
@@ -2015,6 +2040,42 @@ mptscsih_tm_wait_for_completion(MPT_SCSI_HOST * hd, ulong timeout )
        return status;
 }
 
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+static void
+mptscsih_taskmgmt_response_code(MPT_ADAPTER *ioc, u8 response_code)
+{
+       char *desc;
+
+       switch (response_code) {
+       case MPI_SCSITASKMGMT_RSP_TM_COMPLETE:
+               desc = "The task completed.";
+               break;
+       case MPI_SCSITASKMGMT_RSP_INVALID_FRAME:
+               desc = "The IOC received an invalid frame status.";
+               break;
+       case MPI_SCSITASKMGMT_RSP_TM_NOT_SUPPORTED:
+               desc = "The task type is not supported.";
+               break;
+       case MPI_SCSITASKMGMT_RSP_TM_FAILED:
+               desc = "The requested task failed.";
+               break;
+       case MPI_SCSITASKMGMT_RSP_TM_SUCCEEDED:
+               desc = "The task completed successfully.";
+               break;
+       case MPI_SCSITASKMGMT_RSP_TM_INVALID_LUN:
+               desc = "The LUN request is invalid.";
+               break;
+       case MPI_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC:
+               desc = "The task is in the IOC queue and has not been sent to target.";
+               break;
+       default:
+               desc = "unknown";
+               break;
+       }
+       printk(MYIOC_s_INFO_FMT "Response Code(0x%08x): F/W: %s\n",
+               ioc->name, response_code, desc);
+}
+
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
 /**
  *     mptscsih_taskmgmt_complete - Registered with Fusion MPT base driver
@@ -2064,6 +2125,11 @@ mptscsih_taskmgmt_complete(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *m
                /* Figure out if this was ABORT_TASK, TARGET_RESET, or BUS_RESET! */
                tmType = pScsiTmReq->TaskType;
 
+               if (ioc->facts.MsgVersion >= MPI_VERSION_01_05 &&
+                   pScsiTmReply->ResponseCode)
+                       mptscsih_taskmgmt_response_code(ioc,
+                           pScsiTmReply->ResponseCode);
+
                dtmprintk((MYIOC_s_WARN_FMT "  TaskType = %d, TerminationCount=%d\n",
                                ioc->name, tmType, le32_to_cpu(pScsiTmReply->TerminationCount)));
                DBG_DUMP_TM_REPLY_FRAME((u32 *)pScsiTmReply);
@@ -2255,7 +2321,7 @@ mptscsih_slave_destroy(struct scsi_device *sdev)
        vtarget->luns[0] &= ~(1 << vdevice->lun);
        vtarget->num_luns--;
        if (vtarget->num_luns == 0) {
-               mptscsih_negotiate_to_asyn_narrow(hd, vtarget);
+               mptscsih_negotiate_to_asyn_narrow(hd, vdevice);
                if (hd->ioc->bus_type == SPI) {
                        if (mptscsih_is_phys_disk(hd->ioc, vtarget->target_id)) {
                                hd->ioc->spi_data.forceDv |= MPT_SCSICFG_RELOAD_IOC_PG3;
@@ -2584,16 +2650,6 @@ mptscsih_ioc_reset(MPT_ADAPTER *ioc, int reset_phase)
        return 1;               /* currently means nothing really */
 }
 
-/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
-/* work queue thread to clear the persitency table */
-static void
-mptscsih_sas_persist_clear_table(void * arg)
-{
-       MPT_ADAPTER *ioc = (MPT_ADAPTER *)arg;
-
-       mptbase_sas_persist_operation(ioc, MPI_SAS_OP_CLEAR_NOT_PRESENT);
-}
-
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
 int
 mptscsih_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply)
@@ -2656,13 +2712,6 @@ mptscsih_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply)
                break;
        }
 
-       /* Persistent table is full. */
-       case MPI_EVENT_PERSISTENT_TABLE_FULL:
-               INIT_WORK(&mptscsih_persistTask,
-                   mptscsih_sas_persist_clear_table,(void *)ioc);
-               schedule_work(&mptscsih_persistTask);
-               break;
-
        case MPI_EVENT_NONE:                            /* 00 */
        case MPI_EVENT_LOG_DATA:                        /* 01 */
        case MPI_EVENT_STATE_CHANGE:                    /* 02 */
@@ -3863,8 +3912,9 @@ mptscsih_do_cmd(MPT_SCSI_HOST *hd, INTERNAL_CMD *io)
  *
  */
 static void
-mptscsih_negotiate_to_asyn_narrow(MPT_SCSI_HOST *hd, VirtTarget *vtarget)
+mptscsih_negotiate_to_asyn_narrow(MPT_SCSI_HOST *hd, VirtDevice *vdevice)
 {
+       VirtTarget              *vtarget = vdevice->vtarget;
        MPT_ADAPTER             *ioc= hd->ioc;
        SCSIDevicePage1_t       *pcfg1Data;
        CONFIGPARMS              cfg;
@@ -3874,7 +3924,8 @@ mptscsih_negotiate_to_asyn_narrow(MPT_SCSI_HOST *hd, VirtTarget *vtarget)
        int                      requested, configuration, data,i;
        u8                       flags, factor;
 
-       if (ioc->bus_type != SPI)
+       if ((ioc->bus_type != SPI) ||
+               (!vdevice->configured_lun))
                return;
 
        if (!ioc->spi_data.sdp1length)
@@ -3910,7 +3961,7 @@ mptscsih_negotiate_to_asyn_narrow(MPT_SCSI_HOST *hd, VirtTarget *vtarget)
                        }
                        mptscsih_setDevicePage1Flags(0, MPT_ASYNC, 0, &requested,
                                &configuration, flags);
-                       dnegoprintk(("syncronize cache: id=%d width=0 factor=MPT_ASYNC "
+                       dnegoprintk(("nego asyn narrow: id=%d width=0 factor=MPT_ASYNC "
                                "offset=0 negoFlags=%x request=%x config=%x\n",
                                id, flags, requested, configuration));
                        pcfg1Data->RequestedParameters = cpu_to_le32(requested);
@@ -3923,7 +3974,7 @@ mptscsih_negotiate_to_asyn_narrow(MPT_SCSI_HOST *hd, VirtTarget *vtarget)
                flags = vtarget->negoFlags;
                mptscsih_setDevicePage1Flags(0, MPT_ASYNC, 0, &requested,
                                &configuration, flags);
-               dnegoprintk(("syncronize cache: id=%d width=0 factor=MPT_ASYNC "
+               dnegoprintk(("nego asyn narrow: id=%d width=0 factor=MPT_ASYNC "
                        "offset=0 negoFlags=%x request=%x config=%x\n",
                        vtarget->target_id, flags, requested, configuration));
                pcfg1Data->RequestedParameters = cpu_to_le32(requested);
@@ -5620,5 +5671,6 @@ EXPORT_SYMBOL(mptscsih_event_process);
 EXPORT_SYMBOL(mptscsih_ioc_reset);
 EXPORT_SYMBOL(mptscsih_change_queue_depth);
 EXPORT_SYMBOL(mptscsih_timer_expired);
+EXPORT_SYMBOL(mptscsih_TMHandler);
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
index d3cba12f4bd95c38c110327bfbde76d9d6ecf1af..44b248d51ea3174ff33d34024ac50b43e6bec672 100644 (file)
@@ -108,3 +108,4 @@ extern int mptscsih_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pE
 extern int mptscsih_ioc_reset(MPT_ADAPTER *ioc, int post_reset);
 extern int mptscsih_change_queue_depth(struct scsi_device *sdev, int qdepth);
 extern void mptscsih_timer_expired(unsigned long data);
+extern int mptscsih_TMHandler(MPT_SCSI_HOST *hd, u8 type, u8 channel, u8 target, u8 lun, int ctx2abort, ulong timeout);
index 7dce29277cb748e51e1ef8a3956f618cddc28818..f148dfa39117edb097bb5f481dcb9c8bad05bd60 100644 (file)
@@ -384,6 +384,14 @@ mptspi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
                goto out_mptspi_probe;
        }
 
+       /*
+        * issue internal bus reset
+        */
+       if (ioc->spi_data.bus_reset)
+               mptscsih_TMHandler(hd,
+                   MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS,
+                   0, 0, 0, 0, 5);
+
        scsi_scan_host(sh);
        return 0;
 
@@ -445,7 +453,7 @@ static void __exit
 mptspi_exit(void)
 {
        pci_unregister_driver(&mptspi_driver);
-       
+
        mpt_reset_deregister(mptspiDoneCtx);
        dprintk((KERN_INFO MYNAM
          ": Deregistered for IOC reset notifications\n"));
index 7e98434cfa37783b1cfa45c1bd162e9a56f14bad..9783caf49696a8cf4cf74a77d907758c59ba846f 100644 (file)
@@ -50,7 +50,7 @@ void ibmasm_register_uart(struct service_processor *sp)
        memset(&uport, 0, sizeof(struct uart_port));
        uport.irq       = sp->irq;
        uport.uartclk   = 3686400;
-       uport.flags     = UPF_AUTOPROBE | UPF_SHARE_IRQ;
+       uport.flags     = UPF_SHARE_IRQ;
        uport.iotype    = UPIO_MEM;
        uport.membase   = iomem_base;
 
index 1421941487c45d3d0d68e5e25b2ff6a6e67b905b..6a6a08441804b0bd1d902f6e2d691976d5471e9b 100644 (file)
@@ -7,6 +7,7 @@ menu "Network device support"
 
 config NETDEVICES
        depends on NET
+       default y if UML
        bool "Network device support"
        ---help---
          You can say N here if you don't intend to connect your Linux box to
@@ -1914,6 +1915,15 @@ config E1000_NAPI
 
          If in doubt, say N.
 
+config E1000_DISABLE_PACKET_SPLIT
+       bool "Disable Packet Split for PCI express adapters"
+       depends on E1000
+       help
+         Say Y here if you want to use the legacy receive path for PCI express
+         hardware.
+
+         If in doubt, say N.
+
 source "drivers/net/ixp2000/Kconfig"
 
 config MYRI_SBUS
@@ -2024,13 +2034,28 @@ config SKGE
          It does not support the link failover and network management 
          features that "portable" vendor supplied sk98lin driver does.
 
+         This driver supports adapters based on the original Yukon chipset:
+         Marvell 88E8001, Belkin F5D5005, CNet GigaCard, DLink DGE-530T,
+         Linksys EG1032/EG1064, 3Com 3C940/3C940B, SysKonnect SK-9871/9872.
+
+         It does not support the newer Yukon2 chipset: a separate driver,
+         sky2, is provided for Yukon2-based adapters.
+
+         To compile this driver as a module, choose M here: the module
+         will be called skge.  This is recommended.
 
 config SKY2
        tristate "SysKonnect Yukon2 support (EXPERIMENTAL)"
        depends on PCI && EXPERIMENTAL
        select CRC32
        ---help---
-         This driver support the Marvell Yukon 2 Gigabit Ethernet adapter.
+         This driver supports Gigabit Ethernet adapters based on the
+         Marvell Yukon 2 chipset:
+         Marvell 88E8021/88E8022/88E8035/88E8036/88E8038/88E8050/88E8052/
+         88E8053/88E8055/88E8061/88E8062, SysKonnect SK-9E21D/SK-9S21
+
+         This driver does not support the original Yukon chipset: a separate
+         driver, skge, is provided for Yukon-based adapters.
 
          To compile this driver as a module, choose M here: the module
          will be called sky2.  This is recommended.
@@ -2040,8 +2065,15 @@ config SK98LIN
        depends on PCI
        ---help---
          Say Y here if you have a Marvell Yukon or SysKonnect SK-98xx/SK-95xx
-         compliant Gigabit Ethernet Adapter. The following adapters are supported
-         by this driver:
+         compliant Gigabit Ethernet Adapter.
+
+         This driver supports the original Yukon chipset. A cleaner driver is
+         also available (skge), which seems to work better than this one.
+
+         This driver does not support the newer Yukon2 chipset. A separate
+         driver, sky2, is provided to support Yukon2-based adapters.
+
+         The following adapters are supported by this driver:
            - 3Com 3C940 Gigabit LOM Ethernet Adapter
            - 3Com 3C941 Gigabit LOM Ethernet Adapter
            - Allied Telesyn AT-2970LX Gigabit Ethernet Adapter
index b8953de5664a4cdf091374af819525518414a6cd..b508812e97acc2bb0d632b0a49bb5f120a0439a6 100644 (file)
@@ -1002,6 +1002,8 @@ static int __devinit ace_init(struct net_device *dev)
 
        mac1 = 0;
        for(i = 0; i < 4; i++) {
+               int tmp;
+
                mac1 = mac1 << 8;
                tmp = read_eeprom_byte(dev, 0x8c+i);
                if (tmp < 0) {
@@ -1012,6 +1014,8 @@ static int __devinit ace_init(struct net_device *dev)
        }
        mac2 = 0;
        for(i = 4; i < 8; i++) {
+               int tmp;
+
                mac2 = mac2 << 8;
                tmp = read_eeprom_byte(dev, 0x8c+i);
                if (tmp < 0) {
index 7aa49b974dc5510cb17a46099186ea43335ab22b..c3267e4e1bb02d6c7bf552b1fc818aa4308fa0c2 100644 (file)
@@ -1399,7 +1399,6 @@ static int b44_open(struct net_device *dev)
        b44_init_rings(bp);
        b44_init_hw(bp);
 
-       netif_carrier_off(dev);
        b44_check_phy(bp);
 
        err = request_irq(dev->irq, b44_interrupt, SA_SHIRQ, dev->name, dev);
@@ -1464,7 +1463,7 @@ static int b44_close(struct net_device *dev)
 #endif
        b44_halt(bp);
        b44_free_rings(bp);
-       netif_carrier_off(bp->dev);
+       netif_carrier_off(dev);
 
        spin_unlock_irq(&bp->lock);
 
@@ -2000,6 +1999,8 @@ static int __devinit b44_init_one(struct pci_dev *pdev,
        dev->irq = pdev->irq;
        SET_ETHTOOL_OPS(dev, &b44_ethtool_ops);
 
+       netif_carrier_off(dev);
+
        err = b44_get_invariants(bp);
        if (err) {
                printk(KERN_ERR PFX "Problem fetching invariants of chip, "
@@ -2136,7 +2137,7 @@ static int __init b44_init(void)
 
        /* Setup parameters for syncing RX/TX DMA descriptors */
        dma_desc_align_mask = ~(dma_desc_align_size - 1);
-       dma_desc_sync_size = max(dma_desc_align_size, sizeof(struct dma_desc));
+       dma_desc_sync_size = max_t(unsigned int, dma_desc_align_size, sizeof(struct dma_desc));
 
        return pci_module_init(&b44_driver);
 }
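The max_t() change in the hunk above works around the type check in the kernel's max() macro: dma_desc_align_size is an unsigned int while sizeof() yields a size_t, and max() warns when its two arguments have different types. A small standalone sketch of the same idea; max_t is re-declared here in simplified form and struct dma_desc is a stand-in, not the b44 layout:

    #include <stdio.h>

    /* Simplified stand-in for the kernel macro: cast both operands to the
     * requested type before comparing, which is what lets an unsigned int
     * be compared against a size_t without a type-mismatch warning. */
    #define max_t(type, a, b) ((type)(a) > (type)(b) ? (type)(a) : (type)(b))

    struct dma_desc { unsigned int ctrl; unsigned int addr; };  /* stand-in */

    int main(void)
    {
            unsigned int dma_desc_align_size = 16;
            unsigned int dma_desc_sync_size =
                    max_t(unsigned int, dma_desc_align_size,
                          sizeof(struct dma_desc));

            printf("sync size: %u bytes\n", dma_desc_sync_size);
            return 0;
    }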
index 2582d98ef5c3308c16214b75f520ec1abb1b97e4..4ff006c37626b92c8e80085480fc01f090da1cdb 100644 (file)
@@ -576,7 +576,7 @@ static int bond_update_speed_duplex(struct slave *slave)
        slave->duplex = DUPLEX_FULL;
 
        if (slave_dev->ethtool_ops) {
-               u32 res;
+               int res;
 
                if (!slave_dev->ethtool_ops->get_settings) {
                        return -1;
index 1f7ca453bb4a28c48a5e44da3024df4b8facdb5c..6e295fce5c6f08c3a1554ea87c9d9be18e86e4cc 100644 (file)
@@ -335,6 +335,30 @@ static inline void cas_mask_intr(struct cas *cp)
                cas_disable_irq(cp, i);
 }
 
+static inline void cas_buffer_init(cas_page_t *cp)
+{
+       struct page *page = cp->buffer;
+       atomic_set((atomic_t *)&page->lru.next, 1);
+}
+
+static inline int cas_buffer_count(cas_page_t *cp)
+{
+       struct page *page = cp->buffer;
+       return atomic_read((atomic_t *)&page->lru.next);
+}
+
+static inline void cas_buffer_inc(cas_page_t *cp)
+{
+       struct page *page = cp->buffer;
+       atomic_inc((atomic_t *)&page->lru.next);
+}
+
+static inline void cas_buffer_dec(cas_page_t *cp)
+{
+       struct page *page = cp->buffer;
+       atomic_dec((atomic_t *)&page->lru.next);
+}
+
 static void cas_enable_irq(struct cas *cp, const int ring)
 {
        if (ring == 0) { /* all but TX_DONE */
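The cas_buffer_*() helpers added above keep a private use count for driver-owned RX pages in the otherwise-unused page->lru.next field, so the hot paths can test "is this page still referenced?" without consulting the global page_count(). A minimal userspace sketch of the same bookkeeping, with a plain structure standing in for cas_page_t / struct page:

    #include <stdatomic.h>
    #include <stdio.h>

    /* Illustration only: a buffer descriptor carrying its own atomic use
     * count, mirroring how the driver stashes an atomic_t in an unused
     * struct page field instead of using the page's global refcount. */
    struct rx_buffer {
            atomic_int users;        /* 1 = owned by driver, >1 = still in flight */
    };

    static void buffer_init(struct rx_buffer *b)  { atomic_store(&b->users, 1); }
    static void buffer_inc(struct rx_buffer *b)   { atomic_fetch_add(&b->users, 1); }
    static void buffer_dec(struct rx_buffer *b)   { atomic_fetch_sub(&b->users, 1); }
    static int  buffer_count(struct rx_buffer *b) { return atomic_load(&b->users); }

    int main(void)
    {
            struct rx_buffer buf;

            buffer_init(&buf);
            buffer_inc(&buf);               /* an skb fragment still points at it */
            if (buffer_count(&buf) > 1)     /* same test as the RX refill paths */
                    printf("buffer busy, allocate a replacement\n");
            buffer_dec(&buf);               /* fragment consumed */
            printf("final count: %d\n", buffer_count(&buf));
            return 0;
    }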
@@ -472,6 +496,7 @@ static int cas_page_free(struct cas *cp, cas_page_t *page)
 {
        pci_unmap_page(cp->pdev, page->dma_addr, cp->page_size, 
                       PCI_DMA_FROMDEVICE);
+       cas_buffer_dec(page);
        __free_pages(page->buffer, cp->page_order);
        kfree(page);
        return 0;
@@ -501,6 +526,7 @@ static cas_page_t *cas_page_alloc(struct cas *cp, const gfp_t flags)
        page->buffer = alloc_pages(flags, cp->page_order);
        if (!page->buffer)
                goto page_err;
+       cas_buffer_init(page);
        page->dma_addr = pci_map_page(cp->pdev, page->buffer, 0,
                                      cp->page_size, PCI_DMA_FROMDEVICE);
        return page;
@@ -579,7 +605,7 @@ static void cas_spare_recover(struct cas *cp, const gfp_t flags)
        list_for_each_safe(elem, tmp, &list) {
                cas_page_t *page = list_entry(elem, cas_page_t, list);
 
-               if (page_count(page->buffer) > 1) 
+               if (cas_buffer_count(page) > 1)
                        continue;
 
                list_del(elem);
@@ -1347,7 +1373,7 @@ static inline cas_page_t *cas_page_spare(struct cas *cp, const int index)
        cas_page_t *page = cp->rx_pages[1][index];
        cas_page_t *new;
 
-       if (page_count(page->buffer) == 1)
+       if (cas_buffer_count(page) == 1)
                return page;
 
        new = cas_page_dequeue(cp);
@@ -1367,7 +1393,7 @@ static cas_page_t *cas_page_swap(struct cas *cp, const int ring,
        cas_page_t **page1 = cp->rx_pages[1];
 
        /* swap if buffer is in use */
-       if (page_count(page0[index]->buffer) > 1) {
+       if (cas_buffer_count(page0[index]) > 1) {
                cas_page_t *new = cas_page_spare(cp, index);
                if (new) {
                        page1[index] = page0[index];
@@ -1925,8 +1951,8 @@ static void cas_tx(struct net_device *dev, struct cas *cp,
        u64 compwb = le64_to_cpu(cp->init_block->tx_compwb);
 #endif
        if (netif_msg_intr(cp))
-               printk(KERN_DEBUG "%s: tx interrupt, status: 0x%x, %lx\n",
-                       cp->dev->name, status, compwb);
+               printk(KERN_DEBUG "%s: tx interrupt, status: 0x%x, %llx\n",
+                       cp->dev->name, status, (unsigned long long)compwb);
        /* process all the rings */
        for (ring = 0; ring < N_TX_RINGS; ring++) {
 #ifdef USE_TX_COMPWB
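The printk change above is the usual portable way to print a 64-bit quantity: compwb is 64 bits even on 32-bit machines, where %lx would only describe a 32-bit argument, so the value is cast to unsigned long long and printed with %llx. A tiny standalone example of the pattern:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t compwb = 0x1122334455667788ULL;

            /* Cast to unsigned long long and use %llx: correct whether
             * long is 32 or 64 bits wide on the target. */
            printf("tx completion writeback: 0x%llx\n",
                   (unsigned long long)compwb);
            return 0;
    }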
@@ -2039,6 +2065,7 @@ static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc,
                skb->len      += hlen - swivel;
 
                get_page(page->buffer);
+               cas_buffer_inc(page);
                frag->page = page->buffer;
                frag->page_offset = off;
                frag->size = hlen - swivel;
@@ -2063,6 +2090,7 @@ static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc,
                        frag++;
 
                        get_page(page->buffer);
+                       cas_buffer_inc(page);
                        frag->page = page->buffer;
                        frag->page_offset = 0;
                        frag->size = hlen;
@@ -2225,7 +2253,7 @@ static int cas_post_rxds_ringN(struct cas *cp, int ring, int num)
        released = 0;
        while (entry != last) {
                /* make a new buffer if it's still in use */
-               if (page_count(page[entry]->buffer) > 1) {
+               if (cas_buffer_count(page[entry]) > 1) {
                        cas_page_t *new = cas_page_dequeue(cp);
                        if (!new) {
                                /* let the timer know that we need to 
index 4726722a063539a9736bf8305b8bb43603ca299e..bf1fd2b98bf897f5ea252caaa5856c25908ccc42 100644 (file)
@@ -1,25 +1,25 @@
 /*******************************************************************************
 
-  
+
   Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
-  
-  This program is free software; you can redistribute it and/or modify it 
-  under the terms of the GNU General Public License as published by the Free 
-  Software Foundation; either version 2 of the License, or (at your option) 
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms of the GNU General Public License as published by the Free
+  Software Foundation; either version 2 of the License, or (at your option)
   any later version.
-  
-  This program is distributed in the hope that it will be useful, but WITHOUT 
-  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 
-  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for 
+
+  This program is distributed in the hope that it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
   more details.
-  
+
   You should have received a copy of the GNU General Public License along with
-  this program; if not, write to the Free Software Foundation, Inc., 59 
+  this program; if not, write to the Free Software Foundation, Inc., 59
   Temple Place - Suite 330, Boston, MA  02111-1307, USA.
-  
+
   The full GNU General Public License is included in this distribution in the
   file called LICENSE.
-  
+
   Contact Information:
   Linux NICS <linux.nics@intel.com>
   Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 
 #define DRV_NAME               "e100"
 #define DRV_EXT                "-NAPI"
-#define DRV_VERSION            "3.4.14-k4"DRV_EXT
+#define DRV_VERSION            "3.5.10-k2"DRV_EXT
 #define DRV_DESCRIPTION                "Intel(R) PRO/100 Network Driver"
 #define DRV_COPYRIGHT          "Copyright(c) 1999-2005 Intel Corporation"
 #define PFX                    DRV_NAME ": "
@@ -320,7 +320,7 @@ enum cuc_dump {
        cuc_dump_complete       = 0x0000A005,
        cuc_dump_reset_complete = 0x0000A007,
 };
-               
+
 enum port {
        software_reset  = 0x0000,
        selftest        = 0x0001,
@@ -715,10 +715,10 @@ static u16 e100_eeprom_read(struct nic *nic, u16 *addr_len, u16 addr)
                ctrl = (cmd_addr_data & (1 << i)) ? eecs | eedi : eecs;
                writeb(ctrl, &nic->csr->eeprom_ctrl_lo);
                e100_write_flush(nic); udelay(4);
-               
+
                writeb(ctrl | eesk, &nic->csr->eeprom_ctrl_lo);
                e100_write_flush(nic); udelay(4);
-               
+
                /* Eeprom drives a dummy zero to EEDO after receiving
                 * complete address.  Use this to adjust addr_len. */
                ctrl = readb(&nic->csr->eeprom_ctrl_lo);
@@ -726,7 +726,7 @@ static u16 e100_eeprom_read(struct nic *nic, u16 *addr_len, u16 addr)
                        *addr_len -= (i - 16);
                        i = 17;
                }
-               
+
                data = (data << 1) | (ctrl & eedo ? 1 : 0);
        }
 
@@ -1170,7 +1170,7 @@ static void e100_configure(struct nic *nic, struct cb *cb, struct sk_buff *skb)
 0x00000000, 0x00000000, 0x00000000, 0x00000000, \
 }
 
-static void e100_load_ucode(struct nic *nic, struct cb *cb, struct sk_buff *skb)
+static void e100_setup_ucode(struct nic *nic, struct cb *cb, struct sk_buff *skb)
 {
 /* *INDENT-OFF* */
        static struct {
@@ -1213,13 +1213,13 @@ static void e100_load_ucode(struct nic *nic, struct cb *cb, struct sk_buff *skb)
 *  driver can change the algorithm.
 *
 *  INTDELAY - This loads the dead-man timer with its initial value.
-*    When this timer expires the interrupt is asserted, and the 
+*    When this timer expires the interrupt is asserted, and the
 *    timer is reset each time a new packet is received.  (see
 *    BUNDLEMAX below to set the limit on number of chained packets)
 *    The current default is 0x600 or 1536.  Experiments show that
 *    the value should probably stay within the 0x200 - 0x1000.
 *
-*  BUNDLEMAX - 
+*  BUNDLEMAX -
 *    This sets the maximum number of frames that will be bundled.  In
 *    some situations, such as the TCP windowing algorithm, it may be
 *    better to limit the growth of the bundle size than let it go as
@@ -1229,7 +1229,7 @@ static void e100_load_ucode(struct nic *nic, struct cb *cb, struct sk_buff *skb)
 *    an interrupt for every frame received.  If you do not want to put
 *    a limit on the bundle size, set this value to xFFFF.
 *
-*  BUNDLESMALL - 
+*  BUNDLESMALL -
 *    This contains a bit-mask describing the minimum size frame that
 *    will be bundled.  The default masks the lower 7 bits, which means
 *    that any frame less than 128 bytes in length will not be bundled,
@@ -1244,7 +1244,7 @@ static void e100_load_ucode(struct nic *nic, struct cb *cb, struct sk_buff *skb)
 *
 *    The current default is 0xFF80, which masks out the lower 7 bits.
 *    This means that any frame which is x7F (127) bytes or smaller
-*    will cause an immediate interrupt.  Because this value must be a 
+*    will cause an immediate interrupt.  Because this value must be a
 *    bit mask, there are only a few valid values that can be used.  To
 *    turn this feature off, the driver can write the value xFFFF to the
 *    lower word of this instruction (in the same way that the other
@@ -1253,7 +1253,7 @@ static void e100_load_ucode(struct nic *nic, struct cb *cb, struct sk_buff *skb)
 *    standard Ethernet frames are <= 2047 bytes in length.
 *************************************************************************/
 
-/* if you wish to disable the ucode functionality, while maintaining the 
+/* if you wish to disable the ucode functionality, while maintaining the
  * workarounds it provides, set the following defines to:
  * BUNDLESMALL 0
  * BUNDLEMAX 1
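The comment block above describes BUNDLESMALL as a bit mask over the frame length: with the default of 0xFF80 the low 7 bits are ignored, so any frame of 127 bytes or less raises an immediate interrupt instead of being bundled. A small sketch of that mask arithmetic; the constant is taken from the comment, the helper name is invented for illustration:

    #include <stdio.h>

    #define BUNDLESMALL_MASK 0xFF80u   /* default from the comment above */

    /* A frame is "small" (interrupts immediately, is never bundled) when
     * its length has no bits set inside the mask. */
    static int frame_is_small(unsigned int len)
    {
            return (len & BUNDLESMALL_MASK) == 0;
    }

    int main(void)
    {
            printf("127-byte frame small? %d\n", frame_is_small(127));  /* 1 */
            printf("128-byte frame small? %d\n", frame_is_small(128));  /* 0 */
            return 0;
    }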
@@ -1284,12 +1284,46 @@ static void e100_load_ucode(struct nic *nic, struct cb *cb, struct sk_buff *skb)
 
                for (i = 0; i < UCODE_SIZE; i++)
                        cb->u.ucode[i] = cpu_to_le32(ucode[i]);
-               cb->command = cpu_to_le16(cb_ucode);
+               cb->command = cpu_to_le16(cb_ucode | cb_el);
                return;
        }
 
 noloaducode:
-       cb->command = cpu_to_le16(cb_nop);
+       cb->command = cpu_to_le16(cb_nop | cb_el);
+}
+
+static inline int e100_exec_cb_wait(struct nic *nic, struct sk_buff *skb,
+       void (*cb_prepare)(struct nic *, struct cb *, struct sk_buff *))
+{
+       int err = 0, counter = 50;
+       struct cb *cb = nic->cb_to_clean;
+
+       if ((err = e100_exec_cb(nic, NULL, e100_setup_ucode)))
+               DPRINTK(PROBE,ERR, "ucode cmd failed with error %d\n", err);
+
+       /* must restart cuc */
+       nic->cuc_cmd = cuc_start;
+
+       /* wait for completion */
+       e100_write_flush(nic);
+       udelay(10);
+
+       /* wait for possibly (ouch) 500ms */
+       while (!(cb->status & cpu_to_le16(cb_complete))) {
+               msleep(10);
+               if (!--counter) break;
+       }
+
+       /* ack any interrupts, something could have been set */
+       writeb(~0, &nic->csr->scb.stat_ack);
+
+       /* if the command failed, or is not OK, notify and return */
+       if (!counter || !(cb->status & cpu_to_le16(cb_ok))) {
+               DPRINTK(PROBE,ERR, "ucode load failed\n");
+               err = -EPERM;
+       }
+
+       return err;
 }
 
 static void e100_setup_iaaddr(struct nic *nic, struct cb *cb,
@@ -1357,13 +1391,13 @@ static int e100_phy_init(struct nic *nic)
                mdio_write(netdev, nic->mii.phy_id, MII_NSC_CONG, cong);
        }
 
-       if((nic->mac >= mac_82550_D102) || ((nic->flags & ich) && 
+       if((nic->mac >= mac_82550_D102) || ((nic->flags & ich) &&
           (mdio_read(netdev, nic->mii.phy_id, MII_TPISTATUS) & 0x8000))) {
                /* enable/disable MDI/MDI-X auto-switching.
                   MDI/MDI-X auto-switching is disabled for 82551ER/QM chips */
                if((nic->mac == mac_82551_E) || (nic->mac == mac_82551_F) ||
-                  (nic->mac == mac_82551_10) || (nic->mii.force_media) || 
-                  !(nic->eeprom[eeprom_cnfg_mdix] & eeprom_mdix_enabled)) 
+                  (nic->mac == mac_82551_10) || (nic->mii.force_media) ||
+                  !(nic->eeprom[eeprom_cnfg_mdix] & eeprom_mdix_enabled))
                        mdio_write(netdev, nic->mii.phy_id, MII_NCONFIG, 0);
                else
                        mdio_write(netdev, nic->mii.phy_id, MII_NCONFIG, NCONFIG_AUTO_SWITCH);
@@ -1388,7 +1422,7 @@ static int e100_hw_init(struct nic *nic)
                return err;
        if((err = e100_exec_cmd(nic, ruc_load_base, 0)))
                return err;
-       if((err = e100_exec_cb(nic, NULL, e100_load_ucode)))
+       if ((err = e100_exec_cb_wait(nic, NULL, e100_setup_ucode)))
                return err;
        if((err = e100_exec_cb(nic, NULL, e100_configure)))
                return err;
@@ -1493,7 +1527,7 @@ static void e100_update_stats(struct nic *nic)
                }
        }
 
-       
+
        if(e100_exec_cmd(nic, cuc_dump_reset, 0))
                DPRINTK(TX_ERR, DEBUG, "exec cuc_dump_reset failed\n");
 }
@@ -1542,10 +1576,10 @@ static void e100_watchdog(unsigned long data)
        mii_check_link(&nic->mii);
 
        /* Software generated interrupt to recover from (rare) Rx
-       * allocation failure.
-       * Unfortunately have to use a spinlock to not re-enable interrupts
-       * accidentally, due to hardware that shares a register between the
-       * interrupt mask bit and the SW Interrupt generation bit */
+        * allocation failure.
+        * Unfortunately have to use a spinlock to not re-enable interrupts
+        * accidentally, due to hardware that shares a register between the
+        * interrupt mask bit and the SW Interrupt generation bit */
        spin_lock_irq(&nic->cmd_lock);
        writeb(readb(&nic->csr->scb.cmd_hi) | irq_sw_gen,&nic->csr->scb.cmd_hi);
        spin_unlock_irq(&nic->cmd_lock);
@@ -1830,7 +1864,7 @@ static void e100_rx_clean(struct nic *nic, unsigned int *work_done,
        struct rx *rx_to_start = NULL;
 
        /* are we already rnr? then pay attention!!! this ensures that
-        * the state machine progression never allows a start with a 
+        * the state machine progression never allows a start with a
         * partially cleaned list, avoiding a race between hardware
         * and rx_to_clean when in NAPI mode */
        if(RU_SUSPENDED == nic->ru_running)
@@ -2066,7 +2100,7 @@ static void e100_tx_timeout(struct net_device *netdev)
 {
        struct nic *nic = netdev_priv(netdev);
 
-       /* Reset outside of interrupt context, to avoid request_irq 
+       /* Reset outside of interrupt context, to avoid request_irq
         * in interrupt context */
        schedule_work(&nic->tx_timeout_task);
 }
@@ -2313,7 +2347,7 @@ static int e100_set_ringparam(struct net_device *netdev,
        struct param_range *rfds = &nic->params.rfds;
        struct param_range *cbs = &nic->params.cbs;
 
-       if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) 
+       if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
                return -EINVAL;
 
        if(netif_running(netdev))
@@ -2631,7 +2665,9 @@ static int __devinit e100_probe(struct pci_dev *pdev,
                nic->flags |= wol_magic;
 
        /* ack any pending wake events, disable PME */
-       pci_enable_wake(pdev, 0, 0);
+       err = pci_enable_wake(pdev, 0, 0);
+       if (err)
+               DPRINTK(PROBE, ERR, "Error clearing wake event\n");
 
        strcpy(netdev->name, "eth%d");
        if((err = register_netdev(netdev))) {
@@ -2682,6 +2718,7 @@ static int e100_suspend(struct pci_dev *pdev, pm_message_t state)
 {
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct nic *nic = netdev_priv(netdev);
+       int retval;
 
        if(netif_running(netdev))
                e100_down(nic);
@@ -2689,9 +2726,14 @@ static int e100_suspend(struct pci_dev *pdev, pm_message_t state)
        netif_device_detach(netdev);
 
        pci_save_state(pdev);
-       pci_enable_wake(pdev, pci_choose_state(pdev, state), nic->flags & (wol_magic | e100_asf(nic)));
+       retval = pci_enable_wake(pdev, pci_choose_state(pdev, state),
+                                nic->flags & (wol_magic | e100_asf(nic)));
+       if (retval)
+               DPRINTK(PROBE,ERR, "Error enabling wake\n");
        pci_disable_device(pdev);
-       pci_set_power_state(pdev, pci_choose_state(pdev, state));
+       retval = pci_set_power_state(pdev, pci_choose_state(pdev, state));
+       if (retval)
+               DPRINTK(PROBE,ERR, "Error %d setting power state\n", retval);
 
        return 0;
 }
@@ -2700,11 +2742,16 @@ static int e100_resume(struct pci_dev *pdev)
 {
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct nic *nic = netdev_priv(netdev);
+       int retval;
 
-       pci_set_power_state(pdev, PCI_D0);
+       retval = pci_set_power_state(pdev, PCI_D0);
+       if (retval)
+               DPRINTK(PROBE,ERR, "Error waking adapter\n");
        pci_restore_state(pdev);
        /* ack any pending wake events, disable PME */
-       pci_enable_wake(pdev, 0, 0);
+       retval = pci_enable_wake(pdev, 0, 0);
+       if (retval)
+               DPRINTK(PROBE,ERR, "Error clearing wake events\n");
        if(e100_hw_init(nic))
                DPRINTK(HW, ERR, "e100_hw_init failed\n");
 
@@ -2721,12 +2768,15 @@ static void e100_shutdown(struct pci_dev *pdev)
 {
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct nic *nic = netdev_priv(netdev);
+       int retval;
 
 #ifdef CONFIG_PM
-       pci_enable_wake(pdev, 0, nic->flags & (wol_magic | e100_asf(nic)));
+       retval = pci_enable_wake(pdev, 0, nic->flags & (wol_magic | e100_asf(nic)));
 #else
-       pci_enable_wake(pdev, 0, nic->flags & (wol_magic));
+       retval = pci_enable_wake(pdev, 0, nic->flags & (wol_magic));
 #endif
+       if (retval)
+               DPRINTK(PROBE,ERR, "Error enabling wake\n");
 }
 
 
@@ -2739,7 +2789,7 @@ static struct pci_driver e100_driver = {
        .suspend =      e100_suspend,
        .resume =       e100_resume,
 #endif
-       .shutdown =     e100_shutdown,
+       .shutdown =     e100_shutdown,
 };
 
 static int __init e100_init_module(void)
index e02e9ba2e18b0b66166f2f877a2e282f0a4926f8..27c77306193b2cb14e17589455ec35ca6d2e4576 100644 (file)
 #include <linux/mii.h>
 #include <linux/ethtool.h>
 #include <linux/if_vlan.h>
-#ifdef CONFIG_E1000_MQ
-#include <linux/cpu.h>
-#include <linux/smp.h>
-#endif
 
 #define BAR_0          0
 #define BAR_1          1
 struct e1000_adapter;
 
 #include "e1000_hw.h"
+#ifdef CONFIG_E1000_MQ
+#include <linux/cpu.h>
+#include <linux/smp.h>
+#endif
 
 #ifdef DBG
 #define E1000_DBG(args...) printk(KERN_DEBUG "e1000: " args)
@@ -169,6 +169,13 @@ struct e1000_buffer {
        uint16_t next_to_watch;
 };
 
+#ifdef CONFIG_E1000_MQ
+struct e1000_queue_stats {
+       uint64_t packets;
+       uint64_t bytes;
+};
+#endif
+
 struct e1000_ps_page { struct page *ps_page[PS_PAGE_BUFFERS]; };
 struct e1000_ps_page_dma { uint64_t ps_page_dma[PS_PAGE_BUFFERS]; };
 
@@ -191,10 +198,12 @@ struct e1000_tx_ring {
        spinlock_t tx_lock;
        uint16_t tdh;
        uint16_t tdt;
-       uint64_t pkt;
 
        boolean_t last_tx_tso;
 
+#ifdef CONFIG_E1000_MQ
+       struct e1000_queue_stats tx_stats;
+#endif
 };
 
 struct e1000_rx_ring {
@@ -216,9 +225,17 @@ struct e1000_rx_ring {
        struct e1000_ps_page *ps_page;
        struct e1000_ps_page_dma *ps_page_dma;
 
+       struct sk_buff *rx_skb_top;
+       struct sk_buff *rx_skb_prev;
+
+       /* cpu for rx queue */
+       int cpu;
+
        uint16_t rdh;
        uint16_t rdt;
-       uint64_t pkt;
+#ifdef CONFIG_E1000_MQ
+       struct e1000_queue_stats rx_stats;
+#endif
 };
 
 #define E1000_DESC_UNUSED(R) \
@@ -251,6 +268,9 @@ struct e1000_adapter {
        uint16_t link_speed;
        uint16_t link_duplex;
        spinlock_t stats_lock;
+#ifdef CONFIG_E1000_NAPI
+       spinlock_t tx_queue_lock;
+#endif
        atomic_t irq_sem;
        struct work_struct tx_timeout_task;
        struct work_struct watchdog_task;
@@ -264,6 +284,7 @@ struct e1000_adapter {
 #ifdef CONFIG_E1000_MQ
        struct e1000_tx_ring **cpu_tx_ring; /* per-cpu */
 #endif
+       unsigned long tx_queue_len;
        uint32_t txd_cmd;
        uint32_t tx_int_delay;
        uint32_t tx_abs_int_delay;
@@ -271,9 +292,11 @@ struct e1000_adapter {
        uint64_t gotcl_old;
        uint64_t tpt_old;
        uint64_t colc_old;
+       uint32_t tx_timeout_count;
        uint32_t tx_fifo_head;
        uint32_t tx_head_addr;
        uint32_t tx_fifo_size;
+       uint8_t  tx_timeout_factor;
        atomic_t tx_fifo_stall;
        boolean_t pcix_82544;
        boolean_t detect_tx_hung;
@@ -281,14 +304,15 @@ struct e1000_adapter {
        /* RX */
 #ifdef CONFIG_E1000_NAPI
        boolean_t (*clean_rx) (struct e1000_adapter *adapter,
-                              struct e1000_rx_ring *rx_ring,
-                              int *work_done, int work_to_do);
+                                                  struct e1000_rx_ring *rx_ring,
+                                                  int *work_done, int work_to_do);
 #else
        boolean_t (*clean_rx) (struct e1000_adapter *adapter,
-                              struct e1000_rx_ring *rx_ring);
+                                                  struct e1000_rx_ring *rx_ring);
 #endif
        void (*alloc_rx_buf) (struct e1000_adapter *adapter,
-                             struct e1000_rx_ring *rx_ring);
+                                                 struct e1000_rx_ring *rx_ring,
+                                                 int cleaned_count);
        struct e1000_rx_ring *rx_ring;      /* One per active queue */
 #ifdef CONFIG_E1000_NAPI
        struct net_device *polling_netdev;  /* One per active queue */
@@ -296,13 +320,15 @@ struct e1000_adapter {
 #ifdef CONFIG_E1000_MQ
        struct net_device **cpu_netdev;     /* per-cpu */
        struct call_async_data_struct rx_sched_call_data;
-       int cpu_for_queue[4];
+       cpumask_t cpumask;
 #endif
-       int num_queues;
+       int num_tx_queues;
+       int num_rx_queues;
 
        uint64_t hw_csum_err;
        uint64_t hw_csum_good;
        uint64_t rx_hdr_split;
+       uint32_t alloc_rx_buff_failed;
        uint32_t rx_int_delay;
        uint32_t rx_abs_int_delay;
        boolean_t rx_csum;
@@ -330,6 +356,7 @@ struct e1000_adapter {
        struct e1000_rx_ring test_rx_ring;
 
 
+       u32 *config_space;
        int msg_enable;
 #ifdef CONFIG_PCI_MSI
        boolean_t have_msi;
index c88f1a3c1b1db7d58a87a779d6517480e514d31c..5cedc81786e3a4f28d0a9b6881b96fe369c84bef 100644 (file)
@@ -80,6 +80,7 @@ static const struct e1000_stats e1000_gstrings_stats[] = {
        { "tx_deferred_ok", E1000_STAT(stats.dc) },
        { "tx_single_coll_ok", E1000_STAT(stats.scc) },
        { "tx_multi_coll_ok", E1000_STAT(stats.mcc) },
+       { "tx_timeout_count", E1000_STAT(tx_timeout_count) },
        { "rx_long_length_errors", E1000_STAT(stats.roc) },
        { "rx_short_length_errors", E1000_STAT(stats.ruc) },
        { "rx_align_errors", E1000_STAT(stats.algnerrc) },
@@ -93,9 +94,20 @@ static const struct e1000_stats e1000_gstrings_stats[] = {
        { "rx_csum_offload_good", E1000_STAT(hw_csum_good) },
        { "rx_csum_offload_errors", E1000_STAT(hw_csum_err) },
        { "rx_header_split", E1000_STAT(rx_hdr_split) },
+       { "alloc_rx_buff_failed", E1000_STAT(alloc_rx_buff_failed) },
 };
-#define E1000_STATS_LEN        \
+
+#ifdef CONFIG_E1000_MQ
+#define E1000_QUEUE_STATS_LEN \
+       (((struct e1000_adapter *)netdev->priv)->num_tx_queues + \
+        ((struct e1000_adapter *)netdev->priv)->num_rx_queues) \
+       * (sizeof(struct e1000_queue_stats) / sizeof(uint64_t))
+#else
+#define E1000_QUEUE_STATS_LEN 0
+#endif
+#define E1000_GLOBAL_STATS_LEN \
        sizeof(e1000_gstrings_stats) / sizeof(struct e1000_stats)
+#define E1000_STATS_LEN (E1000_GLOBAL_STATS_LEN + E1000_QUEUE_STATS_LEN)
 static const char e1000_gstrings_test[][ETH_GSTRING_LEN] = {
        "Register test  (offline)", "Eeprom test    (offline)",
        "Interrupt test (offline)", "Loopback test  (offline)",
@@ -109,7 +121,7 @@ e1000_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
        struct e1000_adapter *adapter = netdev_priv(netdev);
        struct e1000_hw *hw = &adapter->hw;
 
-       if(hw->media_type == e1000_media_type_copper) {
+       if (hw->media_type == e1000_media_type_copper) {
 
                ecmd->supported = (SUPPORTED_10baseT_Half |
                                   SUPPORTED_10baseT_Full |
@@ -121,7 +133,7 @@ e1000_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
 
                ecmd->advertising = ADVERTISED_TP;
 
-               if(hw->autoneg == 1) {
+               if (hw->autoneg == 1) {
                        ecmd->advertising |= ADVERTISED_Autoneg;
 
                        /* the e1000 autoneg seems to match ethtool nicely */
@@ -132,7 +144,7 @@ e1000_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
                ecmd->port = PORT_TP;
                ecmd->phy_address = hw->phy_addr;
 
-               if(hw->mac_type == e1000_82543)
+               if (hw->mac_type == e1000_82543)
                        ecmd->transceiver = XCVR_EXTERNAL;
                else
                        ecmd->transceiver = XCVR_INTERNAL;
@@ -148,13 +160,13 @@ e1000_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
 
                ecmd->port = PORT_FIBRE;
 
-               if(hw->mac_type >= e1000_82545)
+               if (hw->mac_type >= e1000_82545)
                        ecmd->transceiver = XCVR_INTERNAL;
                else
                        ecmd->transceiver = XCVR_EXTERNAL;
        }
 
-       if(netif_carrier_ok(adapter->netdev)) {
+       if (netif_carrier_ok(adapter->netdev)) {
 
                e1000_get_speed_and_duplex(hw, &adapter->link_speed,
                                                   &adapter->link_duplex);
@@ -163,7 +175,7 @@ e1000_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
                /* unfortunately FULL_DUPLEX != DUPLEX_FULL
                 *          and HALF_DUPLEX != DUPLEX_HALF */
 
-               if(adapter->link_duplex == FULL_DUPLEX)
+               if (adapter->link_duplex == FULL_DUPLEX)
                        ecmd->duplex = DUPLEX_FULL;
                else
                        ecmd->duplex = DUPLEX_HALF;
@@ -183,13 +195,21 @@ e1000_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
        struct e1000_adapter *adapter = netdev_priv(netdev);
        struct e1000_hw *hw = &adapter->hw;
 
-       if(ecmd->autoneg == AUTONEG_ENABLE) {
+       /* When SoL/IDER sessions are active, autoneg/speed/duplex
+        * cannot be changed */
+       if (e1000_check_phy_reset_block(hw)) {
+               DPRINTK(DRV, ERR, "Cannot change link characteristics "
+                       "when SoL/IDER is active.\n");
+               return -EINVAL;
+       }
+
+       if (ecmd->autoneg == AUTONEG_ENABLE) {
                hw->autoneg = 1;
-               if(hw->media_type == e1000_media_type_fiber)
+               if (hw->media_type == e1000_media_type_fiber)
                        hw->autoneg_advertised = ADVERTISED_1000baseT_Full |
                                     ADVERTISED_FIBRE |
                                     ADVERTISED_Autoneg;
-               else 
+               else
                        hw->autoneg_advertised = ADVERTISED_10baseT_Half |
                                                  ADVERTISED_10baseT_Full |
                                                  ADVERTISED_100baseT_Half |
@@ -199,12 +219,12 @@ e1000_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
                                                  ADVERTISED_TP;
                ecmd->advertising = hw->autoneg_advertised;
        } else
-               if(e1000_set_spd_dplx(adapter, ecmd->speed + ecmd->duplex))
+               if (e1000_set_spd_dplx(adapter, ecmd->speed + ecmd->duplex))
                        return -EINVAL;
 
        /* reset the link */
 
-       if(netif_running(adapter->netdev)) {
+       if (netif_running(adapter->netdev)) {
                e1000_down(adapter);
                e1000_reset(adapter);
                e1000_up(adapter);
@@ -221,14 +241,14 @@ e1000_get_pauseparam(struct net_device *netdev,
        struct e1000_adapter *adapter = netdev_priv(netdev);
        struct e1000_hw *hw = &adapter->hw;
 
-       pause->autoneg = 
+       pause->autoneg =
                (adapter->fc_autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE);
-       
-       if(hw->fc == e1000_fc_rx_pause)
+
+       if (hw->fc == e1000_fc_rx_pause)
                pause->rx_pause = 1;
-       else if(hw->fc == e1000_fc_tx_pause)
+       else if (hw->fc == e1000_fc_tx_pause)
                pause->tx_pause = 1;
-       else if(hw->fc == e1000_fc_full) {
+       else if (hw->fc == e1000_fc_full) {
                pause->rx_pause = 1;
                pause->tx_pause = 1;
        }
@@ -240,31 +260,30 @@ e1000_set_pauseparam(struct net_device *netdev,
 {
        struct e1000_adapter *adapter = netdev_priv(netdev);
        struct e1000_hw *hw = &adapter->hw;
-       
+
        adapter->fc_autoneg = pause->autoneg;
 
-       if(pause->rx_pause && pause->tx_pause)
+       if (pause->rx_pause && pause->tx_pause)
                hw->fc = e1000_fc_full;
-       else if(pause->rx_pause && !pause->tx_pause)
+       else if (pause->rx_pause && !pause->tx_pause)
                hw->fc = e1000_fc_rx_pause;
-       else if(!pause->rx_pause && pause->tx_pause)
+       else if (!pause->rx_pause && pause->tx_pause)
                hw->fc = e1000_fc_tx_pause;
-       else if(!pause->rx_pause && !pause->tx_pause)
+       else if (!pause->rx_pause && !pause->tx_pause)
                hw->fc = e1000_fc_none;
 
        hw->original_fc = hw->fc;
 
-       if(adapter->fc_autoneg == AUTONEG_ENABLE) {
-               if(netif_running(adapter->netdev)) {
+       if (adapter->fc_autoneg == AUTONEG_ENABLE) {
+               if (netif_running(adapter->netdev)) {
                        e1000_down(adapter);
                        e1000_up(adapter);
                } else
                        e1000_reset(adapter);
-       }
-       else
+       } else
                return ((hw->media_type == e1000_media_type_fiber) ?
                        e1000_setup_link(hw) : e1000_force_mac_fc(hw));
-       
+
        return 0;
 }
 
@@ -281,14 +300,14 @@ e1000_set_rx_csum(struct net_device *netdev, uint32_t data)
        struct e1000_adapter *adapter = netdev_priv(netdev);
        adapter->rx_csum = data;
 
-       if(netif_running(netdev)) {
+       if (netif_running(netdev)) {
                e1000_down(adapter);
                e1000_up(adapter);
        } else
                e1000_reset(adapter);
        return 0;
 }
-       
+
 static uint32_t
 e1000_get_tx_csum(struct net_device *netdev)
 {
@@ -300,7 +319,7 @@ e1000_set_tx_csum(struct net_device *netdev, uint32_t data)
 {
        struct e1000_adapter *adapter = netdev_priv(netdev);
 
-       if(adapter->hw.mac_type < e1000_82543) {
+       if (adapter->hw.mac_type < e1000_82543) {
                if (!data)
                        return -EINVAL;
                return 0;
@@ -319,8 +338,8 @@ static int
 e1000_set_tso(struct net_device *netdev, uint32_t data)
 {
        struct e1000_adapter *adapter = netdev_priv(netdev);
-       if((adapter->hw.mac_type < e1000_82544) ||
-           (adapter->hw.mac_type == e1000_82547)) 
+       if ((adapter->hw.mac_type < e1000_82544) ||
+           (adapter->hw.mac_type == e1000_82547))
                return data ? -EINVAL : 0;
 
        if (data)
@@ -328,7 +347,7 @@ e1000_set_tso(struct net_device *netdev, uint32_t data)
        else
                netdev->features &= ~NETIF_F_TSO;
        return 0;
-} 
+}
 #endif /* NETIF_F_TSO */
 
 static uint32_t
@@ -345,7 +364,7 @@ e1000_set_msglevel(struct net_device *netdev, uint32_t data)
        adapter->msg_enable = data;
 }
 
-static int 
+static int
 e1000_get_regs_len(struct net_device *netdev)
 {
 #define E1000_REGS_LEN 32
@@ -381,7 +400,7 @@ e1000_get_regs(struct net_device *netdev,
        regs_buff[11] = E1000_READ_REG(hw, TIDV);
 
        regs_buff[12] = adapter->hw.phy_type;  /* PHY type (IGP=1, M88=0) */
-       if(hw->phy_type == e1000_phy_igp) {
+       if (hw->phy_type == e1000_phy_igp) {
                e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT,
                                    IGP01E1000_PHY_AGC_A);
                e1000_read_phy_reg(hw, IGP01E1000_PHY_AGC_A &
@@ -435,7 +454,7 @@ e1000_get_regs(struct net_device *netdev,
        e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_data);
        regs_buff[24] = (uint32_t)phy_data;  /* phy local receiver status */
        regs_buff[25] = regs_buff[24];  /* phy remote receiver status */
-       if(hw->mac_type >= e1000_82540 &&
+       if (hw->mac_type >= e1000_82540 &&
           hw->media_type == e1000_media_type_copper) {
                regs_buff[26] = E1000_READ_REG(hw, MANC);
        }
@@ -459,7 +478,7 @@ e1000_get_eeprom(struct net_device *netdev,
        int ret_val = 0;
        uint16_t i;
 
-       if(eeprom->len == 0)
+       if (eeprom->len == 0)
                return -EINVAL;
 
        eeprom->magic = hw->vendor_id | (hw->device_id << 16);
@@ -469,16 +488,16 @@ e1000_get_eeprom(struct net_device *netdev,
 
        eeprom_buff = kmalloc(sizeof(uint16_t) *
                        (last_word - first_word + 1), GFP_KERNEL);
-       if(!eeprom_buff)
+       if (!eeprom_buff)
                return -ENOMEM;
 
-       if(hw->eeprom.type == e1000_eeprom_spi)
+       if (hw->eeprom.type == e1000_eeprom_spi)
                ret_val = e1000_read_eeprom(hw, first_word,
                                            last_word - first_word + 1,
                                            eeprom_buff);
        else {
                for (i = 0; i < last_word - first_word + 1; i++)
-                       if((ret_val = e1000_read_eeprom(hw, first_word + i, 1,
+                       if ((ret_val = e1000_read_eeprom(hw, first_word + i, 1,
                                                        &eeprom_buff[i])))
                                break;
        }
@@ -505,10 +524,10 @@ e1000_set_eeprom(struct net_device *netdev,
        int max_len, first_word, last_word, ret_val = 0;
        uint16_t i;
 
-       if(eeprom->len == 0)
+       if (eeprom->len == 0)
                return -EOPNOTSUPP;
 
-       if(eeprom->magic != (hw->vendor_id | (hw->device_id << 16)))
+       if (eeprom->magic != (hw->vendor_id | (hw->device_id << 16)))
                return -EFAULT;
 
        max_len = hw->eeprom.word_size * 2;
@@ -516,19 +535,19 @@ e1000_set_eeprom(struct net_device *netdev,
        first_word = eeprom->offset >> 1;
        last_word = (eeprom->offset + eeprom->len - 1) >> 1;
        eeprom_buff = kmalloc(max_len, GFP_KERNEL);
-       if(!eeprom_buff)
+       if (!eeprom_buff)
                return -ENOMEM;
 
        ptr = (void *)eeprom_buff;
 
-       if(eeprom->offset & 1) {
+       if (eeprom->offset & 1) {
                /* need read/modify/write of first changed EEPROM word */
                /* only the second byte of the word is being modified */
                ret_val = e1000_read_eeprom(hw, first_word, 1,
                                            &eeprom_buff[0]);
                ptr++;
        }
-       if(((eeprom->offset + eeprom->len) & 1) && (ret_val == 0)) {
+       if (((eeprom->offset + eeprom->len) & 1) && (ret_val == 0)) {
                /* need read/modify/write of last changed EEPROM word */
                /* only the first byte of the word is being modified */
                ret_val = e1000_read_eeprom(hw, last_word, 1,
@@ -547,9 +566,9 @@ e1000_set_eeprom(struct net_device *netdev,
        ret_val = e1000_write_eeprom(hw, first_word,
                                     last_word - first_word + 1, eeprom_buff);
 
-       /* Update the checksum over the first part of the EEPROM if needed 
+       /* Update the checksum over the first part of the EEPROM if needed
         * and flush shadow RAM for 82573 controllers */
-       if((ret_val == 0) && ((first_word <= EEPROM_CHECKSUM_REG) || 
+       if ((ret_val == 0) && ((first_word <= EEPROM_CHECKSUM_REG) ||
                                (hw->mac_type == e1000_82573)))
                e1000_update_eeprom_checksum(hw);
 
@@ -567,21 +586,21 @@ e1000_get_drvinfo(struct net_device *netdev,
 
        strncpy(drvinfo->driver,  e1000_driver_name, 32);
        strncpy(drvinfo->version, e1000_driver_version, 32);
-       
-       /* EEPROM image version # is reported as firware version # for
+
+       /* EEPROM image version # is reported as firmware version # for
         * 8257{1|2|3} controllers */
        e1000_read_eeprom(&adapter->hw, 5, 1, &eeprom_data);
        switch (adapter->hw.mac_type) {
        case e1000_82571:
        case e1000_82572:
        case e1000_82573:
-               sprintf(firmware_version, "%d.%d-%d", 
+               sprintf(firmware_version, "%d.%d-%d",
                        (eeprom_data & 0xF000) >> 12,
                        (eeprom_data & 0x0FF0) >> 4,
                        eeprom_data & 0x000F);
                break;
        default:
-               sprintf(firmware_version, "n/a");
+               sprintf(firmware_version, "N/A");
        }
 
        strncpy(drvinfo->fw_version, firmware_version, 32);
@@ -613,7 +632,7 @@ e1000_get_ringparam(struct net_device *netdev,
        ring->rx_jumbo_pending = 0;
 }
 
-static int 
+static int
 e1000_set_ringparam(struct net_device *netdev,
                     struct ethtool_ringparam *ring)
 {
@@ -623,8 +642,8 @@ e1000_set_ringparam(struct net_device *netdev,
        struct e1000_rx_ring *rxdr, *rx_old, *rx_new;
        int i, err, tx_ring_size, rx_ring_size;
 
-       tx_ring_size = sizeof(struct e1000_tx_ring) * adapter->num_queues;
-       rx_ring_size = sizeof(struct e1000_rx_ring) * adapter->num_queues;
+       tx_ring_size = sizeof(struct e1000_tx_ring) * adapter->num_tx_queues;
+       rx_ring_size = sizeof(struct e1000_rx_ring) * adapter->num_rx_queues;
 
        if (netif_running(adapter->netdev))
                e1000_down(adapter);
@@ -650,25 +669,25 @@ e1000_set_ringparam(struct net_device *netdev,
        txdr = adapter->tx_ring;
        rxdr = adapter->rx_ring;
 
-       if((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
+       if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
                return -EINVAL;
 
        rxdr->count = max(ring->rx_pending,(uint32_t)E1000_MIN_RXD);
        rxdr->count = min(rxdr->count,(uint32_t)(mac_type < e1000_82544 ?
                E1000_MAX_RXD : E1000_MAX_82544_RXD));
-       E1000_ROUNDUP(rxdr->count, REQ_RX_DESCRIPTOR_MULTIPLE); 
+       E1000_ROUNDUP(rxdr->count, REQ_RX_DESCRIPTOR_MULTIPLE);
 
        txdr->count = max(ring->tx_pending,(uint32_t)E1000_MIN_TXD);
        txdr->count = min(txdr->count,(uint32_t)(mac_type < e1000_82544 ?
                E1000_MAX_TXD : E1000_MAX_82544_TXD));
-       E1000_ROUNDUP(txdr->count, REQ_TX_DESCRIPTOR_MULTIPLE); 
+       E1000_ROUNDUP(txdr->count, REQ_TX_DESCRIPTOR_MULTIPLE);
 
-       for (i = 0; i < adapter->num_queues; i++) {
+       for (i = 0; i < adapter->num_tx_queues; i++)
                txdr[i].count = txdr->count;
+       for (i = 0; i < adapter->num_rx_queues; i++)
                rxdr[i].count = rxdr->count;
-       }
 
-       if(netif_running(adapter->netdev)) {
+       if (netif_running(adapter->netdev)) {
                /* Try to get new resources before deleting old */
                if ((err = e1000_setup_all_rx_resources(adapter)))
                        goto err_setup_rx;
@@ -688,7 +707,7 @@ e1000_set_ringparam(struct net_device *netdev,
                kfree(rx_old);
                adapter->rx_ring = rx_new;
                adapter->tx_ring = tx_new;
-               if((err = e1000_up(adapter)))
+               if ((err = e1000_up(adapter)))
                        return err;
        }
 
@@ -707,10 +726,10 @@ err_setup_rx:
        uint32_t pat, value;                                                   \
        uint32_t test[] =                                                      \
                {0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF};              \
-       for(pat = 0; pat < sizeof(test)/sizeof(test[0]); pat++) {              \
+       for (pat = 0; pat < sizeof(test)/sizeof(test[0]); pat++) {              \
                E1000_WRITE_REG(&adapter->hw, R, (test[pat] & W));             \
                value = E1000_READ_REG(&adapter->hw, R);                       \
-               if(value != (test[pat] & W & M)) {                             \
+               if (value != (test[pat] & W & M)) {                             \
                        DPRINTK(DRV, ERR, "pattern test reg %04X failed: got " \
                                "0x%08X expected 0x%08X\n",                    \
                                E1000_##R, value, (test[pat] & W & M));        \
@@ -726,7 +745,7 @@ err_setup_rx:
        uint32_t value;                                                        \
        E1000_WRITE_REG(&adapter->hw, R, W & M);                               \
        value = E1000_READ_REG(&adapter->hw, R);                               \
-       if((W & M) != (value & M)) {                                          \
+       if ((W & M) != (value & M)) {                                          \
                DPRINTK(DRV, ERR, "set/check reg %04X test failed: got 0x%08X "\
                        "expected 0x%08X\n", E1000_##R, (value & M), (W & M)); \
                *data = (adapter->hw.mac_type < e1000_82543) ?                 \
@@ -762,7 +781,7 @@ e1000_reg_test(struct e1000_adapter *adapter, uint64_t *data)
        value = (E1000_READ_REG(&adapter->hw, STATUS) & toggle);
        E1000_WRITE_REG(&adapter->hw, STATUS, toggle);
        after = E1000_READ_REG(&adapter->hw, STATUS) & toggle;
-       if(value != after) {
+       if (value != after) {
                DPRINTK(DRV, ERR, "failed STATUS register test got: "
                        "0x%08X expected: 0x%08X\n", after, value);
                *data = 1;
@@ -790,7 +809,7 @@ e1000_reg_test(struct e1000_adapter *adapter, uint64_t *data)
        REG_SET_AND_CHECK(RCTL, 0x06DFB3FE, 0x003FFFFB);
        REG_SET_AND_CHECK(TCTL, 0xFFFFFFFF, 0x00000000);
 
-       if(adapter->hw.mac_type >= e1000_82543) {
+       if (adapter->hw.mac_type >= e1000_82543) {
 
                REG_SET_AND_CHECK(RCTL, 0x06DFB3FE, 0xFFFFFFFF);
                REG_PATTERN_TEST(RDBAL, 0xFFFFFFF0, 0xFFFFFFFF);
@@ -798,7 +817,7 @@ e1000_reg_test(struct e1000_adapter *adapter, uint64_t *data)
                REG_PATTERN_TEST(TDBAL, 0xFFFFFFF0, 0xFFFFFFFF);
                REG_PATTERN_TEST(TIDV, 0x0000FFFF, 0x0000FFFF);
 
-               for(i = 0; i < E1000_RAR_ENTRIES; i++) {
+               for (i = 0; i < E1000_RAR_ENTRIES; i++) {
                        REG_PATTERN_TEST(RA + ((i << 1) << 2), 0xFFFFFFFF,
                                         0xFFFFFFFF);
                        REG_PATTERN_TEST(RA + (((i << 1) + 1) << 2), 0x8003FFFF,
@@ -814,7 +833,7 @@ e1000_reg_test(struct e1000_adapter *adapter, uint64_t *data)
 
        }
 
-       for(i = 0; i < E1000_MC_TBL_SIZE; i++)
+       for (i = 0; i < E1000_MC_TBL_SIZE; i++)
                REG_PATTERN_TEST(MTA + (i << 2), 0xFFFFFFFF, 0xFFFFFFFF);
 
        *data = 0;
@@ -830,8 +849,8 @@ e1000_eeprom_test(struct e1000_adapter *adapter, uint64_t *data)
 
        *data = 0;
        /* Read and add up the contents of the EEPROM */
-       for(i = 0; i < (EEPROM_CHECKSUM_REG + 1); i++) {
-               if((e1000_read_eeprom(&adapter->hw, i, 1, &temp)) < 0) {
+       for (i = 0; i < (EEPROM_CHECKSUM_REG + 1); i++) {
+               if ((e1000_read_eeprom(&adapter->hw, i, 1, &temp)) < 0) {
                        *data = 1;
                        break;
                }
@@ -839,7 +858,7 @@ e1000_eeprom_test(struct e1000_adapter *adapter, uint64_t *data)
        }
 
        /* If Checksum is not Correct return error else test passed */
-       if((checksum != (uint16_t) EEPROM_SUM) && !(*data))
+       if ((checksum != (uint16_t) EEPROM_SUM) && !(*data))
                *data = 2;
 
        return *data;
@@ -868,9 +887,9 @@ e1000_intr_test(struct e1000_adapter *adapter, uint64_t *data)
        *data = 0;
 
        /* Hook up test interrupt handler just for this test */
-       if(!request_irq(irq, &e1000_test_intr, 0, netdev->name, netdev)) {
+       if (!request_irq(irq, &e1000_test_intr, 0, netdev->name, netdev)) {
                shared_int = FALSE;
-       } else if(request_irq(irq, &e1000_test_intr, SA_SHIRQ,
+       } else if (request_irq(irq, &e1000_test_intr, SA_SHIRQ,
                              netdev->name, netdev)){
                *data = 1;
                return -1;
@@ -881,12 +900,12 @@ e1000_intr_test(struct e1000_adapter *adapter, uint64_t *data)
        msec_delay(10);
 
        /* Test each interrupt */
-       for(; i < 10; i++) {
+       for (; i < 10; i++) {
 
                /* Interrupt to test */
                mask = 1 << i;
 
-               if(!shared_int) {
+               if (!shared_int) {
                        /* Disable the interrupt to be reported in
                         * the cause register and then force the same
                         * interrupt and see if one gets posted.  If
@@ -897,8 +916,8 @@ e1000_intr_test(struct e1000_adapter *adapter, uint64_t *data)
                        E1000_WRITE_REG(&adapter->hw, IMC, mask);
                        E1000_WRITE_REG(&adapter->hw, ICS, mask);
                        msec_delay(10);
-                       if(adapter->test_icr & mask) {
+
+                       if (adapter->test_icr & mask) {
                                *data = 3;
                                break;
                        }
@@ -915,12 +934,12 @@ e1000_intr_test(struct e1000_adapter *adapter, uint64_t *data)
                E1000_WRITE_REG(&adapter->hw, ICS, mask);
                msec_delay(10);
 
-               if(!(adapter->test_icr & mask)) {
+               if (!(adapter->test_icr & mask)) {
                        *data = 4;
                        break;
                }
 
-               if(!shared_int) {
+               if (!shared_int) {
                        /* Disable the other interrupts to be reported in
                         * the cause register and then force the other
                         * interrupts and see if any get posted.  If
@@ -932,7 +951,7 @@ e1000_intr_test(struct e1000_adapter *adapter, uint64_t *data)
                        E1000_WRITE_REG(&adapter->hw, ICS, ~mask & 0x00007FFF);
                        msec_delay(10);
 
-                       if(adapter->test_icr) {
+                       if (adapter->test_icr) {
                                *data = 5;
                                break;
                        }
@@ -957,40 +976,39 @@ e1000_free_desc_rings(struct e1000_adapter *adapter)
        struct pci_dev *pdev = adapter->pdev;
        int i;
 
-       if(txdr->desc && txdr->buffer_info) {
-               for(i = 0; i < txdr->count; i++) {
-                       if(txdr->buffer_info[i].dma)
+       if (txdr->desc && txdr->buffer_info) {
+               for (i = 0; i < txdr->count; i++) {
+                       if (txdr->buffer_info[i].dma)
                                pci_unmap_single(pdev, txdr->buffer_info[i].dma,
                                                 txdr->buffer_info[i].length,
                                                 PCI_DMA_TODEVICE);
-                       if(txdr->buffer_info[i].skb)
+                       if (txdr->buffer_info[i].skb)
                                dev_kfree_skb(txdr->buffer_info[i].skb);
                }
        }
 
-       if(rxdr->desc && rxdr->buffer_info) {
-               for(i = 0; i < rxdr->count; i++) {
-                       if(rxdr->buffer_info[i].dma)
+       if (rxdr->desc && rxdr->buffer_info) {
+               for (i = 0; i < rxdr->count; i++) {
+                       if (rxdr->buffer_info[i].dma)
                                pci_unmap_single(pdev, rxdr->buffer_info[i].dma,
                                                 rxdr->buffer_info[i].length,
                                                 PCI_DMA_FROMDEVICE);
-                       if(rxdr->buffer_info[i].skb)
+                       if (rxdr->buffer_info[i].skb)
                                dev_kfree_skb(rxdr->buffer_info[i].skb);
                }
        }
 
-       if(txdr->desc) {
+       if (txdr->desc) {
                pci_free_consistent(pdev, txdr->size, txdr->desc, txdr->dma);
                txdr->desc = NULL;
        }
-       if(rxdr->desc) {
+       if (rxdr->desc) {
                pci_free_consistent(pdev, rxdr->size, rxdr->desc, rxdr->dma);
                rxdr->desc = NULL;
        }
 
        kfree(txdr->buffer_info);
        txdr->buffer_info = NULL;
-
        kfree(rxdr->buffer_info);
        rxdr->buffer_info = NULL;
 
@@ -1008,11 +1026,11 @@ e1000_setup_desc_rings(struct e1000_adapter *adapter)
 
        /* Setup Tx descriptor ring and Tx buffers */
 
-       if(!txdr->count)
-               txdr->count = E1000_DEFAULT_TXD;   
+       if (!txdr->count)
+               txdr->count = E1000_DEFAULT_TXD;
 
        size = txdr->count * sizeof(struct e1000_buffer);
-       if(!(txdr->buffer_info = kmalloc(size, GFP_KERNEL))) {
+       if (!(txdr->buffer_info = kmalloc(size, GFP_KERNEL))) {
                ret_val = 1;
                goto err_nomem;
        }
@@ -1020,7 +1038,7 @@ e1000_setup_desc_rings(struct e1000_adapter *adapter)
 
        txdr->size = txdr->count * sizeof(struct e1000_tx_desc);
        E1000_ROUNDUP(txdr->size, 4096);
-       if(!(txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma))) {
+       if (!(txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma))) {
                ret_val = 2;
                goto err_nomem;
        }
@@ -1039,12 +1057,12 @@ e1000_setup_desc_rings(struct e1000_adapter *adapter)
                        E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT |
                        E1000_FDX_COLLISION_DISTANCE << E1000_COLD_SHIFT);
 
-       for(i = 0; i < txdr->count; i++) {
+       for (i = 0; i < txdr->count; i++) {
                struct e1000_tx_desc *tx_desc = E1000_TX_DESC(*txdr, i);
                struct sk_buff *skb;
                unsigned int size = 1024;
 
-               if(!(skb = alloc_skb(size, GFP_KERNEL))) {
+               if (!(skb = alloc_skb(size, GFP_KERNEL))) {
                        ret_val = 3;
                        goto err_nomem;
                }
@@ -1064,18 +1082,18 @@ e1000_setup_desc_rings(struct e1000_adapter *adapter)
 
        /* Setup Rx descriptor ring and Rx buffers */
 
-       if(!rxdr->count)
-               rxdr->count = E1000_DEFAULT_RXD;   
+       if (!rxdr->count)
+               rxdr->count = E1000_DEFAULT_RXD;
 
        size = rxdr->count * sizeof(struct e1000_buffer);
-       if(!(rxdr->buffer_info = kmalloc(size, GFP_KERNEL))) {
+       if (!(rxdr->buffer_info = kmalloc(size, GFP_KERNEL))) {
                ret_val = 4;
                goto err_nomem;
        }
        memset(rxdr->buffer_info, 0, size);
 
        rxdr->size = rxdr->count * sizeof(struct e1000_rx_desc);
-       if(!(rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma))) {
+       if (!(rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma))) {
                ret_val = 5;
                goto err_nomem;
        }
@@ -1095,11 +1113,11 @@ e1000_setup_desc_rings(struct e1000_adapter *adapter)
                (adapter->hw.mc_filter_type << E1000_RCTL_MO_SHIFT);
        E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
 
-       for(i = 0; i < rxdr->count; i++) {
+       for (i = 0; i < rxdr->count; i++) {
                struct e1000_rx_desc *rx_desc = E1000_RX_DESC(*rxdr, i);
                struct sk_buff *skb;
 
-               if(!(skb = alloc_skb(E1000_RXBUFFER_2048 + NET_IP_ALIGN,
+               if (!(skb = alloc_skb(E1000_RXBUFFER_2048 + NET_IP_ALIGN,
                                GFP_KERNEL))) {
                        ret_val = 6;
                        goto err_nomem;
@@ -1208,15 +1226,15 @@ e1000_nonintegrated_phy_loopback(struct e1000_adapter *adapter)
 
        /* Check Phy Configuration */
        e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &phy_reg);
-       if(phy_reg != 0x4100)
+       if (phy_reg != 0x4100)
                 return 9;
 
        e1000_read_phy_reg(&adapter->hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_reg);
-       if(phy_reg != 0x0070)
+       if (phy_reg != 0x0070)
                return 10;
 
        e1000_read_phy_reg(&adapter->hw, 29, &phy_reg);
-       if(phy_reg != 0x001A)
+       if (phy_reg != 0x001A)
                return 11;
 
        return 0;
@@ -1230,7 +1248,7 @@ e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
 
        adapter->hw.autoneg = FALSE;
 
-       if(adapter->hw.phy_type == e1000_phy_m88) {
+       if (adapter->hw.phy_type == e1000_phy_m88) {
                /* Auto-MDI/MDIX Off */
                e1000_write_phy_reg(&adapter->hw,
                                    M88E1000_PHY_SPEC_CTRL, 0x0808);
@@ -1250,14 +1268,14 @@ e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
                     E1000_CTRL_SPD_1000 |/* Force Speed to 1000 */
                     E1000_CTRL_FD);     /* Force Duplex to FULL */
 
-       if(adapter->hw.media_type == e1000_media_type_copper &&
+       if (adapter->hw.media_type == e1000_media_type_copper &&
           adapter->hw.phy_type == e1000_phy_m88) {
                ctrl_reg |= E1000_CTRL_ILOS; /* Invert Loss of Signal */
        } else {
                /* Set the ILOS bit on the fiber NIC if half
                 * duplex link is detected. */
                stat_reg = E1000_READ_REG(&adapter->hw, STATUS);
-               if((stat_reg & E1000_STATUS_FD) == 0)
+               if ((stat_reg & E1000_STATUS_FD) == 0)
                        ctrl_reg |= (E1000_CTRL_ILOS | E1000_CTRL_SLU);
        }
 
@@ -1266,7 +1284,7 @@ e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
        /* Disable the receiver on the PHY so when a cable is plugged in, the
         * PHY does not begin to autoneg when a cable is reconnected to the NIC.
         */
-       if(adapter->hw.phy_type == e1000_phy_m88)
+       if (adapter->hw.phy_type == e1000_phy_m88)
                e1000_phy_disable_receiver(adapter);
 
        udelay(500);
@@ -1282,14 +1300,14 @@ e1000_set_phy_loopback(struct e1000_adapter *adapter)
 
        switch (adapter->hw.mac_type) {
        case e1000_82543:
-               if(adapter->hw.media_type == e1000_media_type_copper) {
+               if (adapter->hw.media_type == e1000_media_type_copper) {
                        /* Attempt to setup Loopback mode on Non-integrated PHY.
                         * Some PHY registers get corrupted at random, so
                         * attempt this 10 times.
                         */
-                       while(e1000_nonintegrated_phy_loopback(adapter) &&
+                       while (e1000_nonintegrated_phy_loopback(adapter) &&
                              count++ < 10);
-                       if(count < 11)
+                       if (count < 11)
                                return 0;
                }
                break;
@@ -1327,11 +1345,11 @@ e1000_set_phy_loopback(struct e1000_adapter *adapter)
 static int
 e1000_setup_loopback_test(struct e1000_adapter *adapter)
 {
-       uint32_t rctl;
        struct e1000_hw *hw = &adapter->hw;
+       uint32_t rctl;
 
        if (hw->media_type == e1000_media_type_fiber ||
-          hw->media_type == e1000_media_type_internal_serdes) {
+           hw->media_type == e1000_media_type_internal_serdes) {
                switch (hw->mac_type) {
                case e1000_82545:
                case e1000_82546:
@@ -1362,25 +1380,25 @@ e1000_setup_loopback_test(struct e1000_adapter *adapter)
 static void
 e1000_loopback_cleanup(struct e1000_adapter *adapter)
 {
+       struct e1000_hw *hw = &adapter->hw;
        uint32_t rctl;
        uint16_t phy_reg;
-       struct e1000_hw *hw = &adapter->hw;
 
-       rctl = E1000_READ_REG(&adapter->hw, RCTL);
+       rctl = E1000_READ_REG(hw, RCTL);
        rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC);
-       E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
+       E1000_WRITE_REG(hw, RCTL, rctl);
 
        switch (hw->mac_type) {
        case e1000_82571:
        case e1000_82572:
                if (hw->media_type == e1000_media_type_fiber ||
-                  hw->media_type == e1000_media_type_internal_serdes){
+                   hw->media_type == e1000_media_type_internal_serdes) {
 #define E1000_SERDES_LB_OFF 0x400
                        E1000_WRITE_REG(hw, SCTL, E1000_SERDES_LB_OFF);
                        msec_delay(10);
                        break;
                }
-               /* fall thru for Cu adapters */
+               /* Fall Through */
        case e1000_82545:
        case e1000_82546:
        case e1000_82545_rev_3:
@@ -1401,7 +1419,7 @@ static void
 e1000_create_lbtest_frame(struct sk_buff *skb, unsigned int frame_size)
 {
        memset(skb->data, 0xFF, frame_size);
-       frame_size = (frame_size % 2) ? (frame_size - 1) : frame_size;
+       frame_size &= ~1;
        memset(&skb->data[frame_size / 2], 0xAA, frame_size / 2 - 1);
        memset(&skb->data[frame_size / 2 + 10], 0xBE, 1);
        memset(&skb->data[frame_size / 2 + 12], 0xAF, 1);
@@ -1410,9 +1428,9 @@ e1000_create_lbtest_frame(struct sk_buff *skb, unsigned int frame_size)
 static int
 e1000_check_lbtest_frame(struct sk_buff *skb, unsigned int frame_size)
 {
-       frame_size = (frame_size % 2) ? (frame_size - 1) : frame_size;
-       if(*(skb->data + 3) == 0xFF) {
-               if((*(skb->data + frame_size / 2 + 10) == 0xBE) &&
+       frame_size &= ~1;
+       if (*(skb->data + 3) == 0xFF) {
+               if ((*(skb->data + frame_size / 2 + 10) == 0xBE) &&
                   (*(skb->data + frame_size / 2 + 12) == 0xAF)) {
                        return 0;
                }
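
As a quick illustration of the frame pattern above (a sketch only, assuming the 1024-byte frames the loopback test actually sends):

#include <string.h>

/* Sketch of the 1024-byte loopback frame as built by
 * e1000_create_lbtest_frame() and verified by e1000_check_lbtest_frame(). */
static void example_lbtest_frame(void)
{
        unsigned char frame[1024];

        memset(frame, 0xFF, sizeof(frame)); /* whole frame starts as 0xFF           */
        memset(&frame[512], 0xAA, 511);     /* second half, minus last byte, is 0xAA */
        frame[512 + 10] = 0xBE;             /* marker checked at frame_size/2 + 10   */
        frame[512 + 12] = 0xAF;             /* marker checked at frame_size/2 + 12   */
        /* the receive check passes when frame[3] == 0xFF and both markers survive */
}
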
@@ -1431,53 +1449,53 @@ e1000_run_loopback_test(struct e1000_adapter *adapter)
 
        E1000_WRITE_REG(&adapter->hw, RDT, rxdr->count - 1);
 
-       /* Calculate the loop count based on the largest descriptor ring 
+       /* Calculate the loop count based on the largest descriptor ring
         * The idea is to wrap the largest ring a number of times using 64
         * send/receive pairs during each loop
         */
 
-       if(rxdr->count <= txdr->count)
+       if (rxdr->count <= txdr->count)
                lc = ((txdr->count / 64) * 2) + 1;
        else
                lc = ((rxdr->count / 64) * 2) + 1;
 
        k = l = 0;
-       for(j = 0; j <= lc; j++) { /* loop count loop */
-               for(i = 0; i < 64; i++) { /* send the packets */
-                       e1000_create_lbtest_frame(txdr->buffer_info[i].skb, 
+       for (j = 0; j <= lc; j++) { /* loop count loop */
+               for (i = 0; i < 64; i++) { /* send the packets */
+                       e1000_create_lbtest_frame(txdr->buffer_info[i].skb,
                                        1024);
-                       pci_dma_sync_single_for_device(pdev, 
+                       pci_dma_sync_single_for_device(pdev,
                                        txdr->buffer_info[k].dma,
                                        txdr->buffer_info[k].length,
                                        PCI_DMA_TODEVICE);
-                       if(unlikely(++k == txdr->count)) k = 0;
+                       if (unlikely(++k == txdr->count)) k = 0;
                }
                E1000_WRITE_REG(&adapter->hw, TDT, k);
                msec_delay(200);
                time = jiffies; /* set the start time for the receive */
                good_cnt = 0;
                do { /* receive the sent packets */
-                       pci_dma_sync_single_for_cpu(pdev, 
+                       pci_dma_sync_single_for_cpu(pdev,
                                        rxdr->buffer_info[l].dma,
                                        rxdr->buffer_info[l].length,
                                        PCI_DMA_FROMDEVICE);
-       
+
                        ret_val = e1000_check_lbtest_frame(
                                        rxdr->buffer_info[l].skb,
                                        1024);
-                       if(!ret_val)
+                       if (!ret_val)
                                good_cnt++;
-                       if(unlikely(++l == rxdr->count)) l = 0;
-                       /* time + 20 msecs (200 msecs on 2.4) is more than 
-                        * enough time to complete the receives, if it's 
+                       if (unlikely(++l == rxdr->count)) l = 0;
+                       /* time + 20 msecs (200 msecs on 2.4) is more than
+                        * enough time to complete the receives; if it's
                         * exceeded, break and error out
                         */
                } while (good_cnt < 64 && jiffies < (time + 20));
-               if(good_cnt != 64) {
+               if (good_cnt != 64) {
                        ret_val = 13; /* ret_val is the same as mis-compare */
-                       break; 
+                       break;
                }
-               if(jiffies >= (time + 2)) {
+               if (jiffies >= (time + 2)) {
                        ret_val = 14; /* error code for time out error */
                        break;
                }
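
For reference, the loop-count rule above works out as in this hedged sketch (256 is an assumed example ring size, not necessarily the driver default):

/* Hedged arithmetic sketch of the loop count rule above: wrap the larger
 * ring roughly twice, in batches of 64 send/receive pairs. */
static int example_loop_count(void)
{
        int count = 256;                   /* assumed example ring size */
        return ((count / 64) * 2) + 1;     /* = 9 batches of 64 frames  */
}
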
@@ -1488,14 +1506,25 @@ e1000_run_loopback_test(struct e1000_adapter *adapter)
 static int
 e1000_loopback_test(struct e1000_adapter *adapter, uint64_t *data)
 {
-       if((*data = e1000_setup_desc_rings(adapter))) goto err_loopback;
-       if((*data = e1000_setup_loopback_test(adapter)))
-               goto err_loopback_setup;
+       /* PHY loopback cannot be performed if SoL/IDER
+        * sessions are active */
+       if (e1000_check_phy_reset_block(&adapter->hw)) {
+               DPRINTK(DRV, ERR, "Cannot do PHY loopback test "
+                       "when SoL/IDER is active.\n");
+               *data = 0;
+               goto out;
+       }
+
+       if ((*data = e1000_setup_desc_rings(adapter)))
+               goto out;
+       if ((*data = e1000_setup_loopback_test(adapter)))
+               goto err_loopback;
        *data = e1000_run_loopback_test(adapter);
        e1000_loopback_cleanup(adapter);
-err_loopback_setup:
-       e1000_free_desc_rings(adapter);
+
 err_loopback:
+       e1000_free_desc_rings(adapter);
+out:
        return *data;
 }
 
@@ -1519,17 +1548,17 @@ e1000_link_test(struct e1000_adapter *adapter, uint64_t *data)
                *data = 1;
        } else {
                e1000_check_for_link(&adapter->hw);
-               if(adapter->hw.autoneg)  /* if auto_neg is set wait for it */
+               if (adapter->hw.autoneg)  /* if auto_neg is set wait for it */
                        msec_delay(4000);
 
-               if(!(E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU)) {
+               if (!(E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU)) {
                        *data = 1;
                }
        }
        return *data;
 }
 
-static int 
+static int
 e1000_diag_test_count(struct net_device *netdev)
 {
        return E1000_TEST_LEN;
@@ -1542,7 +1571,7 @@ e1000_diag_test(struct net_device *netdev,
        struct e1000_adapter *adapter = netdev_priv(netdev);
        boolean_t if_running = netif_running(netdev);
 
-       if(eth_test->flags == ETH_TEST_FL_OFFLINE) {
+       if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
                /* Offline tests */
 
                /* save speed, duplex, autoneg settings */
@@ -1552,27 +1581,27 @@ e1000_diag_test(struct net_device *netdev,
 
                /* Link test performed before hardware reset so autoneg doesn't
                 * interfere with test result */
-               if(e1000_link_test(adapter, &data[4]))
+               if (e1000_link_test(adapter, &data[4]))
                        eth_test->flags |= ETH_TEST_FL_FAILED;
 
-               if(if_running)
+               if (if_running)
                        e1000_down(adapter);
                else
                        e1000_reset(adapter);
 
-               if(e1000_reg_test(adapter, &data[0]))
+               if (e1000_reg_test(adapter, &data[0]))
                        eth_test->flags |= ETH_TEST_FL_FAILED;
 
                e1000_reset(adapter);
-               if(e1000_eeprom_test(adapter, &data[1]))
+               if (e1000_eeprom_test(adapter, &data[1]))
                        eth_test->flags |= ETH_TEST_FL_FAILED;
 
                e1000_reset(adapter);
-               if(e1000_intr_test(adapter, &data[2]))
+               if (e1000_intr_test(adapter, &data[2]))
                        eth_test->flags |= ETH_TEST_FL_FAILED;
 
                e1000_reset(adapter);
-               if(e1000_loopback_test(adapter, &data[3]))
+               if (e1000_loopback_test(adapter, &data[3]))
                        eth_test->flags |= ETH_TEST_FL_FAILED;
 
                /* restore speed, duplex, autoneg settings */
@@ -1581,11 +1610,11 @@ e1000_diag_test(struct net_device *netdev,
                adapter->hw.autoneg = autoneg;
 
                e1000_reset(adapter);
-               if(if_running)
+               if (if_running)
                        e1000_up(adapter);
        } else {
                /* Online tests */
-               if(e1000_link_test(adapter, &data[4]))
+               if (e1000_link_test(adapter, &data[4]))
                        eth_test->flags |= ETH_TEST_FL_FAILED;
 
                /* Offline tests aren't run; pass by default */
@@ -1603,7 +1632,7 @@ e1000_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
        struct e1000_adapter *adapter = netdev_priv(netdev);
        struct e1000_hw *hw = &adapter->hw;
 
-       switch(adapter->hw.device_id) {
+       switch (adapter->hw.device_id) {
        case E1000_DEV_ID_82542:
        case E1000_DEV_ID_82543GC_FIBER:
        case E1000_DEV_ID_82543GC_COPPER:
@@ -1617,8 +1646,9 @@ e1000_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
 
        case E1000_DEV_ID_82546EB_FIBER:
        case E1000_DEV_ID_82546GB_FIBER:
+       case E1000_DEV_ID_82571EB_FIBER:
                /* Wake events only supported on port A for dual fiber */
-               if(E1000_READ_REG(hw, STATUS) & E1000_STATUS_FUNC_1) {
+               if (E1000_READ_REG(hw, STATUS) & E1000_STATUS_FUNC_1) {
                        wol->supported = 0;
                        wol->wolopts   = 0;
                        return;
@@ -1630,13 +1660,13 @@ e1000_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
                                 WAKE_BCAST | WAKE_MAGIC;
 
                wol->wolopts = 0;
-               if(adapter->wol & E1000_WUFC_EX)
+               if (adapter->wol & E1000_WUFC_EX)
                        wol->wolopts |= WAKE_UCAST;
-               if(adapter->wol & E1000_WUFC_MC)
+               if (adapter->wol & E1000_WUFC_MC)
                        wol->wolopts |= WAKE_MCAST;
-               if(adapter->wol & E1000_WUFC_BC)
+               if (adapter->wol & E1000_WUFC_BC)
                        wol->wolopts |= WAKE_BCAST;
-               if(adapter->wol & E1000_WUFC_MAG)
+               if (adapter->wol & E1000_WUFC_MAG)
                        wol->wolopts |= WAKE_MAGIC;
                return;
        }
@@ -1648,7 +1678,7 @@ e1000_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
        struct e1000_adapter *adapter = netdev_priv(netdev);
        struct e1000_hw *hw = &adapter->hw;
 
-       switch(adapter->hw.device_id) {
+       switch (adapter->hw.device_id) {
        case E1000_DEV_ID_82542:
        case E1000_DEV_ID_82543GC_FIBER:
        case E1000_DEV_ID_82543GC_COPPER:
@@ -1660,24 +1690,25 @@ e1000_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
 
        case E1000_DEV_ID_82546EB_FIBER:
        case E1000_DEV_ID_82546GB_FIBER:
+       case E1000_DEV_ID_82571EB_FIBER:
                /* Wake events only supported on port A for dual fiber */
-               if(E1000_READ_REG(hw, STATUS) & E1000_STATUS_FUNC_1)
+               if (E1000_READ_REG(hw, STATUS) & E1000_STATUS_FUNC_1)
                        return wol->wolopts ? -EOPNOTSUPP : 0;
                /* Fall Through */
 
        default:
-               if(wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE))
+               if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE))
                        return -EOPNOTSUPP;
 
                adapter->wol = 0;
 
-               if(wol->wolopts & WAKE_UCAST)
+               if (wol->wolopts & WAKE_UCAST)
                        adapter->wol |= E1000_WUFC_EX;
-               if(wol->wolopts & WAKE_MCAST)
+               if (wol->wolopts & WAKE_MCAST)
                        adapter->wol |= E1000_WUFC_MC;
-               if(wol->wolopts & WAKE_BCAST)
+               if (wol->wolopts & WAKE_BCAST)
                        adapter->wol |= E1000_WUFC_BC;
-               if(wol->wolopts & WAKE_MAGIC)
+               if (wol->wolopts & WAKE_MAGIC)
                        adapter->wol |= E1000_WUFC_MAG;
        }
 
@@ -1695,7 +1726,7 @@ e1000_led_blink_callback(unsigned long data)
 {
        struct e1000_adapter *adapter = (struct e1000_adapter *) data;
 
-       if(test_and_change_bit(E1000_LED_ON, &adapter->led_status))
+       if (test_and_change_bit(E1000_LED_ON, &adapter->led_status))
                e1000_led_off(&adapter->hw);
        else
                e1000_led_on(&adapter->hw);
@@ -1708,11 +1739,11 @@ e1000_phys_id(struct net_device *netdev, uint32_t data)
 {
        struct e1000_adapter *adapter = netdev_priv(netdev);
 
-       if(!data || data > (uint32_t)(MAX_SCHEDULE_TIMEOUT / HZ))
+       if (!data || data > (uint32_t)(MAX_SCHEDULE_TIMEOUT / HZ))
                data = (uint32_t)(MAX_SCHEDULE_TIMEOUT / HZ);
 
-       if(adapter->hw.mac_type < e1000_82571) {
-               if(!adapter->blink_timer.function) {
+       if (adapter->hw.mac_type < e1000_82571) {
+               if (!adapter->blink_timer.function) {
                        init_timer(&adapter->blink_timer);
                        adapter->blink_timer.function = e1000_led_blink_callback;
                        adapter->blink_timer.data = (unsigned long) adapter;
@@ -1721,21 +1752,21 @@ e1000_phys_id(struct net_device *netdev, uint32_t data)
                mod_timer(&adapter->blink_timer, jiffies);
                msleep_interruptible(data * 1000);
                del_timer_sync(&adapter->blink_timer);
-       }
-       else if(adapter->hw.mac_type < e1000_82573) {
-               E1000_WRITE_REG(&adapter->hw, LEDCTL, (E1000_LEDCTL_LED2_BLINK_RATE |
-                       E1000_LEDCTL_LED0_BLINK | E1000_LEDCTL_LED2_BLINK |
-                       (E1000_LEDCTL_MODE_LED_ON << E1000_LEDCTL_LED2_MODE_SHIFT) |
-                       (E1000_LEDCTL_MODE_LINK_ACTIVITY << E1000_LEDCTL_LED0_MODE_SHIFT) |
-                       (E1000_LEDCTL_MODE_LED_OFF << E1000_LEDCTL_LED1_MODE_SHIFT)));
+       } else if (adapter->hw.mac_type < e1000_82573) {
+               E1000_WRITE_REG(&adapter->hw, LEDCTL,
+                       (E1000_LEDCTL_LED2_BLINK_RATE |
+                        E1000_LEDCTL_LED0_BLINK | E1000_LEDCTL_LED2_BLINK |
+                        (E1000_LEDCTL_MODE_LED_ON << E1000_LEDCTL_LED2_MODE_SHIFT) |
+                        (E1000_LEDCTL_MODE_LINK_ACTIVITY << E1000_LEDCTL_LED0_MODE_SHIFT) |
+                        (E1000_LEDCTL_MODE_LED_OFF << E1000_LEDCTL_LED1_MODE_SHIFT)));
                msleep_interruptible(data * 1000);
-       }
-       else {
-               E1000_WRITE_REG(&adapter->hw, LEDCTL, (E1000_LEDCTL_LED2_BLINK_RATE |
-                       E1000_LEDCTL_LED1_BLINK | E1000_LEDCTL_LED2_BLINK | 
-                       (E1000_LEDCTL_MODE_LED_ON << E1000_LEDCTL_LED2_MODE_SHIFT) |
-                       (E1000_LEDCTL_MODE_LINK_ACTIVITY << E1000_LEDCTL_LED1_MODE_SHIFT) |
-                       (E1000_LEDCTL_MODE_LED_OFF << E1000_LEDCTL_LED0_MODE_SHIFT)));
+       } else {
+               E1000_WRITE_REG(&adapter->hw, LEDCTL,
+                       (E1000_LEDCTL_LED2_BLINK_RATE |
+                        E1000_LEDCTL_LED1_BLINK | E1000_LEDCTL_LED2_BLINK |
+                        (E1000_LEDCTL_MODE_LED_ON << E1000_LEDCTL_LED2_MODE_SHIFT) |
+                        (E1000_LEDCTL_MODE_LINK_ACTIVITY << E1000_LEDCTL_LED1_MODE_SHIFT) |
+                        (E1000_LEDCTL_MODE_LED_OFF << E1000_LEDCTL_LED0_MODE_SHIFT)));
                msleep_interruptible(data * 1000);
        }
 
@@ -1750,50 +1781,89 @@ static int
 e1000_nway_reset(struct net_device *netdev)
 {
        struct e1000_adapter *adapter = netdev_priv(netdev);
-       if(netif_running(netdev)) {
+       if (netif_running(netdev)) {
                e1000_down(adapter);
                e1000_up(adapter);
        }
        return 0;
 }
 
-static int 
+static int
 e1000_get_stats_count(struct net_device *netdev)
 {
        return E1000_STATS_LEN;
 }
 
-static void 
-e1000_get_ethtool_stats(struct net_device *netdev, 
+static void
+e1000_get_ethtool_stats(struct net_device *netdev,
                struct ethtool_stats *stats, uint64_t *data)
 {
        struct e1000_adapter *adapter = netdev_priv(netdev);
+#ifdef CONFIG_E1000_MQ
+       uint64_t *queue_stat;
+       int stat_count = sizeof(struct e1000_queue_stats) / sizeof(uint64_t);
+       int j, k;
+#endif
        int i;
 
        e1000_update_stats(adapter);
-       for(i = 0; i < E1000_STATS_LEN; i++) {
-               char *p = (char *)adapter+e1000_gstrings_stats[i].stat_offset;  
-               data[i] = (e1000_gstrings_stats[i].sizeof_stat == 
+       for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) {
+               char *p = (char *)adapter+e1000_gstrings_stats[i].stat_offset;
+               data[i] = (e1000_gstrings_stats[i].sizeof_stat ==
                        sizeof(uint64_t)) ? *(uint64_t *)p : *(uint32_t *)p;
        }
+#ifdef CONFIG_E1000_MQ
+       for (j = 0; j < adapter->num_tx_queues; j++) {
+               queue_stat = (uint64_t *)&adapter->tx_ring[j].tx_stats;
+               for (k = 0; k < stat_count; k++)
+                       data[i + k] = queue_stat[k];
+               i += k;
+       }
+       for (j = 0; j < adapter->num_rx_queues; j++) {
+               queue_stat = (uint64_t *)&adapter->rx_ring[j].rx_stats;
+               for (k = 0; k < stat_count; k++)
+                       data[i + k] = queue_stat[k];
+               i += k;
+       }
+#endif
+/*     BUG_ON(i != E1000_STATS_LEN); */
 }
 
-static void 
+static void
 e1000_get_strings(struct net_device *netdev, uint32_t stringset, uint8_t *data)
 {
+#ifdef CONFIG_E1000_MQ
+       struct e1000_adapter *adapter = netdev_priv(netdev);
+#endif
+       uint8_t *p = data;
        int i;
 
-       switch(stringset) {
+       switch (stringset) {
        case ETH_SS_TEST:
-               memcpy(data, *e1000_gstrings_test, 
+               memcpy(data, *e1000_gstrings_test,
                        E1000_TEST_LEN*ETH_GSTRING_LEN);
                break;
        case ETH_SS_STATS:
-               for (i=0; i < E1000_STATS_LEN; i++) {
-                       memcpy(data + i * ETH_GSTRING_LEN, 
-                       e1000_gstrings_stats[i].stat_string,
-                       ETH_GSTRING_LEN);
+               for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) {
+                       memcpy(p, e1000_gstrings_stats[i].stat_string,
+                              ETH_GSTRING_LEN);
+                       p += ETH_GSTRING_LEN;
                }
+#ifdef CONFIG_E1000_MQ
+               for (i = 0; i < adapter->num_tx_queues; i++) {
+                       sprintf(p, "tx_queue_%u_packets", i);
+                       p += ETH_GSTRING_LEN;
+                       sprintf(p, "tx_queue_%u_bytes", i);
+                       p += ETH_GSTRING_LEN;
+               }
+               for (i = 0; i < adapter->num_rx_queues; i++) {
+                       sprintf(p, "rx_queue_%u_packets", i);
+                       p += ETH_GSTRING_LEN;
+                       sprintf(p, "rx_queue_%u_bytes", i);
+                       p += ETH_GSTRING_LEN;
+               }
+#endif
+/*             BUG_ON(p - data != E1000_STATS_LEN * ETH_GSTRING_LEN); */
                break;
        }
 }
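
With CONFIG_E1000_MQ enabled, the data[] array filled above is the global counters followed by one block of stat_count values per Tx and Rx queue; a hedged sketch of the resulting length, using only names visible in the hunk:

/* Hedged sketch of the stats layout built above under CONFIG_E1000_MQ:
 * global stats first, then per-queue blocks of stat_count uint64_t values. */
static int example_stats_len(int num_tx_queues, int num_rx_queues,
                             int global_stats_len, int stat_count)
{
        return global_stats_len +
               (num_tx_queues + num_rx_queues) * stat_count;
}
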
index 136fc031e4ad555d168a8b0ac002b460ebea8ebb..beeec0fbbeac9861a242c49590450a4ad47db088 100644 (file)
@@ -318,6 +318,8 @@ e1000_set_mac_type(struct e1000_hw *hw)
     case E1000_DEV_ID_82546GB_FIBER:
     case E1000_DEV_ID_82546GB_SERDES:
     case E1000_DEV_ID_82546GB_PCIE:
+    case E1000_DEV_ID_82546GB_QUAD_COPPER:
+    case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
         hw->mac_type = e1000_82546_rev_3;
         break;
     case E1000_DEV_ID_82541EI:
@@ -639,6 +641,7 @@ e1000_init_hw(struct e1000_hw *hw)
     uint16_t cmd_mmrbc;
     uint16_t stat_mmrbc;
     uint32_t mta_size;
+    uint32_t ctrl_ext;
 
     DEBUGFUNC("e1000_init_hw");
 
@@ -735,7 +738,6 @@ e1000_init_hw(struct e1000_hw *hw)
             break;
         case e1000_82571:
         case e1000_82572:
-            ctrl |= (1 << 22);
         case e1000_82573:
             ctrl |= E1000_TXDCTL_COUNT_DESC;
             break;
@@ -775,6 +777,15 @@ e1000_init_hw(struct e1000_hw *hw)
      */
     e1000_clear_hw_cntrs(hw);
 
+    if (hw->device_id == E1000_DEV_ID_82546GB_QUAD_COPPER ||
+        hw->device_id == E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3) {
+        ctrl_ext = E1000_READ_REG(hw, CTRL_EXT);
+        /* Relaxed ordering must be disabled to avoid a parity
+         * error crash in a PCI slot. */
+        ctrl_ext |= E1000_CTRL_EXT_RO_DIS;
+        E1000_WRITE_REG(hw, CTRL_EXT, ctrl_ext);
+    }
+
     return ret_val;
 }
 
@@ -838,6 +849,11 @@ e1000_setup_link(struct e1000_hw *hw)
 
     DEBUGFUNC("e1000_setup_link");
 
+    /* In the case of the phy reset being blocked, we already have a link.
+     * We do not have to set it up again. */
+    if (e1000_check_phy_reset_block(hw))
+        return E1000_SUCCESS;
+
     /* Read and store word 0x0F of the EEPROM. This word contains bits
      * that determine the hardware's default PAUSE (flow control) mode,
      * a bit that determines whether the HW defaults to enabling or
@@ -1584,10 +1600,10 @@ e1000_phy_setup_autoneg(struct e1000_hw *hw)
     if(ret_val)
         return ret_val;
 
-        /* Read the MII 1000Base-T Control Register (Address 9). */
-        ret_val = e1000_read_phy_reg(hw, PHY_1000T_CTRL, &mii_1000t_ctrl_reg);
-        if(ret_val)
-            return ret_val;
+    /* Read the MII 1000Base-T Control Register (Address 9). */
+    ret_val = e1000_read_phy_reg(hw, PHY_1000T_CTRL, &mii_1000t_ctrl_reg);
+    if(ret_val)
+        return ret_val;
 
     /* Need to parse both autoneg_advertised and fc and set up
      * the appropriate PHY registers.  First we will parse for
@@ -1929,14 +1945,19 @@ e1000_phy_force_speed_duplex(struct e1000_hw *hw)
 void
 e1000_config_collision_dist(struct e1000_hw *hw)
 {
-    uint32_t tctl;
+    uint32_t tctl, coll_dist;
 
     DEBUGFUNC("e1000_config_collision_dist");
 
+    if (hw->mac_type < e1000_82543)
+        coll_dist = E1000_COLLISION_DISTANCE_82542;
+    else
+        coll_dist = E1000_COLLISION_DISTANCE;
+
     tctl = E1000_READ_REG(hw, TCTL);
 
     tctl &= ~E1000_TCTL_COLD;
-    tctl |= E1000_COLLISION_DISTANCE << E1000_COLD_SHIFT;
+    tctl |= coll_dist << E1000_COLD_SHIFT;
 
     E1000_WRITE_REG(hw, TCTL, tctl);
     E1000_WRITE_FLUSH(hw);
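
A worked example of the COLD field written above, using the collision-distance values and E1000_COLD_SHIFT (12) defined later in this patch:

/* Worked example of the collision-distance field composed above. */
static unsigned int example_cold_field(int is_pre_82543)
{
        unsigned int coll_dist = is_pre_82543 ? 64 : 63; /* 82542 vs. newer MACs   */

        return coll_dist << 12;  /* E1000_COLD_SHIFT; 63 << 12 == 0x3f000 for TCTL */
}
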
@@ -2982,6 +3003,8 @@ e1000_phy_hw_reset(struct e1000_hw *hw)
         
         if (hw->mac_type < e1000_82571) 
             msec_delay(10);
+        else
+            udelay(100);
         
         E1000_WRITE_REG(hw, CTRL, ctrl);
         E1000_WRITE_FLUSH(hw);
@@ -3881,17 +3904,19 @@ e1000_read_eeprom(struct e1000_hw *hw,
         return -E1000_ERR_EEPROM;
     }
 
-    /* FLASH reads without acquiring the semaphore are safe in 82573-based
-     * controllers.
-     */
-    if ((e1000_is_onboard_nvm_eeprom(hw) == TRUE) ||
-        (hw->mac_type != e1000_82573)) {
-        /* Prepare the EEPROM for reading  */
-        if(e1000_acquire_eeprom(hw) != E1000_SUCCESS)
-            return -E1000_ERR_EEPROM;
+    /* FLASH reads without acquiring the semaphore are safe */
+    if (e1000_is_onboard_nvm_eeprom(hw) == TRUE &&
+    hw->eeprom.use_eerd == FALSE) {
+        switch (hw->mac_type) {
+        default:
+            /* Prepare the EEPROM for reading  */
+            if (e1000_acquire_eeprom(hw) != E1000_SUCCESS)
+                return -E1000_ERR_EEPROM;
+            break;
+        }
     }
 
-    if(eeprom->use_eerd == TRUE) {
+    if (eeprom->use_eerd == TRUE) {
         ret_val = e1000_read_eeprom_eerd(hw, offset, words, data);
         if ((e1000_is_onboard_nvm_eeprom(hw) == TRUE) ||
             (hw->mac_type != e1000_82573))
@@ -4398,7 +4423,7 @@ e1000_commit_shadow_ram(struct e1000_hw *hw)
             return -E1000_ERR_EEPROM;
         }
 
-       /* If STM opcode located in bits 15:8 of flop, reset firmware */
+        /* If STM opcode located in bits 15:8 of flop, reset firmware */
         if ((flop & 0xFF00) == E1000_STM_OPCODE) {
             E1000_WRITE_REG(hw, HICR, E1000_HICR_FW_RESET);
         }
@@ -4406,7 +4431,7 @@ e1000_commit_shadow_ram(struct e1000_hw *hw)
         /* Perform the flash update */
         E1000_WRITE_REG(hw, EECD, eecd | E1000_EECD_FLUPD);
 
-       for (i=0; i < attempts; i++) {
+        for (i=0; i < attempts; i++) {
             eecd = E1000_READ_REG(hw, EECD);
             if ((eecd & E1000_EECD_FLUPD) == 0) {
                 break;
@@ -4479,6 +4504,7 @@ e1000_read_mac_addr(struct e1000_hw * hw)
         hw->perm_mac_addr[i] = (uint8_t) (eeprom_data & 0x00FF);
         hw->perm_mac_addr[i+1] = (uint8_t) (eeprom_data >> 8);
     }
+
     switch (hw->mac_type) {
     default:
         break;
@@ -6720,6 +6746,12 @@ e1000_get_phy_cfg_done(struct e1000_hw *hw)
         break;
     }
 
+    /* PHY configuration from NVM starts only after EECD_AUTO_RD goes high.
+     * Need to wait for PHY configuration completion before accessing NVM
+     * and PHY. */
+    if (hw->mac_type == e1000_82573)
+        msec_delay(25);
+
     return E1000_SUCCESS;
 }
 
@@ -6809,7 +6841,8 @@ int32_t
 e1000_check_phy_reset_block(struct e1000_hw *hw)
 {
     uint32_t manc = 0;
-    if(hw->mac_type > e1000_82547_rev_2)
+
+    if (hw->mac_type > e1000_82547_rev_2)
         manc = E1000_READ_REG(hw, MANC);
     return (manc & E1000_MANC_BLK_PHY_RST_ON_IDE) ?
            E1000_BLK_PHY_RESET : E1000_SUCCESS;
index 7caa35748ceac9c8236d1510f9335b2e57f9073b..f1219dd9dbac2b41a727db0d90b29bf67f092f4f 100644 (file)
@@ -377,6 +377,7 @@ int32_t e1000_swfw_sync_acquire(struct e1000_hw *hw, uint16_t mask);
 void e1000_swfw_sync_release(struct e1000_hw *hw, uint16_t mask);
 
 /* Filters (multicast, vlan, receive) */
+void e1000_mc_addr_list_update(struct e1000_hw *hw, uint8_t * mc_addr_list, uint32_t mc_addr_count, uint32_t pad, uint32_t rar_used_count);
 uint32_t e1000_hash_mc_addr(struct e1000_hw *hw, uint8_t * mc_addr);
 void e1000_mta_set(struct e1000_hw *hw, uint32_t hash_value);
 void e1000_rar_set(struct e1000_hw *hw, uint8_t * mc_addr, uint32_t rar_index);
@@ -401,7 +402,9 @@ void e1000_read_pci_cfg(struct e1000_hw *hw, uint32_t reg, uint16_t * value);
 void e1000_write_pci_cfg(struct e1000_hw *hw, uint32_t reg, uint16_t * value);
 /* Port I/O is only supported on 82544 and newer */
 uint32_t e1000_io_read(struct e1000_hw *hw, unsigned long port);
+uint32_t e1000_read_reg_io(struct e1000_hw *hw, uint32_t offset);
 void e1000_io_write(struct e1000_hw *hw, unsigned long port, uint32_t value);
+void e1000_enable_pciex_master(struct e1000_hw *hw);
 int32_t e1000_disable_pciex_master(struct e1000_hw *hw);
 int32_t e1000_get_software_semaphore(struct e1000_hw *hw);
 void e1000_release_software_semaphore(struct e1000_hw *hw);
@@ -439,6 +442,7 @@ int32_t e1000_check_phy_reset_block(struct e1000_hw *hw);
 #define E1000_DEV_ID_82546GB_FIBER       0x107A
 #define E1000_DEV_ID_82546GB_SERDES      0x107B
 #define E1000_DEV_ID_82546GB_PCIE        0x108A
+#define E1000_DEV_ID_82546GB_QUAD_COPPER 0x1099
 #define E1000_DEV_ID_82547EI             0x1019
 #define E1000_DEV_ID_82571EB_COPPER      0x105E
 #define E1000_DEV_ID_82571EB_FIBER       0x105F
@@ -449,6 +453,7 @@ int32_t e1000_check_phy_reset_block(struct e1000_hw *hw);
 #define E1000_DEV_ID_82573E              0x108B
 #define E1000_DEV_ID_82573E_IAMT         0x108C
 #define E1000_DEV_ID_82573L              0x109A
+#define E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3 0x10B5
 
 
 #define NODE_ADDRESS_SIZE 6
@@ -897,14 +902,14 @@ struct e1000_ffvt_entry {
 #define E1000_TXDCTL   0x03828  /* TX Descriptor Control - RW */
 #define E1000_TADV     0x0382C  /* TX Interrupt Absolute Delay Val - RW */
 #define E1000_TSPMT    0x03830  /* TCP Segmentation PAD & Min Threshold - RW */
-#define E1000_TARC0    0x03840 /* TX Arbitration Count (0) */
-#define E1000_TDBAL1   0x03900 /* TX Desc Base Address Low (1) - RW */
-#define E1000_TDBAH1   0x03904 /* TX Desc Base Address High (1) - RW */
-#define E1000_TDLEN1   0x03908 /* TX Desc Length (1) - RW */
-#define E1000_TDH1     0x03910 /* TX Desc Head (1) - RW */
-#define E1000_TDT1     0x03918 /* TX Desc Tail (1) - RW */
-#define E1000_TXDCTL1  0x03928 /* TX Descriptor Control (1) - RW */
-#define E1000_TARC1    0x03940 /* TX Arbitration Count (1) */
+#define E1000_TARC0    0x03840  /* TX Arbitration Count (0) */
+#define E1000_TDBAL1   0x03900  /* TX Desc Base Address Low (1) - RW */
+#define E1000_TDBAH1   0x03904  /* TX Desc Base Address High (1) - RW */
+#define E1000_TDLEN1   0x03908  /* TX Desc Length (1) - RW */
+#define E1000_TDH1     0x03910  /* TX Desc Head (1) - RW */
+#define E1000_TDT1     0x03918  /* TX Desc Tail (1) - RW */
+#define E1000_TXDCTL1  0x03928  /* TX Descriptor Control (1) - RW */
+#define E1000_TARC1    0x03940  /* TX Arbitration Count (1) */
 #define E1000_CRCERRS  0x04000  /* CRC Error Count - R/clr */
 #define E1000_ALGNERRC 0x04004  /* Alignment Error Count - R/clr */
 #define E1000_SYMERRS  0x04008  /* Symbol Error Count - R/clr */
@@ -1497,6 +1502,7 @@ struct e1000_hw {
 #define E1000_CTRL_EXT_EE_RST    0x00002000 /* Reinitialize from EEPROM */
 #define E1000_CTRL_EXT_IPS       0x00004000 /* Invert Power State */
 #define E1000_CTRL_EXT_SPD_BYPS  0x00008000 /* Speed Select Bypass */
+#define E1000_CTRL_EXT_RO_DIS    0x00020000 /* Relaxed Ordering disable */
 #define E1000_CTRL_EXT_LINK_MODE_MASK 0x00C00000
 #define E1000_CTRL_EXT_LINK_MODE_GMII 0x00000000
 #define E1000_CTRL_EXT_LINK_MODE_TBI  0x00C00000
@@ -1758,7 +1764,6 @@ struct e1000_hw {
 #define E1000_TXDCTL_FULL_TX_DESC_WB 0x01010000 /* GRAN=1, WTHRESH=1 */
 #define E1000_TXDCTL_COUNT_DESC 0x00400000 /* Enable the counting of desc.
                                               still to be processed. */
-
 /* Transmit Configuration Word */
 #define E1000_TXCW_FD         0x00000020        /* TXCW full duplex */
 #define E1000_TXCW_HD         0x00000040        /* TXCW half duplex */
@@ -1954,6 +1959,23 @@ struct e1000_host_command_info {
 
 #define E1000_MDALIGN          4096
 
+/* PCI-Ex registers */
+
+/* PCI-Ex Control Register */
+#define E1000_GCR_RXD_NO_SNOOP                 0x00000001
+#define E1000_GCR_RXDSCW_NO_SNOOP              0x00000002
+#define E1000_GCR_RXDSCR_NO_SNOOP              0x00000004
+#define E1000_GCR_TXD_NO_SNOOP                 0x00000008
+#define E1000_GCR_TXDSCW_NO_SNOOP              0x00000010
+#define E1000_GCR_TXDSCR_NO_SNOOP              0x00000020
+
+#define PCI_EX_NO_SNOOP_ALL (E1000_GCR_RXD_NO_SNOOP            | \
+                                                        E1000_GCR_RXDSCW_NO_SNOOP      | \
+                                                        E1000_GCR_RXDSCR_NO_SNOOP      | \
+                                                        E1000_GCR_TXD_NO_SNOOP        | \
+                                                        E1000_GCR_TXDSCW_NO_SNOOP      | \
+                                                        E1000_GCR_TXDSCR_NO_SNOOP)
+
 #define E1000_GCR_L1_ACT_WITHOUT_L0S_RX 0x08000000
 /* Function Active and Power State to MNG */
 #define E1000_FACTPS_FUNC0_POWER_STATE_MASK         0x00000003
@@ -2077,7 +2099,10 @@ struct e1000_host_command_info {
 /* Collision related configuration parameters */
 #define E1000_COLLISION_THRESHOLD       15
 #define E1000_CT_SHIFT                  4
-#define E1000_COLLISION_DISTANCE        64
+/* Collision distance is a 0-based value that applies to
+ * half-duplex-capable hardware only. */
+#define E1000_COLLISION_DISTANCE        63
+#define E1000_COLLISION_DISTANCE_82542  64
 #define E1000_FDX_COLLISION_DISTANCE    E1000_COLLISION_DISTANCE
 #define E1000_HDX_COLLISION_DISTANCE    E1000_COLLISION_DISTANCE
 #define E1000_COLD_SHIFT                12
index 438a931fd55df4c6dd57e00d9480f35c4e9fb461..31e332935e5a5dd335e14cbf50091f72197103e7 100644 (file)
 #include "e1000.h"
 
 /* Change Log
- * 6.0.58       4/20/05
- *   o Accepted ethtool cleanup patch from Stephen Hemminger 
- * 6.0.44+     2/15/05
- *   o applied Anton's patch to resolve tx hang in hardware
- *   o Applied Andrew Mortons patch - e1000 stops working after resume
+ * 6.3.9       12/16/2005
+ *   o incorporate fix for recycled skbs from IBM LTC
+ * 6.3.7       11/18/2005
+ *   o Honor eeprom setting for enabling/disabling Wake On Lan
+ * 6.3.5       11/17/2005
+ *   o Fix memory leak in rx ring handling for PCI Express adapters
+ * 6.3.4       11/8/05
+ *   o Patch from Jesper Juhl to remove redundant NULL checks for kfree
+ * 6.3.2       9/20/05
+ *   o Render logic that sets/resets DRV_LOAD as inline functions to 
+ *     avoid code replication. If f/w is AMT then set DRV_LOAD only when
+ *     network interface is open.
+ *   o Handle DRV_LOAD set/reset in cases where AMT uses VLANs.
+ *   o Adjust PBA partitioning for Jumbo frames using MTU size and not
+ *     rx_buffer_len
+ * 6.3.1       9/19/05
+ *   o Use adapter->tx_timeout_factor in Tx Hung Detect logic 
+ *     (e1000_clean_tx_irq)
+ *   o Support for 8086:10B5 device (Quad Port)
+ * 6.2.14      9/15/05
+ *   o In AMT enabled configurations, set/reset DRV_LOAD bit on interface 
+ *     open/close 
+ * 6.2.13       9/14/05
+ *   o Invoke e1000_check_mng_mode only for 8257x controllers since it 
+ *     accesses the FWSM that is not supported in other controllers
+ * 6.2.12       9/9/05
+ *   o Add support for device id E1000_DEV_ID_82546GB_QUAD_COPPER
+ *   o set RCTL:SECRC only for controllers newer than 82543. 
+ *   o When the n/w interface comes down reset DRV_LOAD bit to notify f/w.
+ *     This code was moved from e1000_remove to e1000_close
+ * 6.2.10       9/6/05
+ *   o Fix error in updating RDT in e1000_alloc_rx_buffers[_ps] -- off by one.
+ *   o Enable fc by default on 82573 controllers (do not read eeprom)
+ *   o Fix rx_errors statistic not to include missed_packet_count
+ *   o Fix rx_dropped statistic not to include missed_packet_count 
+ *     (Padraig Brady)
+ * 6.2.9        8/30/05
+ *   o Remove call to update statistics from the controller in e1000_get_stats
+ * 6.2.8        8/30/05
+ *   o Improved algorithm for rx buffer allocation/rdt update
+ *   o Flow control watermarks relative to rx PBA size
+ *   o Simplified 'Tx Hung' detect logic
+ * 6.2.7       8/17/05
+ *   o Report rx buffer allocation failures and tx timeout counts in stats
+ * 6.2.6       8/16/05
+ *   o Implement workaround for controller erratum -- linear non-tso packet
+ *     following a TSO gets written back prematurely
+ * 6.2.5       8/15/05
+ *   o Set netdev->tx_queue_len based on link speed/duplex settings.
+ *   o Fix net_stats.rx_fifo_errors <p@draigBrady.com>
+ *   o Do not power off PHY if SoL/IDER session is active
+ * 6.2.4       8/10/05
+ *   o Fix loopback test setup/cleanup for 82571/3 controllers
+ *   o Fix parsing of outgoing packets (e1000_transfer_dhcp_info) to treat
+ *     all packets as raw
+ *   o Prevent operations that will cause the PHY to be reset if SoL/IDER
+ *     sessions are active and log a message
+ * 6.2.2       7/21/05
+ *   o used fixed size descriptors for all MTU sizes, reduces memory load
+ * 6.1.2       4/13/05
+ *   o Fixed ethtool diagnostics
+ *   o Enabled flow control to take default eeprom settings
+ *   o Added stats_lock around e1000_read_phy_reg commands to avoid concurrent
+ *     calls, one from mii_ioctl and other from within update_stats while 
+ *     processing MIIREG ioctl.
  */
 
 char e1000_driver_name[] = "e1000";
@@ -43,7 +103,7 @@ static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
 #else
 #define DRIVERNAPI "-NAPI"
 #endif
-#define DRV_VERSION "6.1.16-k2"DRIVERNAPI
+#define DRV_VERSION "6.3.9-k2"DRIVERNAPI
 char e1000_driver_version[] = DRV_VERSION;
 static char e1000_copyright[] = "Copyright (c) 1999-2005 Intel Corporation.";
 
@@ -97,7 +157,9 @@ static struct pci_device_id e1000_pci_tbl[] = {
        INTEL_E1000_ETHERNET_DEVICE(0x108A),
        INTEL_E1000_ETHERNET_DEVICE(0x108B),
        INTEL_E1000_ETHERNET_DEVICE(0x108C),
+       INTEL_E1000_ETHERNET_DEVICE(0x1099),
        INTEL_E1000_ETHERNET_DEVICE(0x109A),
+       INTEL_E1000_ETHERNET_DEVICE(0x10B5),
        /* required last entry */
        {0,}
 };
@@ -171,9 +233,11 @@ static boolean_t e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
                                        struct e1000_rx_ring *rx_ring);
 #endif
 static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
-                                   struct e1000_rx_ring *rx_ring);
+                                   struct e1000_rx_ring *rx_ring,
+                                  int cleaned_count);
 static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
-                                      struct e1000_rx_ring *rx_ring);
+                                      struct e1000_rx_ring *rx_ring,
+                                     int cleaned_count);
 static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
 static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
                           int cmd);
@@ -291,7 +355,7 @@ e1000_irq_disable(struct e1000_adapter *adapter)
 static inline void
 e1000_irq_enable(struct e1000_adapter *adapter)
 {
-       if(likely(atomic_dec_and_test(&adapter->irq_sem))) {
+       if (likely(atomic_dec_and_test(&adapter->irq_sem))) {
                E1000_WRITE_REG(&adapter->hw, IMS, IMS_ENABLE_MASK);
                E1000_WRITE_FLUSH(&adapter->hw);
        }
@@ -303,23 +367,91 @@ e1000_update_mng_vlan(struct e1000_adapter *adapter)
        struct net_device *netdev = adapter->netdev;
        uint16_t vid = adapter->hw.mng_cookie.vlan_id;
        uint16_t old_vid = adapter->mng_vlan_id;
-       if(adapter->vlgrp) {
-               if(!adapter->vlgrp->vlan_devices[vid]) {
-                       if(adapter->hw.mng_cookie.status &
+       if (adapter->vlgrp) {
+               if (!adapter->vlgrp->vlan_devices[vid]) {
+                       if (adapter->hw.mng_cookie.status &
                                E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) {
                                e1000_vlan_rx_add_vid(netdev, vid);
                                adapter->mng_vlan_id = vid;
                        } else
                                adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
-                               
-                       if((old_vid != (uint16_t)E1000_MNG_VLAN_NONE) &&
-                                       (vid != old_vid) && 
+
+                       if ((old_vid != (uint16_t)E1000_MNG_VLAN_NONE) &&
+                                       (vid != old_vid) &&
                                        !adapter->vlgrp->vlan_devices[old_vid])
                                e1000_vlan_rx_kill_vid(netdev, old_vid);
                }
        }
 }
-       
+
+/**
+ * e1000_release_hw_control - release control of the h/w to f/w
+ * @adapter: address of board private structure
+ *
+ * e1000_release_hw_control resets {CTRL_EXT|FWSM}:DRV_LOAD bit.
+ * For ASF and Pass Through versions of f/w this means that the
+ * driver is no longer loaded. For AMT version (only with 82573)
+ * of the f/w this means that the network i/f is closed.
+ * 
+ **/
+
+static inline void 
+e1000_release_hw_control(struct e1000_adapter *adapter)
+{
+       uint32_t ctrl_ext;
+       uint32_t swsm;
+
+       /* Let firmware take over control of h/w */
+       switch (adapter->hw.mac_type) {
+       case e1000_82571:
+       case e1000_82572:
+               ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT);
+               E1000_WRITE_REG(&adapter->hw, CTRL_EXT,
+                               ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
+               break;
+       case e1000_82573:
+               swsm = E1000_READ_REG(&adapter->hw, SWSM);
+               E1000_WRITE_REG(&adapter->hw, SWSM,
+                               swsm & ~E1000_SWSM_DRV_LOAD);
+       default:
+               break;
+       }
+}
+
+/**
+ * e1000_get_hw_control - get control of the h/w from f/w
+ * @adapter: address of board private structure
+ *
+ * e1000_get_hw_control sets {CTRL_EXT|FWSM}:DRV_LOAD bit.
+ * For ASF and Pass Through versions of f/w this means that 
+ * the driver is loaded. For AMT version (only with 82573) 
+ * of the f/w this means that the network i/f is open.
+ * 
+ **/
+
+static inline void 
+e1000_get_hw_control(struct e1000_adapter *adapter)
+{
+       uint32_t ctrl_ext;
+       uint32_t swsm;
+       /* Let firmware know the driver has taken over */
+       switch (adapter->hw.mac_type) {
+       case e1000_82571:
+       case e1000_82572:
+               ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT);
+               E1000_WRITE_REG(&adapter->hw, CTRL_EXT,
+                               ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
+               break;
+       case e1000_82573:
+               swsm = E1000_READ_REG(&adapter->hw, SWSM);
+               E1000_WRITE_REG(&adapter->hw, SWSM,
+                               swsm | E1000_SWSM_DRV_LOAD);
+               break;
+       default:
+               break;
+       }
+}
+
 int
 e1000_up(struct e1000_adapter *adapter)
 {
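
A minimal sketch of how these two helpers are meant to pair up around interface open/close, as the changelog above describes; the wrapper names here are hypothetical and not part of this patch:

/* Hypothetical wrappers (not in this patch) showing the intended pairing
 * of the DRV_LOAD helpers around interface open/close. */
static int example_open(struct e1000_adapter *adapter)
{
        e1000_get_hw_control(adapter);     /* tell f/w the driver now owns the h/w */
        return e1000_up(adapter);
}

static int example_close(struct e1000_adapter *adapter)
{
        e1000_down(adapter);
        e1000_release_hw_control(adapter); /* hand the h/w back to f/w (AMT/ASF)   */
        return 0;
}
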
@@ -329,10 +461,10 @@ e1000_up(struct e1000_adapter *adapter)
        /* hardware has been reset, we need to reload some things */
 
        /* Reset the PHY if it was previously powered down */
-       if(adapter->hw.media_type == e1000_media_type_copper) {
+       if (adapter->hw.media_type == e1000_media_type_copper) {
                uint16_t mii_reg;
                e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &mii_reg);
-               if(mii_reg & MII_CR_POWER_DOWN)
+               if (mii_reg & MII_CR_POWER_DOWN)
                        e1000_phy_reset(&adapter->hw);
        }
 
@@ -343,20 +475,26 @@ e1000_up(struct e1000_adapter *adapter)
        e1000_configure_tx(adapter);
        e1000_setup_rctl(adapter);
        e1000_configure_rx(adapter);
-       for (i = 0; i < adapter->num_queues; i++)
-               adapter->alloc_rx_buf(adapter, &adapter->rx_ring[i]);
+       /* call E1000_DESC_UNUSED which always leaves
+        * at least 1 descriptor unused to make sure
+        * next_to_use != next_to_clean */
+       for (i = 0; i < adapter->num_rx_queues; i++) {
+               struct e1000_rx_ring *ring = &adapter->rx_ring[i];
+               adapter->alloc_rx_buf(adapter, ring,
+                                     E1000_DESC_UNUSED(ring));
+       }
 
 #ifdef CONFIG_PCI_MSI
-       if(adapter->hw.mac_type > e1000_82547_rev_2) {
+       if (adapter->hw.mac_type > e1000_82547_rev_2) {
                adapter->have_msi = TRUE;
-               if((err = pci_enable_msi(adapter->pdev))) {
+               if ((err = pci_enable_msi(adapter->pdev))) {
                        DPRINTK(PROBE, ERR,
                         "Unable to allocate MSI interrupt Error: %d\n", err);
                        adapter->have_msi = FALSE;
                }
        }
 #endif
-       if((err = request_irq(adapter->pdev->irq, &e1000_intr,
+       if ((err = request_irq(adapter->pdev->irq, &e1000_intr,
                              SA_SHIRQ | SA_SAMPLE_RANDOM,
                              netdev->name, netdev))) {
                DPRINTK(PROBE, ERR,
@@ -364,6 +502,12 @@ e1000_up(struct e1000_adapter *adapter)
                return err;
        }
 
+#ifdef CONFIG_E1000_MQ
+       e1000_setup_queue_mapping(adapter);
+#endif
+
+       adapter->tx_queue_len = netdev->tx_queue_len;
+
        mod_timer(&adapter->watchdog_timer, jiffies);
 
 #ifdef CONFIG_E1000_NAPI
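
E1000_DESC_UNUSED itself lives in e1000.h and is not shown in this hunk; a hedged restatement of what it is assumed to compute:

/* Hedged restatement of the assumed E1000_DESC_UNUSED accounting: the number
 * of free slots between next_to_use and next_to_clean, always leaving at
 * least one descriptor unused so the two indices never become equal. */
static int example_desc_unused(int count, int next_to_use, int next_to_clean)
{
        return ((next_to_clean > next_to_use) ? 0 : count)
               + next_to_clean - next_to_use - 1;
}
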
@@ -378,6 +522,8 @@ void
 e1000_down(struct e1000_adapter *adapter)
 {
        struct net_device *netdev = adapter->netdev;
+       boolean_t mng_mode_enabled = (adapter->hw.mac_type >= e1000_82571) &&
+                                    e1000_check_mng_mode(&adapter->hw);
 
        e1000_irq_disable(adapter);
 #ifdef CONFIG_E1000_MQ
@@ -385,7 +531,7 @@ e1000_down(struct e1000_adapter *adapter)
 #endif
        free_irq(adapter->pdev->irq, netdev);
 #ifdef CONFIG_PCI_MSI
-       if(adapter->hw.mac_type > e1000_82547_rev_2 &&
+       if (adapter->hw.mac_type > e1000_82547_rev_2 &&
           adapter->have_msi == TRUE)
                pci_disable_msi(adapter->pdev);
 #endif
@@ -396,6 +542,7 @@ e1000_down(struct e1000_adapter *adapter)
 #ifdef CONFIG_E1000_NAPI
        netif_poll_disable(netdev);
 #endif
+       netdev->tx_queue_len = adapter->tx_queue_len;
        adapter->link_speed = 0;
        adapter->link_duplex = 0;
        netif_carrier_off(netdev);
@@ -405,12 +552,16 @@ e1000_down(struct e1000_adapter *adapter)
        e1000_clean_all_tx_rings(adapter);
        e1000_clean_all_rx_rings(adapter);
 
-       /* If WoL is not enabled and management mode is not IAMT
-        * Power down the PHY so no link is implied when interface is down */
-       if(!adapter->wol && adapter->hw.mac_type >= e1000_82540 &&
+       /* Power down the PHY so no link is implied when interface is down *
+        * The PHY cannot be powered down if any of the following is TRUE *
+        * (a) WoL is enabled
+        * (b) AMT is active
+        * (c) SoL/IDER session is active */
+       if (!adapter->wol && adapter->hw.mac_type >= e1000_82540 &&
           adapter->hw.media_type == e1000_media_type_copper &&
-          !e1000_check_mng_mode(&adapter->hw) &&
-          !(E1000_READ_REG(&adapter->hw, MANC) & E1000_MANC_SMBUS_EN)) {
+          !(E1000_READ_REG(&adapter->hw, MANC) & E1000_MANC_SMBUS_EN) &&
+          !mng_mode_enabled &&
+          !e1000_check_phy_reset_block(&adapter->hw)) {
                uint16_t mii_reg;
                e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &mii_reg);
                mii_reg |= MII_CR_POWER_DOWN;
@@ -422,10 +573,8 @@ e1000_down(struct e1000_adapter *adapter)
 void
 e1000_reset(struct e1000_adapter *adapter)
 {
-       struct net_device *netdev = adapter->netdev;
        uint32_t pba, manc;
        uint16_t fc_high_water_mark = E1000_FC_HIGH_DIFF;
-       uint16_t fc_low_water_mark = E1000_FC_LOW_DIFF;
 
        /* Repartition Pba for greater than 9k mtu
         * To take effect CTRL.RST is required.
@@ -448,19 +597,12 @@ e1000_reset(struct e1000_adapter *adapter)
                break;
        }
 
-       if((adapter->hw.mac_type != e1000_82573) &&
-          (adapter->rx_buffer_len > E1000_RXBUFFER_8192)) {
+       if ((adapter->hw.mac_type != e1000_82573) &&
+          (adapter->netdev->mtu > E1000_RXBUFFER_8192))
                pba -= 8; /* allocate more FIFO for Tx */
-               /* send an XOFF when there is enough space in the
-                * Rx FIFO to hold one extra full size Rx packet 
-               */
-               fc_high_water_mark = netdev->mtu + ENET_HEADER_SIZE + 
-                                       ETHERNET_FCS_SIZE + 1;
-               fc_low_water_mark = fc_high_water_mark + 8;
-       }
 
 
-       if(adapter->hw.mac_type == e1000_82547) {
+       if (adapter->hw.mac_type == e1000_82547) {
                adapter->tx_fifo_head = 0;
                adapter->tx_head_addr = pba << E1000_TX_HEAD_ADDR_SHIFT;
                adapter->tx_fifo_size =
@@ -471,19 +613,21 @@ e1000_reset(struct e1000_adapter *adapter)
        E1000_WRITE_REG(&adapter->hw, PBA, pba);
 
        /* flow control settings */
-       adapter->hw.fc_high_water = (pba << E1000_PBA_BYTES_SHIFT) -
-                                   fc_high_water_mark;
-       adapter->hw.fc_low_water = (pba << E1000_PBA_BYTES_SHIFT) -
-                                  fc_low_water_mark;
+       /* Set the FC high water mark to 90% of the FIFO size.
+        * Required to clear last 3 LSB */
+       fc_high_water_mark = ((pba * 9216)/10) & 0xFFF8;
+
+       adapter->hw.fc_high_water = fc_high_water_mark;
+       adapter->hw.fc_low_water = fc_high_water_mark - 8;
        adapter->hw.fc_pause_time = E1000_FC_PAUSE_TIME;
        adapter->hw.fc_send_xon = 1;
        adapter->hw.fc = adapter->hw.original_fc;
 
        /* Allow time for pending master requests to run */
        e1000_reset_hw(&adapter->hw);
-       if(adapter->hw.mac_type >= e1000_82544)
+       if (adapter->hw.mac_type >= e1000_82544)
                E1000_WRITE_REG(&adapter->hw, WUC, 0);
-       if(e1000_init_hw(&adapter->hw))
+       if (e1000_init_hw(&adapter->hw))
                DPRINTK(PROBE, ERR, "Hardware Error\n");
        e1000_update_mng_vlan(adapter);
        /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
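
A worked example of the new 90% watermark rule above, assuming a pba of 48 (KB of Rx packet buffer); only the arithmetic from the hunk is used:

/* Worked example of the flow-control watermark arithmetic above,
 * with pba assumed to be 48 (KB). */
static void example_fc_watermarks(void)
{
        unsigned int pba = 48;                            /* assumed example value                */
        unsigned int hi  = ((pba * 9216) / 10) & 0xFFF8;  /* 44232: ~90% of 48 KB, 8-byte aligned */
        unsigned int lo  = hi - 8;                        /* 44224                                */

        (void)lo;
}
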
@@ -517,33 +661,31 @@ e1000_probe(struct pci_dev *pdev,
        struct net_device *netdev;
        struct e1000_adapter *adapter;
        unsigned long mmio_start, mmio_len;
-       uint32_t ctrl_ext;
-       uint32_t swsm;
 
        static int cards_found = 0;
        int i, err, pci_using_dac;
        uint16_t eeprom_data;
        uint16_t eeprom_apme_mask = E1000_EEPROM_APME;
-       if((err = pci_enable_device(pdev)))
+       if ((err = pci_enable_device(pdev)))
                return err;
 
-       if(!(err = pci_set_dma_mask(pdev, DMA_64BIT_MASK))) {
+       if (!(err = pci_set_dma_mask(pdev, DMA_64BIT_MASK))) {
                pci_using_dac = 1;
        } else {
-               if((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK))) {
+               if ((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK))) {
                        E1000_ERR("No usable DMA configuration, aborting\n");
                        return err;
                }
                pci_using_dac = 0;
        }
 
-       if((err = pci_request_regions(pdev, e1000_driver_name)))
+       if ((err = pci_request_regions(pdev, e1000_driver_name)))
                return err;
 
        pci_set_master(pdev);
 
        netdev = alloc_etherdev(sizeof(struct e1000_adapter));
-       if(!netdev) {
+       if (!netdev) {
                err = -ENOMEM;
                goto err_alloc_etherdev;
        }
@@ -562,15 +704,15 @@ e1000_probe(struct pci_dev *pdev,
        mmio_len = pci_resource_len(pdev, BAR_0);
 
        adapter->hw.hw_addr = ioremap(mmio_start, mmio_len);
-       if(!adapter->hw.hw_addr) {
+       if (!adapter->hw.hw_addr) {
                err = -EIO;
                goto err_ioremap;
        }
 
-       for(i = BAR_1; i <= BAR_5; i++) {
-               if(pci_resource_len(pdev, i) == 0)
+       for (i = BAR_1; i <= BAR_5; i++) {
+               if (pci_resource_len(pdev, i) == 0)
                        continue;
-               if(pci_resource_flags(pdev, i) & IORESOURCE_IO) {
+               if (pci_resource_flags(pdev, i) & IORESOURCE_IO) {
                        adapter->hw.io_base = pci_resource_start(pdev, i);
                        break;
                }
@@ -607,13 +749,13 @@ e1000_probe(struct pci_dev *pdev,
 
        /* setup the private structure */
 
-       if((err = e1000_sw_init(adapter)))
+       if ((err = e1000_sw_init(adapter)))
                goto err_sw_init;
 
-       if((err = e1000_check_phy_reset_block(&adapter->hw)))
+       if ((err = e1000_check_phy_reset_block(&adapter->hw)))
                DPRINTK(PROBE, INFO, "PHY reset is blocked due to SOL/IDER session.\n");
 
-       if(adapter->hw.mac_type >= e1000_82543) {
+       if (adapter->hw.mac_type >= e1000_82543) {
                netdev->features = NETIF_F_SG |
                                   NETIF_F_HW_CSUM |
                                   NETIF_F_HW_VLAN_TX |
@@ -622,16 +764,16 @@ e1000_probe(struct pci_dev *pdev,
        }
 
 #ifdef NETIF_F_TSO
-       if((adapter->hw.mac_type >= e1000_82544) &&
+       if ((adapter->hw.mac_type >= e1000_82544) &&
           (adapter->hw.mac_type != e1000_82547))
                netdev->features |= NETIF_F_TSO;
 
 #ifdef NETIF_F_TSO_IPV6
-       if(adapter->hw.mac_type > e1000_82547_rev_2)
+       if (adapter->hw.mac_type > e1000_82547_rev_2)
                netdev->features |= NETIF_F_TSO_IPV6;
 #endif
 #endif
-       if(pci_using_dac)
+       if (pci_using_dac)
                netdev->features |= NETIF_F_HIGHDMA;
 
        /* hard_start_xmit is safe against parallel locking */
@@ -639,14 +781,14 @@ e1000_probe(struct pci_dev *pdev,
  
        adapter->en_mng_pt = e1000_enable_mng_pass_thru(&adapter->hw);
 
-       /* before reading the EEPROM, reset the controller to 
+       /* before reading the EEPROM, reset the controller to
         * put the device in a known good starting state */
-       
+
        e1000_reset_hw(&adapter->hw);
 
        /* make sure the EEPROM is good */
 
-       if(e1000_validate_eeprom_checksum(&adapter->hw) < 0) {
+       if (e1000_validate_eeprom_checksum(&adapter->hw) < 0) {
                DPRINTK(PROBE, ERR, "The EEPROM Checksum Is Not Valid\n");
                err = -EIO;
                goto err_eeprom;
@@ -654,12 +796,12 @@ e1000_probe(struct pci_dev *pdev,
 
        /* copy the MAC address out of the EEPROM */
 
-       if(e1000_read_mac_addr(&adapter->hw))
+       if (e1000_read_mac_addr(&adapter->hw))
                DPRINTK(PROBE, ERR, "EEPROM Read Error\n");
        memcpy(netdev->dev_addr, adapter->hw.mac_addr, netdev->addr_len);
        memcpy(netdev->perm_addr, adapter->hw.mac_addr, netdev->addr_len);
 
-       if(!is_valid_ether_addr(netdev->perm_addr)) {
+       if (!is_valid_ether_addr(netdev->perm_addr)) {
                DPRINTK(PROBE, ERR, "Invalid MAC Address\n");
                err = -EIO;
                goto err_eeprom;
@@ -699,7 +841,7 @@ e1000_probe(struct pci_dev *pdev,
         * enable the ACPI Magic Packet filter
         */
 
-       switch(adapter->hw.mac_type) {
+       switch (adapter->hw.mac_type) {
        case e1000_82542_rev2_0:
        case e1000_82542_rev2_1:
        case e1000_82543:
@@ -712,8 +854,7 @@ e1000_probe(struct pci_dev *pdev,
        case e1000_82546:
        case e1000_82546_rev_3:
        case e1000_82571:
-               if((E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_FUNC_1)
-                  && (adapter->hw.media_type == e1000_media_type_copper)) {
+               if (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_FUNC_1) {
                        e1000_read_eeprom(&adapter->hw,
                                EEPROM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
                        break;
@@ -724,31 +865,42 @@ e1000_probe(struct pci_dev *pdev,
                        EEPROM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
                break;
        }
-       if(eeprom_data & eeprom_apme_mask)
+       if (eeprom_data & eeprom_apme_mask)
                adapter->wol |= E1000_WUFC_MAG;
 
+       /* print bus type/speed/width info */
+       {
+       struct e1000_hw *hw = &adapter->hw;
+       DPRINTK(PROBE, INFO, "(PCI%s:%s:%s) ",
+               ((hw->bus_type == e1000_bus_type_pcix) ? "-X" :
+                (hw->bus_type == e1000_bus_type_pci_express ? " Express":"")),
+               ((hw->bus_speed == e1000_bus_speed_2500) ? "2.5Gb/s" :
+                (hw->bus_speed == e1000_bus_speed_133) ? "133MHz" :
+                (hw->bus_speed == e1000_bus_speed_120) ? "120MHz" :
+                (hw->bus_speed == e1000_bus_speed_100) ? "100MHz" :
+                (hw->bus_speed == e1000_bus_speed_66) ? "66MHz" : "33MHz"),
+               ((hw->bus_width == e1000_bus_width_64) ? "64-bit" :
+                (hw->bus_width == e1000_bus_width_pciex_4) ? "Width x4" :
+                (hw->bus_width == e1000_bus_width_pciex_1) ? "Width x1" :
+                "32-bit"));
+       }
+
+       for (i = 0; i < 6; i++)
+               printk("%2.2x%c", netdev->dev_addr[i], i == 5 ? '\n' : ':');
+
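(Illustration, not part of the patch: the three ternary chains expand straight from the e1000_hw bus fields, so a copper adapter on a 64-bit, 133 MHz PCI-X bus would be reported as "(PCI-X:133MHz:64-bit)" and a PCI Express part as "(PCI Express:2.5Gb/s:Width x4)"; the printk() loop above then appends the colon-separated MAC address to that line.)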
        /* reset the hardware with the new settings */
        e1000_reset(adapter);
 
-       /* Let firmware know the driver has taken over */
-       switch(adapter->hw.mac_type) {
-       case e1000_82571:
-       case e1000_82572:
-               ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT);
-               E1000_WRITE_REG(&adapter->hw, CTRL_EXT,
-                               ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
-               break;
-       case e1000_82573:
-               swsm = E1000_READ_REG(&adapter->hw, SWSM);
-               E1000_WRITE_REG(&adapter->hw, SWSM,
-                               swsm | E1000_SWSM_DRV_LOAD);
-               break;
-       default:
-               break;
-       }
+       /* If the controller is 82573 and f/w is AMT, do not set
+        * DRV_LOAD until the interface is up.  For all other cases,
+        * let the f/w know that the h/w is now under the control
+        * of the driver. */
+       if (adapter->hw.mac_type != e1000_82573 ||
+           !e1000_check_mng_mode(&adapter->hw))
+               e1000_get_hw_control(adapter);
 
        strcpy(netdev->name, "eth%d");
-       if((err = register_netdev(netdev)))
+       if ((err = register_netdev(netdev)))
                goto err_register;
 
        DPRINTK(PROBE, INFO, "Intel(R) PRO/1000 Network Connection\n");
@@ -782,47 +934,33 @@ e1000_remove(struct pci_dev *pdev)
 {
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct e1000_adapter *adapter = netdev_priv(netdev);
-       uint32_t ctrl_ext;
-       uint32_t manc, swsm;
+       uint32_t manc;
 #ifdef CONFIG_E1000_NAPI
        int i;
 #endif
 
        flush_scheduled_work();
 
-       if(adapter->hw.mac_type >= e1000_82540 &&
+       if (adapter->hw.mac_type >= e1000_82540 &&
           adapter->hw.media_type == e1000_media_type_copper) {
                manc = E1000_READ_REG(&adapter->hw, MANC);
-               if(manc & E1000_MANC_SMBUS_EN) {
+               if (manc & E1000_MANC_SMBUS_EN) {
                        manc |= E1000_MANC_ARP_EN;
                        E1000_WRITE_REG(&adapter->hw, MANC, manc);
                }
        }
 
-       switch(adapter->hw.mac_type) {
-       case e1000_82571:
-       case e1000_82572:
-               ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT);
-               E1000_WRITE_REG(&adapter->hw, CTRL_EXT,
-                               ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
-               break;
-       case e1000_82573:
-               swsm = E1000_READ_REG(&adapter->hw, SWSM);
-               E1000_WRITE_REG(&adapter->hw, SWSM,
-                               swsm & ~E1000_SWSM_DRV_LOAD);
-               break;
-
-       default:
-               break;
-       }
+       /* Release control of h/w to f/w.  If f/w is AMT enabled, this
+        * would have already happened in close and is redundant. */
+       e1000_release_hw_control(adapter);
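(Illustration, not part of the patch: the open-coded DRV_LOAD handshakes removed above are now funnelled through e1000_get_hw_control()/e1000_release_hw_control(). A minimal sketch of the "get" side, assuming the helpers simply wrap the register toggles deleted in these hunks:

static void
e1000_get_hw_control(struct e1000_adapter *adapter)
{
        uint32_t ctrl_ext;
        uint32_t swsm;

        /* Let firmware know the driver has taken over */
        switch (adapter->hw.mac_type) {
        case e1000_82571:
        case e1000_82572:
                ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT);
                E1000_WRITE_REG(&adapter->hw, CTRL_EXT,
                                ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
                break;
        case e1000_82573:
                swsm = E1000_READ_REG(&adapter->hw, SWSM);
                E1000_WRITE_REG(&adapter->hw, SWSM,
                                swsm | E1000_SWSM_DRV_LOAD);
                break;
        default:
                break;
        }
}

e1000_release_hw_control() would be the mirror image, clearing E1000_CTRL_EXT_DRV_LOAD or E1000_SWSM_DRV_LOAD just as the code removed from e1000_remove() above did.)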
 
        unregister_netdev(netdev);
 #ifdef CONFIG_E1000_NAPI
-       for (i = 0; i < adapter->num_queues; i++)
+       for (i = 0; i < adapter->num_rx_queues; i++)
                __dev_put(&adapter->polling_netdev[i]);
 #endif
 
-       if(!e1000_check_phy_reset_block(&adapter->hw))
+       if (!e1000_check_phy_reset_block(&adapter->hw))
                e1000_phy_hw_reset(&adapter->hw);
 
        kfree(adapter->tx_ring);
@@ -881,19 +1019,19 @@ e1000_sw_init(struct e1000_adapter *adapter)
 
        /* identify the MAC */
 
-       if(e1000_set_mac_type(hw)) {
+       if (e1000_set_mac_type(hw)) {
                DPRINTK(PROBE, ERR, "Unknown MAC Type\n");
                return -EIO;
        }
 
        /* initialize eeprom parameters */
 
-       if(e1000_init_eeprom_params(hw)) {
+       if (e1000_init_eeprom_params(hw)) {
                E1000_ERR("EEPROM initialization failed\n");
                return -EIO;
        }
 
-       switch(hw->mac_type) {
+       switch (hw->mac_type) {
        default:
                break;
        case e1000_82541:
@@ -912,7 +1050,7 @@ e1000_sw_init(struct e1000_adapter *adapter)
 
        /* Copper options */
 
-       if(hw->media_type == e1000_media_type_copper) {
+       if (hw->media_type == e1000_media_type_copper) {
                hw->mdix = AUTO_ALL_MODES;
                hw->disable_polarity_correction = FALSE;
                hw->master_slave = E1000_MASTER_SLAVE;
@@ -923,15 +1061,34 @@ e1000_sw_init(struct e1000_adapter *adapter)
        switch (hw->mac_type) {
        case e1000_82571:
        case e1000_82572:
-               adapter->num_queues = 2;
+               /* These controllers support 2 tx queues, but with a single
+                * qdisc implementation, multiple tx queues aren't quite as
+                * interesting.  If we can find a logical way of mapping
+                * flows to a queue, then perhaps we can up the num_tx_queue
+                * count back to its default.  Until then, we run the risk of
+                * terrible performance due to SACK overload. */
+               adapter->num_tx_queues = 1;
+               adapter->num_rx_queues = 2;
                break;
        default:
-               adapter->num_queues = 1;
+               adapter->num_tx_queues = 1;
+               adapter->num_rx_queues = 1;
                break;
        }
-       adapter->num_queues = min(adapter->num_queues, num_online_cpus());
+       adapter->num_rx_queues = min(adapter->num_rx_queues, num_online_cpus());
+       adapter->num_tx_queues = min(adapter->num_tx_queues, num_online_cpus());
+       DPRINTK(DRV, INFO, "Multiqueue Enabled: Rx Queue count = %u %s\n",
+               adapter->num_rx_queues,
+               ((adapter->num_rx_queues == 1)
+                ? ((num_online_cpus() > 1)
+                       ? "(due to unsupported feature in current adapter)"
+                       : "(due to unsupported system configuration)")
+                : ""));
+       DPRINTK(DRV, INFO, "Multiqueue Enabled: Tx Queue count = %u\n",
+               adapter->num_tx_queues);
 #else
-       adapter->num_queues = 1;
+       adapter->num_tx_queues = 1;
+       adapter->num_rx_queues = 1;
 #endif
 
        if (e1000_alloc_queues(adapter)) {
@@ -940,17 +1097,14 @@ e1000_sw_init(struct e1000_adapter *adapter)
        }
 
 #ifdef CONFIG_E1000_NAPI
-       for (i = 0; i < adapter->num_queues; i++) {
+       for (i = 0; i < adapter->num_rx_queues; i++) {
                adapter->polling_netdev[i].priv = adapter;
                adapter->polling_netdev[i].poll = &e1000_clean;
                adapter->polling_netdev[i].weight = 64;
                dev_hold(&adapter->polling_netdev[i]);
                set_bit(__LINK_STATE_START, &adapter->polling_netdev[i].state);
        }
-#endif
-
-#ifdef CONFIG_E1000_MQ
-       e1000_setup_queue_mapping(adapter);
+       spin_lock_init(&adapter->tx_queue_lock);
 #endif
 
        atomic_set(&adapter->irq_sem, 1);
@@ -973,13 +1127,13 @@ e1000_alloc_queues(struct e1000_adapter *adapter)
 {
        int size;
 
-       size = sizeof(struct e1000_tx_ring) * adapter->num_queues;
+       size = sizeof(struct e1000_tx_ring) * adapter->num_tx_queues;
        adapter->tx_ring = kmalloc(size, GFP_KERNEL);
        if (!adapter->tx_ring)
                return -ENOMEM;
        memset(adapter->tx_ring, 0, size);
 
-       size = sizeof(struct e1000_rx_ring) * adapter->num_queues;
+       size = sizeof(struct e1000_rx_ring) * adapter->num_rx_queues;
        adapter->rx_ring = kmalloc(size, GFP_KERNEL);
        if (!adapter->rx_ring) {
                kfree(adapter->tx_ring);
@@ -988,7 +1142,7 @@ e1000_alloc_queues(struct e1000_adapter *adapter)
        memset(adapter->rx_ring, 0, size);
 
 #ifdef CONFIG_E1000_NAPI
-       size = sizeof(struct net_device) * adapter->num_queues;
+       size = sizeof(struct net_device) * adapter->num_rx_queues;
        adapter->polling_netdev = kmalloc(size, GFP_KERNEL);
        if (!adapter->polling_netdev) {
                kfree(adapter->tx_ring);
@@ -998,6 +1152,14 @@ e1000_alloc_queues(struct e1000_adapter *adapter)
        memset(adapter->polling_netdev, 0, size);
 #endif
 
+#ifdef CONFIG_E1000_MQ
+       adapter->rx_sched_call_data.func = e1000_rx_schedule;
+       adapter->rx_sched_call_data.info = adapter->netdev;
+
+       adapter->cpu_netdev = alloc_percpu(struct net_device *);
+       adapter->cpu_tx_ring = alloc_percpu(struct e1000_tx_ring *);
+#endif
+
        return E1000_SUCCESS;
 }
 
@@ -1017,14 +1179,15 @@ e1000_setup_queue_mapping(struct e1000_adapter *adapter)
        lock_cpu_hotplug();
        i = 0;
        for_each_online_cpu(cpu) {
-               *per_cpu_ptr(adapter->cpu_tx_ring, cpu) = &adapter->tx_ring[i % adapter->num_queues];
+               *per_cpu_ptr(adapter->cpu_tx_ring, cpu) = &adapter->tx_ring[i % adapter->num_tx_queues];
                /* This is incomplete because we'd like to assign separate
                 * physical cpus to these netdev polling structures and
                 * avoid saturating a subset of cpus.
                 */
-               if (i < adapter->num_queues) {
+               if (i < adapter->num_rx_queues) {
                        *per_cpu_ptr(adapter->cpu_netdev, cpu) = &adapter->polling_netdev[i];
-                       adapter->cpu_for_queue[i] = cpu;
+                       adapter->rx_ring[i].cpu = cpu;
+                       cpu_set(cpu, adapter->cpumask);
                } else
                        *per_cpu_ptr(adapter->cpu_netdev, cpu) = NULL;
 
@@ -1063,14 +1226,20 @@ e1000_open(struct net_device *netdev)
        if ((err = e1000_setup_all_rx_resources(adapter)))
                goto err_setup_rx;
 
-       if((err = e1000_up(adapter)))
+       if ((err = e1000_up(adapter)))
                goto err_up;
        adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
-       if((adapter->hw.mng_cookie.status &
+       if ((adapter->hw.mng_cookie.status &
                          E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) {
                e1000_update_mng_vlan(adapter);
        }
 
+       /* If AMT is enabled, let the firmware know that the network
+        * interface is now open */
+       if (adapter->hw.mac_type == e1000_82573 &&
+           e1000_check_mng_mode(&adapter->hw))
+               e1000_get_hw_control(adapter);
+
        return E1000_SUCCESS;
 
 err_up:
@@ -1105,10 +1274,17 @@ e1000_close(struct net_device *netdev)
        e1000_free_all_tx_resources(adapter);
        e1000_free_all_rx_resources(adapter);
 
-       if((adapter->hw.mng_cookie.status &
+       if ((adapter->hw.mng_cookie.status &
                          E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) {
                e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
        }
+
+       /* If AMT is enabled, let the firmware know that the network
+        * interface is now closed */
+       if (adapter->hw.mac_type == e1000_82573 &&
+           e1000_check_mng_mode(&adapter->hw))
+               e1000_release_hw_control(adapter);
+
        return 0;
 }
 
@@ -1153,7 +1329,7 @@ e1000_setup_tx_resources(struct e1000_adapter *adapter,
        size = sizeof(struct e1000_buffer) * txdr->count;
 
        txdr->buffer_info = vmalloc_node(size, pcibus_to_node(pdev->bus));
-       if(!txdr->buffer_info) {
+       if (!txdr->buffer_info) {
                DPRINTK(PROBE, ERR,
                "Unable to allocate memory for the transmit descriptor ring\n");
                return -ENOMEM;
@@ -1166,7 +1342,7 @@ e1000_setup_tx_resources(struct e1000_adapter *adapter,
        E1000_ROUNDUP(txdr->size, 4096);
 
        txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma);
-       if(!txdr->desc) {
+       if (!txdr->desc) {
 setup_tx_desc_die:
                vfree(txdr->buffer_info);
                DPRINTK(PROBE, ERR,
@@ -1182,8 +1358,8 @@ setup_tx_desc_die:
                                     "at %p\n", txdr->size, txdr->desc);
                /* Try again, without freeing the previous */
                txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma);
-               if(!txdr->desc) {
                /* Failed allocation, critical failure */
+               if (!txdr->desc) {
                        pci_free_consistent(pdev, txdr->size, olddesc, olddma);
                        goto setup_tx_desc_die;
                }
@@ -1229,7 +1405,7 @@ e1000_setup_all_tx_resources(struct e1000_adapter *adapter)
 {
        int i, err = 0;
 
-       for (i = 0; i < adapter->num_queues; i++) {
+       for (i = 0; i < adapter->num_tx_queues; i++) {
                err = e1000_setup_tx_resources(adapter, &adapter->tx_ring[i]);
                if (err) {
                        DPRINTK(PROBE, ERR,
@@ -1254,10 +1430,11 @@ e1000_configure_tx(struct e1000_adapter *adapter)
        uint64_t tdba;
        struct e1000_hw *hw = &adapter->hw;
        uint32_t tdlen, tctl, tipg, tarc;
+       uint32_t ipgr1, ipgr2;
 
        /* Setup the HW Tx Head and Tail descriptor pointers */
 
-       switch (adapter->num_queues) {
+       switch (adapter->num_tx_queues) {
        case 2:
                tdba = adapter->tx_ring[1].dma;
                tdlen = adapter->tx_ring[1].count *
@@ -1287,22 +1464,26 @@ e1000_configure_tx(struct e1000_adapter *adapter)
 
        /* Set the default values for the Tx Inter Packet Gap timer */
 
+       if (hw->media_type == e1000_media_type_fiber ||
+           hw->media_type == e1000_media_type_internal_serdes)
+               tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
+       else
+               tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
+
        switch (hw->mac_type) {
        case e1000_82542_rev2_0:
        case e1000_82542_rev2_1:
                tipg = DEFAULT_82542_TIPG_IPGT;
-               tipg |= DEFAULT_82542_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
-               tipg |= DEFAULT_82542_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
+               ipgr1 = DEFAULT_82542_TIPG_IPGR1;
+               ipgr2 = DEFAULT_82542_TIPG_IPGR2;
                break;
        default:
-               if (hw->media_type == e1000_media_type_fiber ||
-                   hw->media_type == e1000_media_type_internal_serdes)
-                       tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
-               else
-                       tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
-               tipg |= DEFAULT_82543_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
-               tipg |= DEFAULT_82543_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
+               ipgr1 = DEFAULT_82543_TIPG_IPGR1;
+               ipgr2 = DEFAULT_82543_TIPG_IPGR2;
+               break;
        }
+       tipg |= ipgr1 << E1000_TIPG_IPGR1_SHIFT;
+       tipg |= ipgr2 << E1000_TIPG_IPGR2_SHIFT;
        E1000_WRITE_REG(hw, TIPG, tipg);
 
        /* Set the Tx Interrupt Delay register */
@@ -1378,7 +1559,7 @@ e1000_setup_rx_resources(struct e1000_adapter *adapter,
 
        size = sizeof(struct e1000_ps_page) * rxdr->count;
        rxdr->ps_page = kmalloc(size, GFP_KERNEL);
-       if(!rxdr->ps_page) {
+       if (!rxdr->ps_page) {
                vfree(rxdr->buffer_info);
                DPRINTK(PROBE, ERR,
                "Unable to allocate memory for the receive descriptor ring\n");
@@ -1388,7 +1569,7 @@ e1000_setup_rx_resources(struct e1000_adapter *adapter,
 
        size = sizeof(struct e1000_ps_page_dma) * rxdr->count;
        rxdr->ps_page_dma = kmalloc(size, GFP_KERNEL);
-       if(!rxdr->ps_page_dma) {
+       if (!rxdr->ps_page_dma) {
                vfree(rxdr->buffer_info);
                kfree(rxdr->ps_page);
                DPRINTK(PROBE, ERR,
@@ -1397,7 +1578,7 @@ e1000_setup_rx_resources(struct e1000_adapter *adapter,
        }
        memset(rxdr->ps_page_dma, 0, size);
 
-       if(adapter->hw.mac_type <= e1000_82547_rev_2)
+       if (adapter->hw.mac_type <= e1000_82547_rev_2)
                desc_len = sizeof(struct e1000_rx_desc);
        else
                desc_len = sizeof(union e1000_rx_desc_packet_split);
@@ -1454,6 +1635,8 @@ setup_rx_desc_die:
 
        rxdr->next_to_clean = 0;
        rxdr->next_to_use = 0;
+       rxdr->rx_skb_top = NULL;
+       rxdr->rx_skb_prev = NULL;
 
        return 0;
 }
@@ -1475,7 +1658,7 @@ e1000_setup_all_rx_resources(struct e1000_adapter *adapter)
 {
        int i, err = 0;
 
-       for (i = 0; i < adapter->num_queues; i++) {
+       for (i = 0; i < adapter->num_rx_queues; i++) {
                err = e1000_setup_rx_resources(adapter, &adapter->rx_ring[i]);
                if (err) {
                        DPRINTK(PROBE, ERR,
@@ -1498,7 +1681,7 @@ e1000_setup_rctl(struct e1000_adapter *adapter)
 {
        uint32_t rctl, rfctl;
        uint32_t psrctl = 0;
-#ifdef CONFIG_E1000_PACKET_SPLIT
+#ifndef CONFIG_E1000_DISABLE_PACKET_SPLIT
        uint32_t pages = 0;
 #endif
 
@@ -1510,7 +1693,10 @@ e1000_setup_rctl(struct e1000_adapter *adapter)
                E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF |
                (adapter->hw.mc_filter_type << E1000_RCTL_MO_SHIFT);
 
-       if(adapter->hw.tbi_compatibility_on == 1)
+       if (adapter->hw.mac_type > e1000_82543)
+               rctl |= E1000_RCTL_SECRC;
+
+       if (adapter->hw.tbi_compatibility_on == 1)
                rctl |= E1000_RCTL_SBP;
        else
                rctl &= ~E1000_RCTL_SBP;
@@ -1521,32 +1707,17 @@ e1000_setup_rctl(struct e1000_adapter *adapter)
                rctl |= E1000_RCTL_LPE;
 
        /* Setup buffer sizes */
-       if(adapter->hw.mac_type >= e1000_82571) {
+       if (adapter->hw.mac_type >= e1000_82571) {
                /* We can now specify buffers in 1K increments.
                 * BSIZE and BSEX are ignored in this case. */
                rctl |= adapter->rx_buffer_len << 0x11;
        } else {
                rctl &= ~E1000_RCTL_SZ_4096;
-               rctl |= E1000_RCTL_BSEX; 
-               switch (adapter->rx_buffer_len) {
-               case E1000_RXBUFFER_2048:
-               default:
-                       rctl |= E1000_RCTL_SZ_2048;
-                       rctl &= ~E1000_RCTL_BSEX;
-                       break;
-               case E1000_RXBUFFER_4096:
-                       rctl |= E1000_RCTL_SZ_4096;
-                       break;
-               case E1000_RXBUFFER_8192:
-                       rctl |= E1000_RCTL_SZ_8192;
-                       break;
-               case E1000_RXBUFFER_16384:
-                       rctl |= E1000_RCTL_SZ_16384;
-                       break;
-               }
+               rctl &= ~E1000_RCTL_BSEX;
+               rctl |= E1000_RCTL_SZ_2048;
        }
 
-#ifdef CONFIG_E1000_PACKET_SPLIT
+#ifndef CONFIG_E1000_DISABLE_PACKET_SPLIT
        /* 82571 and greater support packet-split where the protocol
         * header is placed in skb->data and the packet data is
         * placed in pages hanging off of skb_shinfo(skb)->nr_frags.
@@ -1570,7 +1741,7 @@ e1000_setup_rctl(struct e1000_adapter *adapter)
                E1000_WRITE_REG(&adapter->hw, RFCTL, rfctl);
 
                rctl |= E1000_RCTL_DTYP_PS | E1000_RCTL_SECRC;
-               
+
                psrctl |= adapter->rx_ps_bsize0 >>
                        E1000_PSRCTL_BSIZE0_SHIFT;
 
@@ -1632,22 +1803,27 @@ e1000_configure_rx(struct e1000_adapter *adapter)
 
        if (hw->mac_type >= e1000_82540) {
                E1000_WRITE_REG(hw, RADV, adapter->rx_abs_int_delay);
-               if(adapter->itr > 1)
+               if (adapter->itr > 1)
                        E1000_WRITE_REG(hw, ITR,
                                1000000000 / (adapter->itr * 256));
        }
 
        if (hw->mac_type >= e1000_82571) {
-               /* Reset delay timers after every interrupt */
                ctrl_ext = E1000_READ_REG(hw, CTRL_EXT);
+               /* Reset delay timers after every interrupt */
                ctrl_ext |= E1000_CTRL_EXT_CANC;
+#ifdef CONFIG_E1000_NAPI
+               /* Auto-Mask interrupts upon ICR read. */
+               ctrl_ext |= E1000_CTRL_EXT_IAME;
+#endif
                E1000_WRITE_REG(hw, CTRL_EXT, ctrl_ext);
+               E1000_WRITE_REG(hw, IAM, ~0);
                E1000_WRITE_FLUSH(hw);
        }
 
        /* Setup the HW Rx Head and Tail Descriptor Pointers and
         * the Base and Length of the Rx Descriptor Ring */
-       switch (adapter->num_queues) {
+       switch (adapter->num_rx_queues) {
 #ifdef CONFIG_E1000_MQ
        case 2:
                rdba = adapter->rx_ring[1].dma;
@@ -1674,7 +1850,7 @@ e1000_configure_rx(struct e1000_adapter *adapter)
        }
 
 #ifdef CONFIG_E1000_MQ
-       if (adapter->num_queues > 1) {
+       if (adapter->num_rx_queues > 1) {
                uint32_t random[10];
 
                get_random_bytes(&random[0], 40);
@@ -1684,7 +1860,7 @@ e1000_configure_rx(struct e1000_adapter *adapter)
                        E1000_WRITE_REG(hw, RSSIM, 0);
                }
 
-               switch (adapter->num_queues) {
+               switch (adapter->num_rx_queues) {
                case 2:
                default:
                        reta = 0x00800080;
@@ -1716,13 +1892,13 @@ e1000_configure_rx(struct e1000_adapter *adapter)
        /* Enable 82543 Receive Checksum Offload for TCP and UDP */
        if (hw->mac_type >= e1000_82543) {
                rxcsum = E1000_READ_REG(hw, RXCSUM);
-               if(adapter->rx_csum == TRUE) {
+               if (adapter->rx_csum == TRUE) {
                        rxcsum |= E1000_RXCSUM_TUOFL;
 
                        /* Enable 82571 IPv4 payload checksum for UDP fragments
                         * Must be used in conjunction with packet-split. */
-                       if ((hw->mac_type >= e1000_82571) && 
-                          (adapter->rx_ps_pages)) {
+                       if ((hw->mac_type >= e1000_82571) &&
+                           (adapter->rx_ps_pages)) {
                                rxcsum |= E1000_RXCSUM_IPPCSE;
                        }
                } else {
@@ -1776,7 +1952,7 @@ e1000_free_all_tx_resources(struct e1000_adapter *adapter)
 {
        int i;
 
-       for (i = 0; i < adapter->num_queues; i++)
+       for (i = 0; i < adapter->num_tx_queues; i++)
                e1000_free_tx_resources(adapter, &adapter->tx_ring[i]);
 }
 
@@ -1784,17 +1960,15 @@ static inline void
 e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter,
                        struct e1000_buffer *buffer_info)
 {
-       if(buffer_info->dma) {
+       if (buffer_info->dma) {
                pci_unmap_page(adapter->pdev,
                                buffer_info->dma,
                                buffer_info->length,
                                PCI_DMA_TODEVICE);
-               buffer_info->dma = 0;
        }
-       if(buffer_info->skb) {
+       if (buffer_info->skb)
                dev_kfree_skb_any(buffer_info->skb);
-               buffer_info->skb = NULL;
-       }
+       memset(buffer_info, 0, sizeof(struct e1000_buffer));
 }
 
 /**
@@ -1813,7 +1987,7 @@ e1000_clean_tx_ring(struct e1000_adapter *adapter,
 
        /* Free all the Tx ring sk_buffs */
 
-       for(i = 0; i < tx_ring->count; i++) {
+       for (i = 0; i < tx_ring->count; i++) {
                buffer_info = &tx_ring->buffer_info[i];
                e1000_unmap_and_free_tx_resource(adapter, buffer_info);
        }
@@ -1843,7 +2017,7 @@ e1000_clean_all_tx_rings(struct e1000_adapter *adapter)
 {
        int i;
 
-       for (i = 0; i < adapter->num_queues; i++)
+       for (i = 0; i < adapter->num_tx_queues; i++)
                e1000_clean_tx_ring(adapter, &adapter->tx_ring[i]);
 }
 
@@ -1887,7 +2061,7 @@ e1000_free_all_rx_resources(struct e1000_adapter *adapter)
 {
        int i;
 
-       for (i = 0; i < adapter->num_queues; i++)
+       for (i = 0; i < adapter->num_rx_queues; i++)
                e1000_free_rx_resources(adapter, &adapter->rx_ring[i]);
 }
 
@@ -1909,12 +2083,9 @@ e1000_clean_rx_ring(struct e1000_adapter *adapter,
        unsigned int i, j;
 
        /* Free all the Rx ring sk_buffs */
-
-       for(i = 0; i < rx_ring->count; i++) {
+       for (i = 0; i < rx_ring->count; i++) {
                buffer_info = &rx_ring->buffer_info[i];
-               if(buffer_info->skb) {
-                       ps_page = &rx_ring->ps_page[i];
-                       ps_page_dma = &rx_ring->ps_page_dma[i];
+               if (buffer_info->skb) {
                        pci_unmap_single(pdev,
                                         buffer_info->dma,
                                         buffer_info->length,
@@ -1922,19 +2093,30 @@ e1000_clean_rx_ring(struct e1000_adapter *adapter,
 
                        dev_kfree_skb(buffer_info->skb);
                        buffer_info->skb = NULL;
-
-                       for(j = 0; j < adapter->rx_ps_pages; j++) {
-                               if(!ps_page->ps_page[j]) break;
-                               pci_unmap_single(pdev,
-                                                ps_page_dma->ps_page_dma[j],
-                                                PAGE_SIZE, PCI_DMA_FROMDEVICE);
-                               ps_page_dma->ps_page_dma[j] = 0;
-                               put_page(ps_page->ps_page[j]);
-                               ps_page->ps_page[j] = NULL;
-                       }
                }
+               ps_page = &rx_ring->ps_page[i];
+               ps_page_dma = &rx_ring->ps_page_dma[i];
+               for (j = 0; j < adapter->rx_ps_pages; j++) {
+                       if (!ps_page->ps_page[j]) break;
+                       pci_unmap_page(pdev,
+                                      ps_page_dma->ps_page_dma[j],
+                                      PAGE_SIZE, PCI_DMA_FROMDEVICE);
+                       ps_page_dma->ps_page_dma[j] = 0;
+                       put_page(ps_page->ps_page[j]);
+                       ps_page->ps_page[j] = NULL;
+               }
+       }
+
+       /* there may also be some cached data in our adapter */
+       if (rx_ring->rx_skb_top) {
+               dev_kfree_skb(rx_ring->rx_skb_top);
+
+               /* rx_skb_prev will be wiped out by rx_skb_top */
+               rx_ring->rx_skb_top = NULL;
+               rx_ring->rx_skb_prev = NULL;
        }
 
        size = sizeof(struct e1000_buffer) * rx_ring->count;
        memset(rx_ring->buffer_info, 0, size);
        size = sizeof(struct e1000_ps_page) * rx_ring->count;
@@ -1963,7 +2145,7 @@ e1000_clean_all_rx_rings(struct e1000_adapter *adapter)
 {
        int i;
 
-       for (i = 0; i < adapter->num_queues; i++)
+       for (i = 0; i < adapter->num_rx_queues; i++)
                e1000_clean_rx_ring(adapter, &adapter->rx_ring[i]);
 }
 
@@ -1984,7 +2166,7 @@ e1000_enter_82542_rst(struct e1000_adapter *adapter)
        E1000_WRITE_FLUSH(&adapter->hw);
        mdelay(5);
 
-       if(netif_running(netdev))
+       if (netif_running(netdev))
                e1000_clean_all_rx_rings(adapter);
 }
 
@@ -2000,12 +2182,14 @@ e1000_leave_82542_rst(struct e1000_adapter *adapter)
        E1000_WRITE_FLUSH(&adapter->hw);
        mdelay(5);
 
-       if(adapter->hw.pci_cmd_word & PCI_COMMAND_INVALIDATE)
+       if (adapter->hw.pci_cmd_word & PCI_COMMAND_INVALIDATE)
                e1000_pci_set_mwi(&adapter->hw);
 
-       if(netif_running(netdev)) {
+       if (netif_running(netdev)) {
+               /* No need to loop, because 82542 supports only 1 queue */
+               struct e1000_rx_ring *ring = &adapter->rx_ring[0];
                e1000_configure_rx(adapter);
-               e1000_alloc_rx_buffers(adapter, &adapter->rx_ring[0]);
+               adapter->alloc_rx_buf(adapter, ring, E1000_DESC_UNUSED(ring));
        }
 }
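(Illustration, not part of the patch: the refill above is sized with E1000_DESC_UNUSED(ring), which is defined in the driver header rather than in this file. A plausible definition, assuming the usual producer/consumer ring bookkeeping where one slot is kept empty so that full and empty rings can be told apart:

#define E1000_DESC_UNUSED(R) \
        ((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
         (R)->next_to_clean - (R)->next_to_use - 1)

With next_to_use and next_to_clean equal, i.e. an idle ring, this yields count - 1: every descriptor but the reserved slot is handed to alloc_rx_buf().)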
 
@@ -2023,12 +2207,12 @@ e1000_set_mac(struct net_device *netdev, void *p)
        struct e1000_adapter *adapter = netdev_priv(netdev);
        struct sockaddr *addr = p;
 
-       if(!is_valid_ether_addr(addr->sa_data))
+       if (!is_valid_ether_addr(addr->sa_data))
                return -EADDRNOTAVAIL;
 
        /* 82542 2.0 needs to be in reset to write receive address registers */
 
-       if(adapter->hw.mac_type == e1000_82542_rev2_0)
+       if (adapter->hw.mac_type == e1000_82542_rev2_0)
                e1000_enter_82542_rst(adapter);
 
        memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
@@ -2042,17 +2226,17 @@ e1000_set_mac(struct net_device *netdev, void *p)
                /* activate the work around */
                adapter->hw.laa_is_present = 1;
 
-               /* Hold a copy of the LAA in RAR[14] This is done so that 
-                * between the time RAR[0] gets clobbered  and the time it 
-                * gets fixed (in e1000_watchdog), the actual LAA is in one 
+               /* Hold a copy of the LAA in RAR[14].  This is done so that
+                * between the time RAR[0] gets clobbered and the time it
+                * gets fixed (in e1000_watchdog), the actual LAA is in one
                 * of the RARs and no incoming packets directed to this port
-                * are dropped. Eventaully the LAA will be in RAR[0] and 
+                * are dropped. Eventually the LAA will be in RAR[0] and
                 * RAR[14] */
-               e1000_rar_set(&adapter->hw, adapter->hw.mac_addr, 
+               e1000_rar_set(&adapter->hw, adapter->hw.mac_addr,
                                        E1000_RAR_ENTRIES - 1);
        }
 
-       if(adapter->hw.mac_type == e1000_82542_rev2_0)
+       if (adapter->hw.mac_type == e1000_82542_rev2_0)
                e1000_leave_82542_rst(adapter);
 
        return 0;
@@ -2086,9 +2270,9 @@ e1000_set_multi(struct net_device *netdev)
 
        rctl = E1000_READ_REG(hw, RCTL);
 
-       if(netdev->flags & IFF_PROMISC) {
+       if (netdev->flags & IFF_PROMISC) {
                rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
-       } else if(netdev->flags & IFF_ALLMULTI) {
+       } else if (netdev->flags & IFF_ALLMULTI) {
                rctl |= E1000_RCTL_MPE;
                rctl &= ~E1000_RCTL_UPE;
        } else {
@@ -2099,7 +2283,7 @@ e1000_set_multi(struct net_device *netdev)
 
        /* 82542 2.0 needs to be in reset to write receive address registers */
 
-       if(hw->mac_type == e1000_82542_rev2_0)
+       if (hw->mac_type == e1000_82542_rev2_0)
                e1000_enter_82542_rst(adapter);
 
        /* load the first 14 multicast address into the exact filters 1-14
@@ -2109,7 +2293,7 @@ e1000_set_multi(struct net_device *netdev)
         */
        mc_ptr = netdev->mc_list;
 
-       for(i = 1; i < rar_entries; i++) {
+       for (i = 1; i < rar_entries; i++) {
                if (mc_ptr) {
                        e1000_rar_set(hw, mc_ptr->dmi_addr, i);
                        mc_ptr = mc_ptr->next;
@@ -2121,17 +2305,17 @@ e1000_set_multi(struct net_device *netdev)
 
        /* clear the old settings from the multicast hash table */
 
-       for(i = 0; i < E1000_NUM_MTA_REGISTERS; i++)
+       for (i = 0; i < E1000_NUM_MTA_REGISTERS; i++)
                E1000_WRITE_REG_ARRAY(hw, MTA, i, 0);
 
        /* load any remaining addresses into the hash table */
 
-       for(; mc_ptr; mc_ptr = mc_ptr->next) {
+       for (; mc_ptr; mc_ptr = mc_ptr->next) {
                hash_value = e1000_hash_mc_addr(hw, mc_ptr->dmi_addr);
                e1000_mta_set(hw, hash_value);
        }
 
-       if(hw->mac_type == e1000_82542_rev2_0)
+       if (hw->mac_type == e1000_82542_rev2_0)
                e1000_leave_82542_rst(adapter);
 }
 
@@ -2157,8 +2341,8 @@ e1000_82547_tx_fifo_stall(unsigned long data)
        struct net_device *netdev = adapter->netdev;
        uint32_t tctl;
 
-       if(atomic_read(&adapter->tx_fifo_stall)) {
-               if((E1000_READ_REG(&adapter->hw, TDT) ==
+       if (atomic_read(&adapter->tx_fifo_stall)) {
+               if ((E1000_READ_REG(&adapter->hw, TDT) ==
                    E1000_READ_REG(&adapter->hw, TDH)) &&
                   (E1000_READ_REG(&adapter->hw, TDFT) ==
                    E1000_READ_REG(&adapter->hw, TDFH)) &&
@@ -2204,24 +2388,24 @@ static void
 e1000_watchdog_task(struct e1000_adapter *adapter)
 {
        struct net_device *netdev = adapter->netdev;
-       struct e1000_tx_ring *txdr = &adapter->tx_ring[0];
+       struct e1000_tx_ring *txdr = adapter->tx_ring;
        uint32_t link;
 
        e1000_check_for_link(&adapter->hw);
        if (adapter->hw.mac_type == e1000_82573) {
                e1000_enable_tx_pkt_filtering(&adapter->hw);
-               if(adapter->mng_vlan_id != adapter->hw.mng_cookie.vlan_id)
+               if (adapter->mng_vlan_id != adapter->hw.mng_cookie.vlan_id)
                        e1000_update_mng_vlan(adapter);
-       }       
+       }
 
-       if((adapter->hw.media_type == e1000_media_type_internal_serdes) &&
+       if ((adapter->hw.media_type == e1000_media_type_internal_serdes) &&
           !(E1000_READ_REG(&adapter->hw, TXCW) & E1000_TXCW_ANE))
                link = !adapter->hw.serdes_link_down;
        else
                link = E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU;
 
-       if(link) {
-               if(!netif_carrier_ok(netdev)) {
+       if (link) {
+               if (!netif_carrier_ok(netdev)) {
                        e1000_get_speed_and_duplex(&adapter->hw,
                                                   &adapter->link_speed,
                                                   &adapter->link_duplex);
@@ -2231,13 +2415,28 @@ e1000_watchdog_task(struct e1000_adapter *adapter)
                               adapter->link_duplex == FULL_DUPLEX ?
                               "Full Duplex" : "Half Duplex");
 
+                       /* tweak tx_queue_len according to speed/duplex */
+                       netdev->tx_queue_len = adapter->tx_queue_len;
+                       adapter->tx_timeout_factor = 1;
+                       if (adapter->link_duplex == HALF_DUPLEX) {
+                               switch (adapter->link_speed) {
+                               case SPEED_10:
+                                       netdev->tx_queue_len = 10;
+                                       adapter->tx_timeout_factor = 8;
+                                       break;
+                               case SPEED_100:
+                                       netdev->tx_queue_len = 100;
+                                       break;
+                               }
+                       }
+
                        netif_carrier_on(netdev);
                        netif_wake_queue(netdev);
                        mod_timer(&adapter->phy_info_timer, jiffies + 2 * HZ);
                        adapter->smartspeed = 0;
                }
        } else {
-               if(netif_carrier_ok(netdev)) {
+               if (netif_carrier_ok(netdev)) {
                        adapter->link_speed = 0;
                        adapter->link_duplex = 0;
                        DPRINTK(LINK, INFO, "NIC Link is Down\n");
@@ -2263,7 +2462,10 @@ e1000_watchdog_task(struct e1000_adapter *adapter)
 
        e1000_update_adaptive(&adapter->hw);
 
-       if (adapter->num_queues == 1 && !netif_carrier_ok(netdev)) {
+#ifdef CONFIG_E1000_MQ
+       txdr = *per_cpu_ptr(adapter->cpu_tx_ring, smp_processor_id());
+#endif
+       if (!netif_carrier_ok(netdev)) {
                if (E1000_DESC_UNUSED(txdr) + 1 < txdr->count) {
                        /* We've lost link, so the controller stops DMA,
                         * but we've got queued Tx work that's never going
@@ -2274,12 +2476,12 @@ e1000_watchdog_task(struct e1000_adapter *adapter)
        }
 
        /* Dynamic mode for Interrupt Throttle Rate (ITR) */
-       if(adapter->hw.mac_type >= e1000_82540 && adapter->itr == 1) {
+       if (adapter->hw.mac_type >= e1000_82540 && adapter->itr == 1) {
                /* Symmetric Tx/Rx gets a reduced ITR=2000; Total
                 * asymmetrical Tx or Rx gets ITR=8000; everyone
                 * else is between 2000-8000. */
                uint32_t goc = (adapter->gotcl + adapter->gorcl) / 10000;
-               uint32_t dif = (adapter->gotcl > adapter->gorcl ? 
+               uint32_t dif = (adapter->gotcl > adapter->gorcl ?
                        adapter->gotcl - adapter->gorcl :
                        adapter->gorcl - adapter->gotcl) / 10000;
                uint32_t itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000;
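(Worked example for the heuristic above, using hypothetical counters: gotcl = 300,000 and gorcl = 100,000 bytes in the sampling window give goc = 400,000/10,000 = 40 and dif = 200,000/10,000 = 20, so itr = 20 * 6000 / 40 + 2000 = 5000 interrupts/sec. Perfectly symmetric traffic, dif = 0, collapses to the 2000 floor, and completely one-sided traffic, dif == goc, hits the 8000 ceiling, matching the comment.)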
@@ -2292,7 +2494,7 @@ e1000_watchdog_task(struct e1000_adapter *adapter)
        /* Force detection of hung controller every watchdog period */
        adapter->detect_tx_hung = TRUE;
 
-       /* With 82571 controllers, LAA may be overwritten due to controller 
+       /* With 82571 controllers, LAA may be overwritten due to controller
         * reset from the other port. Set the appropriate LAA in RAR[0] */
        if (adapter->hw.mac_type == e1000_82571 && adapter->hw.laa_is_present)
                e1000_rar_set(&adapter->hw, adapter->hw.mac_addr, 0);
@@ -2314,13 +2516,14 @@ e1000_tso(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
 {
 #ifdef NETIF_F_TSO
        struct e1000_context_desc *context_desc;
+       struct e1000_buffer *buffer_info;
        unsigned int i;
        uint32_t cmd_length = 0;
        uint16_t ipcse = 0, tucse, mss;
        uint8_t ipcss, ipcso, tucss, tucso, hdr_len;
        int err;
 
-       if(skb_shinfo(skb)->tso_size) {
+       if (skb_shinfo(skb)->tso_size) {
                if (skb_header_cloned(skb)) {
                        err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
                        if (err)
@@ -2329,7 +2532,7 @@ e1000_tso(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
 
                hdr_len = ((skb->h.raw - skb->data) + (skb->h.th->doff << 2));
                mss = skb_shinfo(skb)->tso_size;
-               if(skb->protocol == ntohs(ETH_P_IP)) {
+               if (skb->protocol == ntohs(ETH_P_IP)) {
                        skb->nh.iph->tot_len = 0;
                        skb->nh.iph->check = 0;
                        skb->h.th->check =
@@ -2341,7 +2544,7 @@ e1000_tso(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
                        cmd_length = E1000_TXD_CMD_IP;
                        ipcse = skb->h.raw - skb->data - 1;
 #ifdef NETIF_F_TSO_IPV6
-               } else if(skb->protocol == ntohs(ETH_P_IPV6)) {
+               } else if (skb->protocol == ntohs(ETH_P_IPV6)) {
                        skb->nh.ipv6h->payload_len = 0;
                        skb->h.th->check =
                                ~csum_ipv6_magic(&skb->nh.ipv6h->saddr,
@@ -2363,6 +2566,7 @@ e1000_tso(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
 
                i = tx_ring->next_to_use;
                context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
+               buffer_info = &tx_ring->buffer_info[i];
 
                context_desc->lower_setup.ip_fields.ipcss  = ipcss;
                context_desc->lower_setup.ip_fields.ipcso  = ipcso;
@@ -2374,14 +2578,16 @@ e1000_tso(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
                context_desc->tcp_seg_setup.fields.hdr_len = hdr_len;
                context_desc->cmd_and_length = cpu_to_le32(cmd_length);
 
+               buffer_info->time_stamp = jiffies;
+
                if (++i == tx_ring->count) i = 0;
                tx_ring->next_to_use = i;
 
-               return 1;
+               return TRUE;
        }
 #endif
 
-       return 0;
+       return FALSE;
 }
 
 static inline boolean_t
@@ -2389,13 +2595,15 @@ e1000_tx_csum(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
               struct sk_buff *skb)
 {
        struct e1000_context_desc *context_desc;
+       struct e1000_buffer *buffer_info;
        unsigned int i;
        uint8_t css;
 
-       if(likely(skb->ip_summed == CHECKSUM_HW)) {
+       if (likely(skb->ip_summed == CHECKSUM_HW)) {
                css = skb->h.raw - skb->data;
 
                i = tx_ring->next_to_use;
+               buffer_info = &tx_ring->buffer_info[i];
                context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
 
                context_desc->upper_setup.tcp_fields.tucss = css;
@@ -2404,6 +2612,8 @@ e1000_tx_csum(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
                context_desc->tcp_seg_setup.data = 0;
                context_desc->cmd_and_length = cpu_to_le32(E1000_TXD_CMD_DEXT);
 
+               buffer_info->time_stamp = jiffies;
+
                if (unlikely(++i == tx_ring->count)) i = 0;
                tx_ring->next_to_use = i;
 
@@ -2429,7 +2639,7 @@ e1000_tx_map(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
 
        i = tx_ring->next_to_use;
 
-       while(len) {
+       while (len) {
                buffer_info = &tx_ring->buffer_info[i];
                size = min(len, max_per_txd);
 #ifdef NETIF_F_TSO
@@ -2445,7 +2655,7 @@ e1000_tx_map(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
 
                /* Workaround for premature desc write-backs
                 * in TSO mode.  Append 4-byte sentinel desc */
-               if(unlikely(mss && !nr_frags && size == len && size > 8))
+               if (unlikely(mss && !nr_frags && size == len && size > 8))
                        size -= 4;
 #endif
                /* work-around for errata 10 and it applies
@@ -2453,13 +2663,13 @@ e1000_tx_map(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
                 * The fix is to make sure that the first descriptor of a
                 * packet is smaller than 2048 - 16 - 16 (or 2016) bytes
                 */
-               if(unlikely((adapter->hw.bus_type == e1000_bus_type_pcix) &&
+               if (unlikely((adapter->hw.bus_type == e1000_bus_type_pcix) &&
                                (size > 2015) && count == 0))
                        size = 2015;
-                                                                                
+
                /* Workaround for potential 82544 hang in PCI-X.  Avoid
                 * terminating buffers within evenly-aligned dwords. */
-               if(unlikely(adapter->pcix_82544 &&
+               if (unlikely(adapter->pcix_82544 &&
                   !((unsigned long)(skb->data + offset + size - 1) & 4) &&
                   size > 4))
                        size -= 4;
@@ -2475,29 +2685,29 @@ e1000_tx_map(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
                len -= size;
                offset += size;
                count++;
-               if(unlikely(++i == tx_ring->count)) i = 0;
+               if (unlikely(++i == tx_ring->count)) i = 0;
        }
 
-       for(f = 0; f < nr_frags; f++) {
+       for (f = 0; f < nr_frags; f++) {
                struct skb_frag_struct *frag;
 
                frag = &skb_shinfo(skb)->frags[f];
                len = frag->size;
                offset = frag->page_offset;
 
-               while(len) {
+               while (len) {
                        buffer_info = &tx_ring->buffer_info[i];
                        size = min(len, max_per_txd);
 #ifdef NETIF_F_TSO
                        /* Workaround for premature desc write-backs
                         * in TSO mode.  Append 4-byte sentinel desc */
-                       if(unlikely(mss && f == (nr_frags-1) && size == len && size > 8))
+                       if (unlikely(mss && f == (nr_frags-1) && size == len && size > 8))
                                size -= 4;
 #endif
                        /* Workaround for potential 82544 hang in PCI-X.
                         * Avoid terminating buffers within evenly-aligned
                         * dwords. */
-                       if(unlikely(adapter->pcix_82544 &&
+                       if (unlikely(adapter->pcix_82544 &&
                           !((unsigned long)(frag->page+offset+size-1) & 4) &&
                           size > 4))
                                size -= 4;
@@ -2514,7 +2724,7 @@ e1000_tx_map(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
                        len -= size;
                        offset += size;
                        count++;
-                       if(unlikely(++i == tx_ring->count)) i = 0;
+                       if (unlikely(++i == tx_ring->count)) i = 0;
                }
        }
 
@@ -2534,35 +2744,35 @@ e1000_tx_queue(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
        uint32_t txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS;
        unsigned int i;
 
-       if(likely(tx_flags & E1000_TX_FLAGS_TSO)) {
+       if (likely(tx_flags & E1000_TX_FLAGS_TSO)) {
                txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D |
                             E1000_TXD_CMD_TSE;
                txd_upper |= E1000_TXD_POPTS_TXSM << 8;
 
-               if(likely(tx_flags & E1000_TX_FLAGS_IPV4))
+               if (likely(tx_flags & E1000_TX_FLAGS_IPV4))
                        txd_upper |= E1000_TXD_POPTS_IXSM << 8;
        }
 
-       if(likely(tx_flags & E1000_TX_FLAGS_CSUM)) {
+       if (likely(tx_flags & E1000_TX_FLAGS_CSUM)) {
                txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
                txd_upper |= E1000_TXD_POPTS_TXSM << 8;
        }
 
-       if(unlikely(tx_flags & E1000_TX_FLAGS_VLAN)) {
+       if (unlikely(tx_flags & E1000_TX_FLAGS_VLAN)) {
                txd_lower |= E1000_TXD_CMD_VLE;
                txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK);
        }
 
        i = tx_ring->next_to_use;
 
-       while(count--) {
+       while (count--) {
                buffer_info = &tx_ring->buffer_info[i];
                tx_desc = E1000_TX_DESC(*tx_ring, i);
                tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
                tx_desc->lower.data =
                        cpu_to_le32(txd_lower | buffer_info->length);
                tx_desc->upper.data = cpu_to_le32(txd_upper);
-               if(unlikely(++i == tx_ring->count)) i = 0;
+               if (unlikely(++i == tx_ring->count)) i = 0;
        }
 
        tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd);
@@ -2597,20 +2807,20 @@ e1000_82547_fifo_workaround(struct e1000_adapter *adapter, struct sk_buff *skb)
 
        E1000_ROUNDUP(skb_fifo_len, E1000_FIFO_HDR);
 
-       if(adapter->link_duplex != HALF_DUPLEX)
+       if (adapter->link_duplex != HALF_DUPLEX)
                goto no_fifo_stall_required;
 
-       if(atomic_read(&adapter->tx_fifo_stall))
+       if (atomic_read(&adapter->tx_fifo_stall))
                return 1;
 
-       if(skb_fifo_len >= (E1000_82547_PAD_LEN + fifo_space)) {
+       if (skb_fifo_len >= (E1000_82547_PAD_LEN + fifo_space)) {
                atomic_set(&adapter->tx_fifo_stall, 1);
                return 1;
        }
 
 no_fifo_stall_required:
        adapter->tx_fifo_head += skb_fifo_len;
-       if(adapter->tx_fifo_head >= adapter->tx_fifo_size)
+       if (adapter->tx_fifo_head >= adapter->tx_fifo_size)
                adapter->tx_fifo_head -= adapter->tx_fifo_size;
        return 0;
 }
@@ -2621,27 +2831,27 @@ e1000_transfer_dhcp_info(struct e1000_adapter *adapter, struct sk_buff *skb)
 {
        struct e1000_hw *hw =  &adapter->hw;
        uint16_t length, offset;
-       if(vlan_tx_tag_present(skb)) {
-               if(!((vlan_tx_tag_get(skb) == adapter->hw.mng_cookie.vlan_id) &&
+       if (vlan_tx_tag_present(skb)) {
+               if (!((vlan_tx_tag_get(skb) == adapter->hw.mng_cookie.vlan_id) &&
                        ( adapter->hw.mng_cookie.status &
                          E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) )
                        return 0;
        }
-       if ((skb->len > MINIMUM_DHCP_PACKET_SIZE) && (!skb->protocol)) {
+       if ((skb->len > MINIMUM_DHCP_PACKET_SIZE) && (!skb->protocol)) {
                struct ethhdr *eth = (struct ethhdr *) skb->data;
-               if((htons(ETH_P_IP) == eth->h_proto)) {
-                       const struct iphdr *ip = 
+               if ((htons(ETH_P_IP) == eth->h_proto)) {
+                       const struct iphdr *ip =
                                (struct iphdr *)((uint8_t *)skb->data+14);
-                       if(IPPROTO_UDP == ip->protocol) {
-                               struct udphdr *udp = 
-                                       (struct udphdr *)((uint8_t *)ip + 
+                       if (IPPROTO_UDP == ip->protocol) {
+                               struct udphdr *udp =
+                                       (struct udphdr *)((uint8_t *)ip +
                                                (ip->ihl << 2));
-                               if(ntohs(udp->dest) == 67) {
+                               if (ntohs(udp->dest) == 67) {
                                        offset = (uint8_t *)udp + 8 - skb->data;
                                        length = skb->len - offset;
 
                                        return e1000_mng_write_dhcp_info(hw,
-                                                       (uint8_t *)udp + 8, 
+                                                       (uint8_t *)udp + 8,
                                                        length);
                                }
                        }
@@ -2664,7 +2874,7 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
        unsigned int nr_frags = 0;
        unsigned int mss = 0;
        int count = 0;
-       int tso;
+       int tso;
        unsigned int f;
        len -= skb->data_len;
 
@@ -2687,16 +2897,35 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
         * 4 = ceil(buffer len/mss).  To make sure we don't
         * overrun the FIFO, adjust the max buffer len if mss
         * drops. */
-       if(mss) {
+       if (mss) {
+               uint8_t hdr_len;
                max_per_txd = min(mss << 2, max_per_txd);
                max_txd_pwr = fls(max_per_txd) - 1;
+
+               /* TSO Workaround for 82571/2 Controllers -- if skb->data
+                * points to just header, pull a few bytes of payload from
+                * frags into skb->data */
+               hdr_len = ((skb->h.raw - skb->data) + (skb->h.th->doff << 2));
+               if (skb->data_len && (hdr_len == (skb->len - skb->data_len)) &&
+                       (adapter->hw.mac_type == e1000_82571 ||
+                       adapter->hw.mac_type == e1000_82572)) {
+                       unsigned int pull_size;
+                       pull_size = min((unsigned int)4, skb->data_len);
+                       if (!__pskb_pull_tail(skb, pull_size)) {
+                               printk(KERN_ERR "__pskb_pull_tail failed.\n");
+                               dev_kfree_skb_any(skb);
+                               return -EFAULT;
+                       }
+                       len = skb->len - skb->data_len;
+               }
        }
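(Worked example for the clamp above, with a hypothetical mss of 1460: mss << 2 = 5840, so max_per_txd is capped at four segments' worth of payload and max_txd_pwr = fls(5840) - 1 = 12. Assuming TXD_USE_COUNT() splits buffers on 2^max_txd_pwr boundaries as its arguments suggest, no single descriptor then spans more than ceil(5840/1460) = 4 MSS, which is the limit the erratum comment above refers to.)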
 
-       if((mss) || (skb->ip_summed == CHECKSUM_HW))
+       /* reserve a descriptor for the offload context */
+       if ((mss) || (skb->ip_summed == CHECKSUM_HW))
                count++;
        count++;
 #else
-       if(skb->ip_summed == CHECKSUM_HW)
+       if (skb->ip_summed == CHECKSUM_HW)
                count++;
 #endif
 
@@ -2709,45 +2938,24 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 
        count += TXD_USE_COUNT(len, max_txd_pwr);
 
-       if(adapter->pcix_82544)
+       if (adapter->pcix_82544)
                count++;
 
-       /* work-around for errata 10 and it applies to all controllers 
+       /* work-around for errata 10 and it applies to all controllers
         * in PCI-X mode, so add one more descriptor to the count
         */
-       if(unlikely((adapter->hw.bus_type == e1000_bus_type_pcix) &&
+       if (unlikely((adapter->hw.bus_type == e1000_bus_type_pcix) &&
                        (len > 2015)))
                count++;
 
        nr_frags = skb_shinfo(skb)->nr_frags;
-       for(f = 0; f < nr_frags; f++)
+       for (f = 0; f < nr_frags; f++)
                count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size,
                                       max_txd_pwr);
-       if(adapter->pcix_82544)
+       if (adapter->pcix_82544)
                count += nr_frags;
 
-#ifdef NETIF_F_TSO
-       /* TSO Workaround for 82571/2 Controllers -- if skb->data
-        * points to just header, pull a few bytes of payload from 
-        * frags into skb->data */
-       if (skb_shinfo(skb)->tso_size) {
-               uint8_t hdr_len;
-               hdr_len = ((skb->h.raw - skb->data) + (skb->h.th->doff << 2));
-               if (skb->data_len && (hdr_len < (skb->len - skb->data_len)) && 
-                       (adapter->hw.mac_type == e1000_82571 ||
-                       adapter->hw.mac_type == e1000_82572)) {
-                       unsigned int pull_size;
-                       pull_size = min((unsigned int)4, skb->data_len);
-                       if (!__pskb_pull_tail(skb, pull_size)) {
-                               printk(KERN_ERR "__pskb_pull_tail failed.\n");
-                               dev_kfree_skb_any(skb);
-                               return -EFAULT;
-                       }
-               }
-       }
-#endif
-
-       if(adapter->hw.tx_pkt_filtering && (adapter->hw.mac_type == e1000_82573) )
+       if (adapter->hw.tx_pkt_filtering && (adapter->hw.mac_type == e1000_82573))
                e1000_transfer_dhcp_info(adapter, skb);
 
        local_irq_save(flags);
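
The descriptor budgeting above can be read as one small estimate. In the sketch below, txd_use_count() is a simplified stand-in for the driver's TXD_USE_COUNT macro (assumed here to be "(len >> max_txd_pwr) + 1", i.e. descriptors needed when one descriptor carries at most 1 << max_txd_pwr bytes); the +1 adjustments mirror the offload-context, 82544-on-PCI-X and errata-10 cases in the hunk.

    #include <stdbool.h>

    /* Stand-in for TXD_USE_COUNT (assumed form): descriptors needed for
     * 'len' bytes when one descriptor carries at most 1 << max_txd_pwr. */
    static unsigned int txd_use_count(unsigned int len, unsigned int max_txd_pwr)
    {
            return (len >> max_txd_pwr) + 1;
    }

    /* Worst-case descriptor estimate for a frame, mirroring the logic above. */
    static unsigned int estimate_tx_descs(unsigned int linear_len,
                                          const unsigned int *frag_len,
                                          unsigned int nr_frags,
                                          unsigned int max_txd_pwr,
                                          bool needs_offload_ctx,  /* mss or HW csum */
                                          bool pcix_82544,
                                          bool pcix_bus)
    {
            unsigned int count = 0, f;

            if (needs_offload_ctx)
                    count++;         /* checksum/TSO context descriptor */
            count++;                 /* extra descriptor reserved in the TSO build */

            count += txd_use_count(linear_len, max_txd_pwr);
            if (pcix_82544)
                    count++;
            if (pcix_bus && linear_len > 2015)
                    count++;         /* errata 10: one more descriptor on PCI-X */

            for (f = 0; f < nr_frags; f++)
                    count += txd_use_count(frag_len[f], max_txd_pwr);
            if (pcix_82544)
                    count += nr_frags;

            return count;
    }
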
@@ -2765,8 +2973,8 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
                return NETDEV_TX_BUSY;
        }
 
-       if(unlikely(adapter->hw.mac_type == e1000_82547)) {
-               if(unlikely(e1000_82547_fifo_workaround(adapter, skb))) {
+       if (unlikely(adapter->hw.mac_type == e1000_82547)) {
+               if (unlikely(e1000_82547_fifo_workaround(adapter, skb))) {
                        netif_stop_queue(netdev);
                        mod_timer(&adapter->tx_fifo_stall_timer, jiffies);
                        spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
@@ -2774,13 +2982,13 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
                }
        }
 
-       if(unlikely(adapter->vlgrp && vlan_tx_tag_present(skb))) {
+       if (unlikely(adapter->vlgrp && vlan_tx_tag_present(skb))) {
                tx_flags |= E1000_TX_FLAGS_VLAN;
                tx_flags |= (vlan_tx_tag_get(skb) << E1000_TX_FLAGS_VLAN_SHIFT);
        }
 
        first = tx_ring->next_to_use;
-       
+
        tso = e1000_tso(adapter, tx_ring, skb);
        if (tso < 0) {
                dev_kfree_skb_any(skb);
@@ -2833,6 +3041,7 @@ e1000_tx_timeout_task(struct net_device *netdev)
 {
        struct e1000_adapter *adapter = netdev_priv(netdev);
 
+       adapter->tx_timeout_count++;
        e1000_down(adapter);
        e1000_up(adapter);
 }
@@ -2850,7 +3059,7 @@ e1000_get_stats(struct net_device *netdev)
 {
        struct e1000_adapter *adapter = netdev_priv(netdev);
 
-       e1000_update_stats(adapter);
+       /* only return the current stats */
        return &adapter->net_stats;
 }
 
@@ -2868,56 +3077,57 @@ e1000_change_mtu(struct net_device *netdev, int new_mtu)
        struct e1000_adapter *adapter = netdev_priv(netdev);
        int max_frame = new_mtu + ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
 
-       if((max_frame < MINIMUM_ETHERNET_FRAME_SIZE) ||
-               (max_frame > MAX_JUMBO_FRAME_SIZE)) {
-                       DPRINTK(PROBE, ERR, "Invalid MTU setting\n");
-                       return -EINVAL;
-       }
-
-#define MAX_STD_JUMBO_FRAME_SIZE 9234
-       /* might want this to be bigger enum check... */
-       /* 82571 controllers limit jumbo frame size to 10500 bytes */
-       if ((adapter->hw.mac_type == e1000_82571 || 
-            adapter->hw.mac_type == e1000_82572) &&
-           max_frame > MAX_STD_JUMBO_FRAME_SIZE) {
-               DPRINTK(PROBE, ERR, "MTU > 9216 bytes not supported "
-                                   "on 82571 and 82572 controllers.\n");
-               return -EINVAL;
-       }
-
-       if(adapter->hw.mac_type == e1000_82573 &&
-           max_frame > MAXIMUM_ETHERNET_FRAME_SIZE) {
-               DPRINTK(PROBE, ERR, "Jumbo Frames not supported "
-                                   "on 82573\n");
+       if ((max_frame < MINIMUM_ETHERNET_FRAME_SIZE) ||
+           (max_frame > MAX_JUMBO_FRAME_SIZE)) {
+               DPRINTK(PROBE, ERR, "Invalid MTU setting\n");
                return -EINVAL;
        }
 
-       if(adapter->hw.mac_type > e1000_82547_rev_2) {
-               adapter->rx_buffer_len = max_frame;
-               E1000_ROUNDUP(adapter->rx_buffer_len, 1024);
-       } else {
-               if(unlikely((adapter->hw.mac_type < e1000_82543) &&
-                  (max_frame > MAXIMUM_ETHERNET_FRAME_SIZE))) {
-                       DPRINTK(PROBE, ERR, "Jumbo Frames not supported "
-                                           "on 82542\n");
+       /* Adapter-specific max frame size limits. */
+       switch (adapter->hw.mac_type) {
+       case e1000_82542_rev2_0:
+       case e1000_82542_rev2_1:
+       case e1000_82573:
+               if (max_frame > MAXIMUM_ETHERNET_FRAME_SIZE) {
+                       DPRINTK(PROBE, ERR, "Jumbo Frames not supported.\n");
+                       return -EINVAL;
+               }
+               break;
+       case e1000_82571:
+       case e1000_82572:
+#define MAX_STD_JUMBO_FRAME_SIZE 9234
+               if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) {
+                       DPRINTK(PROBE, ERR, "MTU > 9216 not supported.\n");
                        return -EINVAL;
-
-               } else {
-                       if(max_frame <= E1000_RXBUFFER_2048) {
-                               adapter->rx_buffer_len = E1000_RXBUFFER_2048;
-                       } else if(max_frame <= E1000_RXBUFFER_4096) {
-                               adapter->rx_buffer_len = E1000_RXBUFFER_4096;
-                       } else if(max_frame <= E1000_RXBUFFER_8192) {
-                               adapter->rx_buffer_len = E1000_RXBUFFER_8192;
-                       } else if(max_frame <= E1000_RXBUFFER_16384) {
-                               adapter->rx_buffer_len = E1000_RXBUFFER_16384;
-                       }
                }
+               break;
+       default:
+               /* Capable of supporting up to MAX_JUMBO_FRAME_SIZE limit. */
+               break;
        }
 
+       /* since the driver code now supports splitting a packet across
+        * multiple descriptors, most of the fifo related limitations on
+        * jumbo frame traffic have gone away.
+        * simply use 2k descriptors for everything.
+        *
+        * NOTE: dev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
+        * means we reserve 2 more, this pushes us to allocate from the next
+        * larger slab size
+        * i.e. RXBUFFER_2048 --> size-4096 slab */
+
+       /* recent hardware supports 1KB granularity */
+       if (adapter->hw.mac_type > e1000_82547_rev_2) {
+               adapter->rx_buffer_len =
+                   ((max_frame < E1000_RXBUFFER_2048) ?
+                       max_frame : E1000_RXBUFFER_2048);
+               E1000_ROUNDUP(adapter->rx_buffer_len, 1024);
+       } else
+               adapter->rx_buffer_len = E1000_RXBUFFER_2048;
+
        netdev->mtu = new_mtu;
 
-       if(netif_running(netdev)) {
+       if (netif_running(netdev)) {
                e1000_down(adapter);
                e1000_up(adapter);
        }
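
The rewritten buffer-length selection above collapses the old per-size buckets: controllers newer than 82547 rev 2 get the frame size rounded up to 1 KB and capped at the 2 KB buffer, everything else simply uses the 2 KB buffer. A standalone restatement, with 2048 written out for E1000_RXBUFFER_2048 and round_up_1k() standing in for E1000_ROUNDUP(len, 1024):

    /* Stand-in for E1000_ROUNDUP(len, 1024): next multiple of 1 KB. */
    static unsigned int round_up_1k(unsigned int len)
    {
            return (len + 1023) & ~1023u;
    }

    /* Rx buffer length chosen by the MTU change path above. */
    static unsigned int rx_buffer_len_for(unsigned int max_frame,
                                          int newer_than_82547_rev2)
    {
            if (newer_than_82547_rev2) {
                    unsigned int len = max_frame < 2048 ? max_frame : 2048;
                    return round_up_1k(len);  /* 1 KB granularity on recent MACs */
            }
            return 2048;                      /* older parts: one 2 KB buffer */
    }
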
@@ -3004,7 +3214,7 @@ e1000_update_stats(struct e1000_adapter *adapter)
        hw->collision_delta = E1000_READ_REG(hw, COLC);
        adapter->stats.colc += hw->collision_delta;
 
-       if(hw->mac_type >= e1000_82543) {
+       if (hw->mac_type >= e1000_82543) {
                adapter->stats.algnerrc += E1000_READ_REG(hw, ALGNERRC);
                adapter->stats.rxerrc += E1000_READ_REG(hw, RXERRC);
                adapter->stats.tncrs += E1000_READ_REG(hw, TNCRS);
@@ -3012,7 +3222,7 @@ e1000_update_stats(struct e1000_adapter *adapter)
                adapter->stats.tsctc += E1000_READ_REG(hw, TSCTC);
                adapter->stats.tsctfc += E1000_READ_REG(hw, TSCTFC);
        }
-       if(hw->mac_type > e1000_82547_rev_2) {
+       if (hw->mac_type > e1000_82547_rev_2) {
                adapter->stats.iac += E1000_READ_REG(hw, IAC);
                adapter->stats.icrxoc += E1000_READ_REG(hw, ICRXOC);
                adapter->stats.icrxptc += E1000_READ_REG(hw, ICRXPTC);
@@ -3037,12 +3247,11 @@ e1000_update_stats(struct e1000_adapter *adapter)
 
        adapter->net_stats.rx_errors = adapter->stats.rxerrc +
                adapter->stats.crcerrs + adapter->stats.algnerrc +
-               adapter->stats.rlec + adapter->stats.mpc + 
-               adapter->stats.cexterr;
+               adapter->stats.rlec + adapter->stats.cexterr;
+       adapter->net_stats.rx_dropped = 0;
        adapter->net_stats.rx_length_errors = adapter->stats.rlec;
        adapter->net_stats.rx_crc_errors = adapter->stats.crcerrs;
        adapter->net_stats.rx_frame_errors = adapter->stats.algnerrc;
-       adapter->net_stats.rx_fifo_errors = adapter->stats.mpc;
        adapter->net_stats.rx_missed_errors = adapter->stats.mpc;
 
        /* Tx Errors */
@@ -3057,14 +3266,14 @@ e1000_update_stats(struct e1000_adapter *adapter)
 
        /* Phy Stats */
 
-       if(hw->media_type == e1000_media_type_copper) {
-               if((adapter->link_speed == SPEED_1000) &&
+       if (hw->media_type == e1000_media_type_copper) {
+               if ((adapter->link_speed == SPEED_1000) &&
                   (!e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) {
                        phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK;
                        adapter->phy_stats.idle_errors += phy_tmp;
                }
 
-               if((hw->mac_type <= e1000_82546) &&
+               if ((hw->mac_type <= e1000_82546) &&
                   (hw->phy_type == e1000_phy_m88) &&
                   !e1000_read_phy_reg(hw, M88E1000_RX_ERR_CNTR, &phy_tmp))
                        adapter->phy_stats.receive_errors += phy_tmp;
@@ -3110,32 +3319,44 @@ e1000_intr(int irq, void *data, struct pt_regs *regs)
        struct e1000_adapter *adapter = netdev_priv(netdev);
        struct e1000_hw *hw = &adapter->hw;
        uint32_t icr = E1000_READ_REG(hw, ICR);
-#if defined(CONFIG_E1000_NAPI) && defined(CONFIG_E1000_MQ) || !defined(CONFIG_E1000_NAPI)
+#ifndef CONFIG_E1000_NAPI
        int i;
+#else
+       /* Interrupt Auto-Mask...upon reading ICR,
+        * interrupts are masked.  No need for the
+        * IMC write, but it does mean we should
+        * account for it ASAP. */
+       if (likely(hw->mac_type >= e1000_82571))
+               atomic_inc(&adapter->irq_sem);
 #endif
 
-       if(unlikely(!icr))
+       if (unlikely(!icr)) {
+#ifdef CONFIG_E1000_NAPI
+               if (hw->mac_type >= e1000_82571)
+                       e1000_irq_enable(adapter);
+#endif
                return IRQ_NONE;  /* Not our interrupt */
+       }
 
-       if(unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) {
+       if (unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) {
                hw->get_link_status = 1;
                mod_timer(&adapter->watchdog_timer, jiffies);
        }
 
 #ifdef CONFIG_E1000_NAPI
-       atomic_inc(&adapter->irq_sem);
-       E1000_WRITE_REG(hw, IMC, ~0);
-       E1000_WRITE_FLUSH(hw);
+       if (unlikely(hw->mac_type < e1000_82571)) {
+               atomic_inc(&adapter->irq_sem);
+               E1000_WRITE_REG(hw, IMC, ~0);
+               E1000_WRITE_FLUSH(hw);
+       }
 #ifdef CONFIG_E1000_MQ
        if (atomic_read(&adapter->rx_sched_call_data.count) == 0) {
-               cpu_set(adapter->cpu_for_queue[0],
-                       adapter->rx_sched_call_data.cpumask);
-               for (i = 1; i < adapter->num_queues; i++) {
-                       cpu_set(adapter->cpu_for_queue[i],
-                               adapter->rx_sched_call_data.cpumask);
-                       atomic_inc(&adapter->irq_sem);
-               }
-               atomic_set(&adapter->rx_sched_call_data.count, i);
+               /* We must setup the cpumask once count == 0 since
+                * each cpu bit is cleared when the work is done. */
+               adapter->rx_sched_call_data.cpumask = adapter->cpumask;
+               atomic_add(adapter->num_rx_queues - 1, &adapter->irq_sem);
+               atomic_set(&adapter->rx_sched_call_data.count,
+                          adapter->num_rx_queues);
                smp_call_async_mask(&adapter->rx_sched_call_data);
        } else {
                printk("call_data.count == %u\n", atomic_read(&adapter->rx_sched_call_data.count));
@@ -3149,26 +3370,26 @@ e1000_intr(int irq, void *data, struct pt_regs *regs)
 
 #else /* if !CONFIG_E1000_NAPI */
        /* Writing IMC and IMS is needed for 82547.
-          Due to Hub Link bus being occupied, an interrupt
-          de-assertion message is not able to be sent.
-          When an interrupt assertion message is generated later,
-          two messages are re-ordered and sent out.
-          That causes APIC to think 82547 is in de-assertion
-          state, while 82547 is in assertion state, resulting
-          in dead lock. Writing IMC forces 82547 into
-          de-assertion state.
-       */
-       if(hw->mac_type == e1000_82547 || hw->mac_type == e1000_82547_rev_2){
+        * Due to Hub Link bus being occupied, an interrupt
+        * de-assertion message is not able to be sent.
+        * When an interrupt assertion message is generated later,
+        * two messages are re-ordered and sent out.
+        * That causes APIC to think 82547 is in de-assertion
+        * state, while 82547 is in assertion state, resulting
+        * in dead lock. Writing IMC forces 82547 into
+        * de-assertion state.
+        */
+       if (hw->mac_type == e1000_82547 || hw->mac_type == e1000_82547_rev_2) {
                atomic_inc(&adapter->irq_sem);
                E1000_WRITE_REG(hw, IMC, ~0);
        }
 
-       for(i = 0; i < E1000_MAX_INTR; i++)
-               if(unlikely(!adapter->clean_rx(adapter, adapter->rx_ring) &
+       for (i = 0; i < E1000_MAX_INTR; i++)
+               if (unlikely(!adapter->clean_rx(adapter, adapter->rx_ring) &
                   !e1000_clean_tx_irq(adapter, adapter->tx_ring)))
                        break;
 
-       if(hw->mac_type == e1000_82547 || hw->mac_type == e1000_82547_rev_2)
+       if (hw->mac_type == e1000_82547 || hw->mac_type == e1000_82547_rev_2)
                e1000_irq_enable(adapter);
 
 #endif /* CONFIG_E1000_NAPI */
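
Outside NAPI, the loop above bounds the work done in hard-interrupt context to E1000_MAX_INTR passes, and on 82547/82547 rev 2 it masks with IMC first so the part drops out of assertion state before the APIC can see the reordered Hub Link messages described in the comment. The shape of it, standalone; clean_rx()/clean_tx() are stub work functions and 10 is only an assumed value for E1000_MAX_INTR:

    #include <stdbool.h>

    #define MAX_INTR_PASSES 10      /* assumed stand-in for E1000_MAX_INTR */

    /* Stub work functions: return true while descriptors were processed. */
    static bool clean_rx(void) { return false; }
    static bool clean_tx(void) { return false; }

    /* Sketch of the non-NAPI service loop above; mask()/unmask() stand in for
     * the IMC write and e1000_irq_enable(). */
    static void legacy_isr_body(bool is_82547, void (*mask)(void),
                                void (*unmask)(void))
    {
            int i;

            if (is_82547)
                    mask();                         /* force de-assertion first */

            for (i = 0; i < MAX_INTR_PASSES; i++)
                    if (!clean_rx() & !clean_tx())  /* '&' so both always run */
                            break;

            if (is_82547)
                    unmask();                       /* re-enable when done */
    }
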
@@ -3187,7 +3408,7 @@ e1000_clean(struct net_device *poll_dev, int *budget)
 {
        struct e1000_adapter *adapter;
        int work_to_do = min(*budget, poll_dev->quota);
-       int tx_cleaned, i = 0, work_done = 0;
+       int tx_cleaned = 0, i = 0, work_done = 0;
 
        /* Must NOT use netdev_priv macro here. */
        adapter = poll_dev->priv;
@@ -3198,19 +3419,31 @@ e1000_clean(struct net_device *poll_dev, int *budget)
 
        while (poll_dev != &adapter->polling_netdev[i]) {
                i++;
-               if (unlikely(i == adapter->num_queues))
+               if (unlikely(i == adapter->num_rx_queues))
                        BUG();
        }
 
-       tx_cleaned = e1000_clean_tx_irq(adapter, &adapter->tx_ring[i]);
+       if (likely(adapter->num_tx_queues == 1)) {
+               /* e1000_clean is called per-cpu.  This lock protects
+                * tx_ring[0] from being cleaned by multiple cpus
+                * simultaneously.  A failure obtaining the lock means
+                * tx_ring[0] is currently being cleaned anyway. */
+               if (spin_trylock(&adapter->tx_queue_lock)) {
+                       tx_cleaned = e1000_clean_tx_irq(adapter,
+                                                       &adapter->tx_ring[0]);
+                       spin_unlock(&adapter->tx_queue_lock);
+               }
+       } else
+               tx_cleaned = e1000_clean_tx_irq(adapter, &adapter->tx_ring[i]);
+
        adapter->clean_rx(adapter, &adapter->rx_ring[i],
                          &work_done, work_to_do);
 
        *budget -= work_done;
        poll_dev->quota -= work_done;
-       
+
        /* If no Tx and not enough Rx work done, exit the polling mode */
-       if((!tx_cleaned && (work_done == 0)) ||
+       if ((!tx_cleaned && (work_done == 0)) ||
           !netif_running(adapter->netdev)) {
 quit_polling:
                netif_rx_complete(poll_dev);
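
The trylock above is the whole point of the new tx_queue_lock: e1000_clean can run concurrently on several CPUs, and with a single Tx ring a failed trylock simply means another CPU is already reclaiming descriptors, so skipping is safe rather than a bug. The same pattern standalone, illustrated with pthreads instead of the kernel spinlock API:

    #include <pthread.h>
    #include <stdbool.h>

    static pthread_mutex_t tx_queue_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Stub for e1000_clean_tx_irq(): true if descriptors were reclaimed. */
    static bool clean_tx_ring0(void) { return true; }

    /* Sketch of the single-queue branch above: at most one CPU cleans ring 0
     * at a time; losing the race is fine because the winner does the work. */
    static bool poll_tx(void)
    {
            bool tx_cleaned = false;

            if (pthread_mutex_trylock(&tx_queue_lock) == 0) {
                    tx_cleaned = clean_tx_ring0();
                    pthread_mutex_unlock(&tx_queue_lock);
            }
            return tx_cleaned;   /* also false when another CPU held the lock */
    }
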
@@ -3242,22 +3475,24 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter,
        eop_desc = E1000_TX_DESC(*tx_ring, eop);
 
        while (eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) {
-               for(cleaned = FALSE; !cleaned; ) {
+               for (cleaned = FALSE; !cleaned; ) {
                        tx_desc = E1000_TX_DESC(*tx_ring, i);
                        buffer_info = &tx_ring->buffer_info[i];
                        cleaned = (i == eop);
 
+#ifdef CONFIG_E1000_MQ
+                       tx_ring->tx_stats.bytes += buffer_info->length;
+#endif
                        e1000_unmap_and_free_tx_resource(adapter, buffer_info);
+                       memset(tx_desc, 0, sizeof(struct e1000_tx_desc));
 
-                       tx_desc->buffer_addr = 0;
-                       tx_desc->lower.data = 0;
-                       tx_desc->upper.data = 0;
-
-                       if(unlikely(++i == tx_ring->count)) i = 0;
+                       if (unlikely(++i == tx_ring->count)) i = 0;
                }
 
-               tx_ring->pkt++;
-               
+#ifdef CONFIG_E1000_MQ
+               tx_ring->tx_stats.packets++;
+#endif
+
                eop = tx_ring->buffer_info[i].next_to_watch;
                eop_desc = E1000_TX_DESC(*tx_ring, eop);
        }
@@ -3266,7 +3501,7 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter,
 
        spin_lock(&tx_ring->tx_lock);
 
-       if(unlikely(cleaned && netif_queue_stopped(netdev) &&
+       if (unlikely(cleaned && netif_queue_stopped(netdev) &&
                    netif_carrier_ok(netdev)))
                netif_wake_queue(netdev);
 
@@ -3276,32 +3511,31 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter,
                /* Detect a transmit hang in hardware, this serializes the
                 * check with the clearing of time_stamp and movement of i */
                adapter->detect_tx_hung = FALSE;
-               if (tx_ring->buffer_info[i].dma &&
-                   time_after(jiffies, tx_ring->buffer_info[i].time_stamp + HZ)
+               if (tx_ring->buffer_info[eop].dma &&
+                   time_after(jiffies, tx_ring->buffer_info[eop].time_stamp +
+                              adapter->tx_timeout_factor * HZ)
                    && !(E1000_READ_REG(&adapter->hw, STATUS) &
-                       E1000_STATUS_TXOFF)) {
+                        E1000_STATUS_TXOFF)) {
 
                        /* detected Tx unit hang */
-                       i = tx_ring->next_to_clean;
-                       eop = tx_ring->buffer_info[i].next_to_watch;
-                       eop_desc = E1000_TX_DESC(*tx_ring, eop);
                        DPRINTK(DRV, ERR, "Detected Tx Unit Hang\n"
+                                       "  Tx Queue             <%lu>\n"
                                        "  TDH                  <%x>\n"
                                        "  TDT                  <%x>\n"
                                        "  next_to_use          <%x>\n"
                                        "  next_to_clean        <%x>\n"
                                        "buffer_info[next_to_clean]\n"
-                                       "  dma                  <%llx>\n"
                                        "  time_stamp           <%lx>\n"
                                        "  next_to_watch        <%x>\n"
                                        "  jiffies              <%lx>\n"
                                        "  next_to_watch.status <%x>\n",
+                               (unsigned long)(tx_ring -
+                                       adapter->tx_ring),
                                readl(adapter->hw.hw_addr + tx_ring->tdh),
                                readl(adapter->hw.hw_addr + tx_ring->tdt),
                                tx_ring->next_to_use,
-                               i,
-                               (unsigned long long)tx_ring->buffer_info[i].dma,
-                               tx_ring->buffer_info[i].time_stamp,
+                               tx_ring->next_to_clean,
+                               tx_ring->buffer_info[eop].time_stamp,
                                eop,
                                jiffies,
                                eop_desc->upper.fields.status);
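
The hang check above now keys off the end-of-packet buffer (eop) instead of next_to_clean, and stretches the timeout by tx_timeout_factor. Restated as a standalone predicate, with hz standing in for HZ and txoff for the E1000_STATUS_TXOFF flow-control pause bit:

    #include <stdbool.h>

    /* Sketch of the Tx-hang predicate above: the eop buffer is still mapped,
     * it has waited longer than the scaled timeout, and the MAC is not just
     * paused by flow control (TXOFF). */
    static bool tx_hang_detected(bool detect_enabled, unsigned long eop_dma,
                                 unsigned long now, unsigned long eop_time_stamp,
                                 unsigned int tx_timeout_factor,
                                 unsigned long hz, bool txoff)
    {
            if (!detect_enabled || !eop_dma || txoff)
                    return false;
            /* time_after(now, stamp + factor * HZ), written as a signed
             * subtraction so jiffies wraparound keeps working. */
            return (long)(now - (eop_time_stamp + tx_timeout_factor * hz)) > 0;
    }
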
@@ -3329,21 +3563,21 @@ e1000_rx_checksum(struct e1000_adapter *adapter,
        skb->ip_summed = CHECKSUM_NONE;
 
        /* 82543 or newer only */
-       if(unlikely(adapter->hw.mac_type < e1000_82543)) return;
+       if (unlikely(adapter->hw.mac_type < e1000_82543)) return;
        /* Ignore Checksum bit is set */
-       if(unlikely(status & E1000_RXD_STAT_IXSM)) return;
+       if (unlikely(status & E1000_RXD_STAT_IXSM)) return;
        /* TCP/UDP checksum error bit is set */
-       if(unlikely(errors & E1000_RXD_ERR_TCPE)) {
+       if (unlikely(errors & E1000_RXD_ERR_TCPE)) {
                /* let the stack verify checksum errors */
                adapter->hw_csum_err++;
                return;
        }
        /* TCP/UDP Checksum has not been calculated */
-       if(adapter->hw.mac_type <= e1000_82547_rev_2) {
-               if(!(status & E1000_RXD_STAT_TCPCS))
+       if (adapter->hw.mac_type <= e1000_82547_rev_2) {
+               if (!(status & E1000_RXD_STAT_TCPCS))
                        return;
        } else {
-               if(!(status & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS)))
+               if (!(status & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS)))
                        return;
        }
        /* It must be a TCP or UDP packet with a valid checksum */
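
The checksum path above boils down to a small decision: do nothing on pre-82543 parts or when hardware flagged "ignore checksum" (IXSM); a TCP/UDP checksum error (TCPE) is counted and left for the stack to verify; otherwise the result is trusted only when the matching "checksum calculated" bit is present (TCPCS alone through 82547 rev 2, TCPCS or UDPCS on newer parts). A standalone sketch with plain flags standing in for the RXD status/error bits:

    #include <stdbool.h>

    enum csum_verdict { CSUM_NONE, CSUM_HW_ERROR, CSUM_UNNECESSARY };

    /* Sketch of the Rx checksum decision above. The boolean arguments stand
     * in for the RXD status/error bits tested in the hunk. */
    static enum csum_verdict rx_csum_verdict(bool mac_pre_82543,
                                             bool mac_le_82547_rev2,
                                             bool ixsm,   /* ignore checksum */
                                             bool tcpe,   /* TCP/UDP csum error */
                                             bool tcpcs,
                                             bool udpcs)
    {
            if (mac_pre_82543 || ixsm)
                    return CSUM_NONE;        /* leave skb->ip_summed alone */
            if (tcpe)
                    return CSUM_HW_ERROR;    /* count it, stack re-verifies */
            if (mac_le_82547_rev2 ? !tcpcs : !(tcpcs || udpcs))
                    return CSUM_NONE;        /* checksum not calculated */
            return CSUM_UNNECESSARY;         /* valid TCP or UDP checksum */
    }
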
@@ -3379,46 +3613,87 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter,
 {
        struct net_device *netdev = adapter->netdev;
        struct pci_dev *pdev = adapter->pdev;
-       struct e1000_rx_desc *rx_desc;
-       struct e1000_buffer *buffer_info;
-       struct sk_buff *skb;
+       struct e1000_rx_desc *rx_desc, *next_rxd;
+       struct e1000_buffer *buffer_info, *next_buffer;
        unsigned long flags;
        uint32_t length;
        uint8_t last_byte;
        unsigned int i;
-       boolean_t cleaned = FALSE;
+       int cleaned_count = 0;
+       boolean_t cleaned = FALSE, multi_descriptor = FALSE;
 
        i = rx_ring->next_to_clean;
        rx_desc = E1000_RX_DESC(*rx_ring, i);
+       buffer_info = &rx_ring->buffer_info[i];
 
-       while(rx_desc->status & E1000_RXD_STAT_DD) {
-               buffer_info = &rx_ring->buffer_info[i];
+       while (rx_desc->status & E1000_RXD_STAT_DD) {
+               struct sk_buff *skb, *next_skb;
+               u8 status;
 #ifdef CONFIG_E1000_NAPI
-               if(*work_done >= work_to_do)
+               if (*work_done >= work_to_do)
                        break;
                (*work_done)++;
 #endif
-               cleaned = TRUE;
+               status = rx_desc->status;
+               skb = buffer_info->skb;
+               buffer_info->skb = NULL;
+
+               if (++i == rx_ring->count) i = 0;
+               next_rxd = E1000_RX_DESC(*rx_ring, i);
+               next_buffer = &rx_ring->buffer_info[i];
+               next_skb = next_buffer->skb;
 
+               cleaned = TRUE;
+               cleaned_count++;
                pci_unmap_single(pdev,
                                 buffer_info->dma,
                                 buffer_info->length,
                                 PCI_DMA_FROMDEVICE);
 
-               skb = buffer_info->skb;
                length = le16_to_cpu(rx_desc->length);
 
-               if(unlikely(!(rx_desc->status & E1000_RXD_STAT_EOP))) {
-                       /* All receives must fit into a single buffer */
-                       E1000_DBG("%s: Receive packet consumed multiple"
-                                 " buffers\n", netdev->name);
-                       dev_kfree_skb_irq(skb);
+               skb_put(skb, length);
+
+               if (!(status & E1000_RXD_STAT_EOP)) {
+                       if (!rx_ring->rx_skb_top) {
+                               rx_ring->rx_skb_top = skb;
+                               rx_ring->rx_skb_top->len = length;
+                               rx_ring->rx_skb_prev = skb;
+                       } else {
+                               if (skb_shinfo(rx_ring->rx_skb_top)->frag_list) {
+                                       rx_ring->rx_skb_prev->next = skb;
+                                       skb->prev = rx_ring->rx_skb_prev;
+                               } else {
+                                       skb_shinfo(rx_ring->rx_skb_top)->frag_list = skb;
+                               }
+                               rx_ring->rx_skb_prev = skb;
+                               rx_ring->rx_skb_top->data_len += length;
+                       }
                        goto next_desc;
+               } else {
+                       if (rx_ring->rx_skb_top) {
+                               if (skb_shinfo(rx_ring->rx_skb_top)
+                                                       ->frag_list) {
+                                       rx_ring->rx_skb_prev->next = skb;
+                                       skb->prev = rx_ring->rx_skb_prev;
+                               } else
+                                       skb_shinfo(rx_ring->rx_skb_top)
+                                                       ->frag_list = skb;
+
+                               rx_ring->rx_skb_top->data_len += length;
+                               rx_ring->rx_skb_top->len +=
+                                       rx_ring->rx_skb_top->data_len;
+
+                               skb = rx_ring->rx_skb_top;
+                               multi_descriptor = TRUE;
+                               rx_ring->rx_skb_top = NULL;
+                               rx_ring->rx_skb_prev = NULL;
+                       }
                }
 
-               if(unlikely(rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK)) {
+               if (unlikely(rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK)) {
                        last_byte = *(skb->data + length - 1);
-                       if(TBI_ACCEPT(&adapter->hw, rx_desc->status,
+                       if (TBI_ACCEPT(&adapter->hw, status,
                                      rx_desc->errors, length, last_byte)) {
                                spin_lock_irqsave(&adapter->stats_lock, flags);
                                e1000_tbi_adjust_stats(&adapter->hw,
@@ -3433,18 +3708,41 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter,
                        }
                }
 
-               /* Good Receive */
-               skb_put(skb, length - ETHERNET_FCS_SIZE);
+               /* code added for copybreak, this should improve
+                * performance for small packets with large amounts
+                * of reassembly being done in the stack */
+#define E1000_CB_LENGTH 256
+               if ((length < E1000_CB_LENGTH) &&
+                  !rx_ring->rx_skb_top &&
+                  /* or maybe (status & E1000_RXD_STAT_EOP) && */
+                  !multi_descriptor) {
+                       struct sk_buff *new_skb =
+                           dev_alloc_skb(length + NET_IP_ALIGN);
+                       if (new_skb) {
+                               skb_reserve(new_skb, NET_IP_ALIGN);
+                               new_skb->dev = netdev;
+                               memcpy(new_skb->data - NET_IP_ALIGN,
+                                      skb->data - NET_IP_ALIGN,
+                                      length + NET_IP_ALIGN);
+                               /* save the skb in buffer_info as good */
+                               buffer_info->skb = skb;
+                               skb = new_skb;
+                               skb_put(skb, length);
+                       }
+               }
+
+               /* end copybreak code */
 
                /* Receive Checksum Offload */
                e1000_rx_checksum(adapter,
-                                 (uint32_t)(rx_desc->status) |
+                                 (uint32_t)(status) |
                                  ((uint32_t)(rx_desc->errors) << 24),
                                  rx_desc->csum, skb);
+
                skb->protocol = eth_type_trans(skb, netdev);
 #ifdef CONFIG_E1000_NAPI
-               if(unlikely(adapter->vlgrp &&
-                           (rx_desc->status & E1000_RXD_STAT_VP))) {
+               if (unlikely(adapter->vlgrp &&
+                           (status & E1000_RXD_STAT_VP))) {
                        vlan_hwaccel_receive_skb(skb, adapter->vlgrp,
                                                 le16_to_cpu(rx_desc->special) &
                                                 E1000_RXD_SPC_VLAN_MASK);
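
The copybreak block above trades one short memcpy for handing the original large receive buffer straight back to the ring, which pays off for small-packet workloads that would otherwise pin 2 KB buffers for tiny frames. Its shape in standalone form, with malloc/memcpy standing in for dev_alloc_skb and the skb copy; 256 mirrors E1000_CB_LENGTH and the NET_IP_ALIGN offset handling is left out for brevity:

    #include <stdlib.h>
    #include <string.h>

    #define CB_LENGTH 256   /* copybreak threshold, as E1000_CB_LENGTH above */

    /* Sketch of copybreak: for short, single-descriptor frames, copy the data
     * into a fresh small buffer and hand the original large buffer back to
     * the ring for reuse.  Returns the buffer to pass up the stack; *recycle
     * is the one to re-post to hardware. */
    static unsigned char *copybreak(unsigned char *rx_buf, size_t length,
                                    int multi_descriptor, unsigned char **recycle)
    {
            unsigned char *copy;

            *recycle = NULL;
            if (length >= CB_LENGTH || multi_descriptor)
                    return rx_buf;          /* big or chained frame: no copy */

            copy = malloc(length);
            if (!copy)
                    return rx_buf;          /* allocation failed: skip copybreak */

            memcpy(copy, rx_buf, length);   /* one small copy */
            *recycle = rx_buf;              /* original goes back to the ring */
            return copy;
    }
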
@@ -3452,8 +3750,8 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter,
                        netif_receive_skb(skb);
                }
 #else /* CONFIG_E1000_NAPI */
-               if(unlikely(adapter->vlgrp &&
-                           (rx_desc->status & E1000_RXD_STAT_VP))) {
+               if (unlikely(adapter->vlgrp &&
+                           (status & E1000_RXD_STAT_VP))) {
                        vlan_hwaccel_rx(skb, adapter->vlgrp,
                                        le16_to_cpu(rx_desc->special) &
                                        E1000_RXD_SPC_VLAN_MASK);
@@ -3462,17 +3760,28 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter,
                }
 #endif /* CONFIG_E1000_NAPI */
                netdev->last_rx = jiffies;
-               rx_ring->pkt++;
+#ifdef CONFIG_E1000_MQ
+               rx_ring->rx_stats.packets++;
+               rx_ring->rx_stats.bytes += length;
+#endif
 
 next_desc:
                rx_desc->status = 0;
-               buffer_info->skb = NULL;
-               if(unlikely(++i == rx_ring->count)) i = 0;
 
-               rx_desc = E1000_RX_DESC(*rx_ring, i);
+               /* return some buffers to hardware, one at a time is too slow */
+               if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
+                       adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
+                       cleaned_count = 0;
+               }
+
+               rx_desc = next_rxd;
+               buffer_info = next_buffer;
        }
        rx_ring->next_to_clean = i;
-       adapter->alloc_rx_buf(adapter, rx_ring);
+
+       cleaned_count = E1000_DESC_UNUSED(rx_ring);
+       if (cleaned_count)
+               adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
 
        return cleaned;
 }
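
Replenishing receive buffers in batches replaces the old single alloc_rx_buf call per poll: every E1000_RX_BUFFER_WRITE cleaned descriptors trigger a refill, and whatever E1000_DESC_UNUSED reports at the end is topped up in one go. The batching on its own, with refill() standing in for adapter->alloc_rx_buf and 16 only an assumed value for E1000_RX_BUFFER_WRITE:

    #define RX_BUFFER_WRITE 16      /* assumed batch size (E1000_RX_BUFFER_WRITE) */

    /* Stand-in for adapter->alloc_rx_buf(): post 'count' fresh buffers. */
    static void refill(int count) { (void)count; }

    /* Sketch of the batched replenish pattern above: refill every
     * RX_BUFFER_WRITE cleaned descriptors, then top up whatever remains
     * (E1000_DESC_UNUSED in the driver) once the loop is done. */
    static void clean_ring(int work_to_do, int (*clean_one)(void),
                           int unused_at_end)
    {
            int cleaned_count = 0;

            while (work_to_do-- > 0 && clean_one()) {
                    cleaned_count++;
                    if (cleaned_count >= RX_BUFFER_WRITE) {
                            refill(cleaned_count);  /* one tail write per batch */
                            cleaned_count = 0;
                    }
            }

            if (unused_at_end)
                    refill(unused_at_end);          /* final top-up */
    }
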
@@ -3492,52 +3801,59 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
                       struct e1000_rx_ring *rx_ring)
 #endif
 {
-       union e1000_rx_desc_packet_split *rx_desc;
+       union e1000_rx_desc_packet_split *rx_desc, *next_rxd;
        struct net_device *netdev = adapter->netdev;
        struct pci_dev *pdev = adapter->pdev;
-       struct e1000_buffer *buffer_info;
+       struct e1000_buffer *buffer_info, *next_buffer;
        struct e1000_ps_page *ps_page;
        struct e1000_ps_page_dma *ps_page_dma;
-       struct sk_buff *skb;
+       struct sk_buff *skb, *next_skb;
        unsigned int i, j;
        uint32_t length, staterr;
+       int cleaned_count = 0;
        boolean_t cleaned = FALSE;
 
        i = rx_ring->next_to_clean;
        rx_desc = E1000_RX_DESC_PS(*rx_ring, i);
        staterr = le32_to_cpu(rx_desc->wb.middle.status_error);
+       buffer_info = &rx_ring->buffer_info[i];
 
-       while(staterr & E1000_RXD_STAT_DD) {
-               buffer_info = &rx_ring->buffer_info[i];
+       while (staterr & E1000_RXD_STAT_DD) {
                ps_page = &rx_ring->ps_page[i];
                ps_page_dma = &rx_ring->ps_page_dma[i];
 #ifdef CONFIG_E1000_NAPI
-               if(unlikely(*work_done >= work_to_do))
+               if (unlikely(*work_done >= work_to_do))
                        break;
                (*work_done)++;
 #endif
+               skb = buffer_info->skb;
+
+               if (++i == rx_ring->count) i = 0;
+               next_rxd = E1000_RX_DESC_PS(*rx_ring, i);
+               next_buffer = &rx_ring->buffer_info[i];
+               next_skb = next_buffer->skb;
+
                cleaned = TRUE;
+               cleaned_count++;
                pci_unmap_single(pdev, buffer_info->dma,
                                 buffer_info->length,
                                 PCI_DMA_FROMDEVICE);
 
-               skb = buffer_info->skb;
-
-               if(unlikely(!(staterr & E1000_RXD_STAT_EOP))) {
+               if (unlikely(!(staterr & E1000_RXD_STAT_EOP))) {
                        E1000_DBG("%s: Packet Split buffers didn't pick up"
                                  " the full packet\n", netdev->name);
                        dev_kfree_skb_irq(skb);
                        goto next_desc;
                }
 
-               if(unlikely(staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK)) {
+               if (unlikely(staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK)) {
                        dev_kfree_skb_irq(skb);
                        goto next_desc;
                }
 
                length = le16_to_cpu(rx_desc->wb.middle.length0);
 
-               if(unlikely(!length)) {
+               if (unlikely(!length)) {
                        E1000_DBG("%s: Last part of the packet spanning"
                                  " multiple descriptors\n", netdev->name);
                        dev_kfree_skb_irq(skb);
@@ -3547,8 +3863,8 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
                /* Good Receive */
                skb_put(skb, length);
 
-               for(j = 0; j < adapter->rx_ps_pages; j++) {
-                       if(!(length = le16_to_cpu(rx_desc->wb.upper.length[j])))
+               for (j = 0; j < adapter->rx_ps_pages; j++) {
+                       if (!(length = le16_to_cpu(rx_desc->wb.upper.length[j])))
                                break;
 
                        pci_unmap_page(pdev, ps_page_dma->ps_page_dma[j],
@@ -3568,15 +3884,11 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
                                  rx_desc->wb.lower.hi_dword.csum_ip.csum, skb);
                skb->protocol = eth_type_trans(skb, netdev);
 
-               if(likely(rx_desc->wb.upper.header_status &
-                         E1000_RXDPS_HDRSTAT_HDRSP)) {
+               if (likely(rx_desc->wb.upper.header_status &
+                         E1000_RXDPS_HDRSTAT_HDRSP))
                        adapter->rx_hdr_split++;
-#ifdef HAVE_RX_ZERO_COPY
-                       skb_shinfo(skb)->zero_copy = TRUE;
-#endif
-               }
 #ifdef CONFIG_E1000_NAPI
-               if(unlikely(adapter->vlgrp && (staterr & E1000_RXD_STAT_VP))) {
+               if (unlikely(adapter->vlgrp && (staterr & E1000_RXD_STAT_VP))) {
                        vlan_hwaccel_receive_skb(skb, adapter->vlgrp,
                                le16_to_cpu(rx_desc->wb.middle.vlan) &
                                E1000_RXD_SPC_VLAN_MASK);
@@ -3584,7 +3896,7 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
                        netif_receive_skb(skb);
                }
 #else /* CONFIG_E1000_NAPI */
-               if(unlikely(adapter->vlgrp && (staterr & E1000_RXD_STAT_VP))) {
+               if (unlikely(adapter->vlgrp && (staterr & E1000_RXD_STAT_VP))) {
                        vlan_hwaccel_rx(skb, adapter->vlgrp,
                                le16_to_cpu(rx_desc->wb.middle.vlan) &
                                E1000_RXD_SPC_VLAN_MASK);
@@ -3593,18 +3905,31 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
                }
 #endif /* CONFIG_E1000_NAPI */
                netdev->last_rx = jiffies;
-               rx_ring->pkt++;
+#ifdef CONFIG_E1000_MQ
+               rx_ring->rx_stats.packets++;
+               rx_ring->rx_stats.bytes += length;
+#endif
 
 next_desc:
                rx_desc->wb.middle.status_error &= ~0xFF;
                buffer_info->skb = NULL;
-               if(unlikely(++i == rx_ring->count)) i = 0;
 
-               rx_desc = E1000_RX_DESC_PS(*rx_ring, i);
+               /* return some buffers to hardware, one at a time is too slow */
+               if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
+                       adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
+                       cleaned_count = 0;
+               }
+
+               rx_desc = next_rxd;
+               buffer_info = next_buffer;
+
                staterr = le32_to_cpu(rx_desc->wb.middle.status_error);
        }
        rx_ring->next_to_clean = i;
-       adapter->alloc_rx_buf(adapter, rx_ring);
+
+       cleaned_count = E1000_DESC_UNUSED(rx_ring);
+       if (cleaned_count)
+               adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
 
        return cleaned;
 }
@@ -3616,7 +3941,8 @@ next_desc:
 
 static void
 e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
-                       struct e1000_rx_ring *rx_ring)
+                       struct e1000_rx_ring *rx_ring,
+                      int cleaned_count)
 {
        struct net_device *netdev = adapter->netdev;
        struct pci_dev *pdev = adapter->pdev;
@@ -3629,11 +3955,18 @@ e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
        i = rx_ring->next_to_use;
        buffer_info = &rx_ring->buffer_info[i];
 
-       while(!buffer_info->skb) {
-               skb = dev_alloc_skb(bufsz);
+       while (cleaned_count--) {
+               if (!(skb = buffer_info->skb))
+                       skb = dev_alloc_skb(bufsz);
+               else {
+                       skb_trim(skb, 0);
+                       goto map_skb;
+               }
+
 
-               if(unlikely(!skb)) {
+               if (unlikely(!skb)) {
                        /* Better luck next round */
+                       adapter->alloc_rx_buff_failed++;
                        break;
                }
 
@@ -3670,6 +4003,7 @@ e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
 
                buffer_info->skb = skb;
                buffer_info->length = adapter->rx_buffer_len;
+map_skb:
                buffer_info->dma = pci_map_single(pdev,
                                                  skb->data,
                                                  adapter->rx_buffer_len,
@@ -3695,20 +4029,23 @@ e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
                rx_desc = E1000_RX_DESC(*rx_ring, i);
                rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
 
-               if(unlikely((i & ~(E1000_RX_BUFFER_WRITE - 1)) == i)) {
-                       /* Force memory writes to complete before letting h/w
-                        * know there are new descriptors to fetch.  (Only
-                        * applicable for weak-ordered memory model archs,
-                        * such as IA-64). */
-                       wmb();
-                       writel(i, adapter->hw.hw_addr + rx_ring->rdt);
-               }
-
-               if(unlikely(++i == rx_ring->count)) i = 0;
+               if (unlikely(++i == rx_ring->count))
+                       i = 0;
                buffer_info = &rx_ring->buffer_info[i];
        }
 
-       rx_ring->next_to_use = i;
+       if (likely(rx_ring->next_to_use != i)) {
+               rx_ring->next_to_use = i;
+               if (unlikely(i-- == 0))
+                       i = (rx_ring->count - 1);
+
+               /* Force memory writes to complete before letting h/w
+                * know there are new descriptors to fetch.  (Only
+                * applicable for weak-ordered memory model archs,
+                * such as IA-64). */
+               wmb();
+               writel(i, adapter->hw.hw_addr + rx_ring->rdt);
+       }
 }
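
The tail update above only fires when the loop actually produced descriptors, and it writes the index of the last filled slot (one behind next_to_use, wrapping at the ring size) after a wmb() so weakly ordered CPUs such as IA-64 make the descriptor writes visible before hardware fetches them. The index arithmetic in isolation:

    /* Sketch of the RDT value written above: 'next' is the slot software will
     * fill next (next_to_use after the loop), so hardware's tail is the
     * previously filled slot, wrapping at ring_count. */
    static unsigned int rdt_value(unsigned int next, unsigned int ring_count)
    {
            return next == 0 ? ring_count - 1 : next - 1;
    }

    /* The register write is skipped entirely when next_to_use did not move. */
    static int should_write_tail(unsigned int old_next, unsigned int new_next)
    {
            return old_next != new_next;
    }
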
 
 /**
@@ -3718,7 +4055,8 @@ e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
 
 static void
 e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
-                          struct e1000_rx_ring *rx_ring)
+                          struct e1000_rx_ring *rx_ring,
+                         int cleaned_count)
 {
        struct net_device *netdev = adapter->netdev;
        struct pci_dev *pdev = adapter->pdev;
@@ -3734,16 +4072,18 @@ e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
        ps_page = &rx_ring->ps_page[i];
        ps_page_dma = &rx_ring->ps_page_dma[i];
 
-       while(!buffer_info->skb) {
+       while (cleaned_count--) {
                rx_desc = E1000_RX_DESC_PS(*rx_ring, i);
 
-               for(j = 0; j < PS_PAGE_BUFFERS; j++) {
+               for (j = 0; j < PS_PAGE_BUFFERS; j++) {
                        if (j < adapter->rx_ps_pages) {
                                if (likely(!ps_page->ps_page[j])) {
                                        ps_page->ps_page[j] =
                                                alloc_page(GFP_ATOMIC);
-                                       if (unlikely(!ps_page->ps_page[j]))
+                                       if (unlikely(!ps_page->ps_page[j])) {
+                                               adapter->alloc_rx_buff_failed++;
                                                goto no_buffers;
+                                       }
                                        ps_page_dma->ps_page_dma[j] =
                                                pci_map_page(pdev,
                                                            ps_page->ps_page[j],
@@ -3751,7 +4091,7 @@ e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
                                                            PCI_DMA_FROMDEVICE);
                                }
                                /* Refresh the desc even if buffer_addrs didn't
-                                * change because each write-back erases 
+                                * change because each write-back erases
                                 * this info.
                                 */
                                rx_desc->read.buffer_addr[j+1] =
@@ -3762,8 +4102,10 @@ e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
 
                skb = dev_alloc_skb(adapter->rx_ps_bsize0 + NET_IP_ALIGN);
 
-               if(unlikely(!skb))
+               if (unlikely(!skb)) {
+                       adapter->alloc_rx_buff_failed++;
                        break;
+               }
 
                /* Make buffer alignment 2 beyond a 16 byte boundary
                 * this will result in a 16 byte aligned IP header after
@@ -3781,27 +4123,28 @@ e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
 
                rx_desc->read.buffer_addr[0] = cpu_to_le64(buffer_info->dma);
 
-               if(unlikely((i & ~(E1000_RX_BUFFER_WRITE - 1)) == i)) {
-                       /* Force memory writes to complete before letting h/w
-                        * know there are new descriptors to fetch.  (Only
-                        * applicable for weak-ordered memory model archs,
-                        * such as IA-64). */
-                       wmb();
-                       /* Hardware increments by 16 bytes, but packet split
-                        * descriptors are 32 bytes...so we increment tail
-                        * twice as much.
-                        */
-                       writel(i<<1, adapter->hw.hw_addr + rx_ring->rdt);
-               }
-
-               if(unlikely(++i == rx_ring->count)) i = 0;
+               if (unlikely(++i == rx_ring->count)) i = 0;
                buffer_info = &rx_ring->buffer_info[i];
                ps_page = &rx_ring->ps_page[i];
                ps_page_dma = &rx_ring->ps_page_dma[i];
        }
 
 no_buffers:
-       rx_ring->next_to_use = i;
+       if (likely(rx_ring->next_to_use != i)) {
+               rx_ring->next_to_use = i;
+               if (unlikely(i-- == 0)) i = (rx_ring->count - 1);
+
+               /* Force memory writes to complete before letting h/w
+                * know there are new descriptors to fetch.  (Only
+                * applicable for weak-ordered memory model archs,
+                * such as IA-64). */
+               wmb();
+               /* Hardware increments by 16 bytes, but packet split
+                * descriptors are 32 bytes...so we increment tail
+                * twice as much.
+                */
+               writel(i<<1, adapter->hw.hw_addr + rx_ring->rdt);
+       }
 }
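
The packet-split variant keeps the same "previous index after a barrier" pattern, with the twist spelled out in the comment: hardware steps the tail in 16-byte units while packet-split descriptors are 32 bytes, so the written value is the index doubled. In isolation:

    /* Sketch of the packet-split tail value above: same previous-slot logic,
     * doubled because each 32-byte PS descriptor spans two 16-byte steps. */
    static unsigned int ps_rdt_value(unsigned int next, unsigned int ring_count)
    {
            unsigned int last = (next == 0) ? ring_count - 1 : next - 1;
            return last << 1;
    }
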
 
 /**
@@ -3815,24 +4158,24 @@ e1000_smartspeed(struct e1000_adapter *adapter)
        uint16_t phy_status;
        uint16_t phy_ctrl;
 
-       if((adapter->hw.phy_type != e1000_phy_igp) || !adapter->hw.autoneg ||
+       if ((adapter->hw.phy_type != e1000_phy_igp) || !adapter->hw.autoneg ||
           !(adapter->hw.autoneg_advertised & ADVERTISE_1000_FULL))
                return;
 
-       if(adapter->smartspeed == 0) {
+       if (adapter->smartspeed == 0) {
                /* If Master/Slave config fault is asserted twice,
                 * we assume back-to-back */
                e1000_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_status);
-               if(!(phy_status & SR_1000T_MS_CONFIG_FAULT)) return;
+               if (!(phy_status & SR_1000T_MS_CONFIG_FAULT)) return;
                e1000_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_status);
-               if(!(phy_status & SR_1000T_MS_CONFIG_FAULT)) return;
+               if (!(phy_status & SR_1000T_MS_CONFIG_FAULT)) return;
                e1000_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_ctrl);
-               if(phy_ctrl & CR_1000T_MS_ENABLE) {
+               if (phy_ctrl & CR_1000T_MS_ENABLE) {
                        phy_ctrl &= ~CR_1000T_MS_ENABLE;
                        e1000_write_phy_reg(&adapter->hw, PHY_1000T_CTRL,
                                            phy_ctrl);
                        adapter->smartspeed++;
-                       if(!e1000_phy_setup_autoneg(&adapter->hw) &&
+                       if (!e1000_phy_setup_autoneg(&adapter->hw) &&
                           !e1000_read_phy_reg(&adapter->hw, PHY_CTRL,
                                               &phy_ctrl)) {
                                phy_ctrl |= (MII_CR_AUTO_NEG_EN |
@@ -3842,12 +4185,12 @@ e1000_smartspeed(struct e1000_adapter *adapter)
                        }
                }
                return;
-       } else if(adapter->smartspeed == E1000_SMARTSPEED_DOWNSHIFT) {
+       } else if (adapter->smartspeed == E1000_SMARTSPEED_DOWNSHIFT) {
                /* If still no link, perhaps using 2/3 pair cable */
                e1000_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_ctrl);
                phy_ctrl |= CR_1000T_MS_ENABLE;
                e1000_write_phy_reg(&adapter->hw, PHY_1000T_CTRL, phy_ctrl);
-               if(!e1000_phy_setup_autoneg(&adapter->hw) &&
+               if (!e1000_phy_setup_autoneg(&adapter->hw) &&
                   !e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &phy_ctrl)) {
                        phy_ctrl |= (MII_CR_AUTO_NEG_EN |
                                     MII_CR_RESTART_AUTO_NEG);
@@ -3855,7 +4198,7 @@ e1000_smartspeed(struct e1000_adapter *adapter)
                }
        }
        /* Restart process after E1000_SMARTSPEED_MAX iterations */
-       if(adapter->smartspeed++ == E1000_SMARTSPEED_MAX)
+       if (adapter->smartspeed++ == E1000_SMARTSPEED_MAX)
                adapter->smartspeed = 0;
 }
 
@@ -3896,7 +4239,7 @@ e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
        uint16_t spddplx;
        unsigned long flags;
 
-       if(adapter->hw.media_type != e1000_media_type_copper)
+       if (adapter->hw.media_type != e1000_media_type_copper)
                return -EOPNOTSUPP;
 
        switch (cmd) {
@@ -3904,10 +4247,10 @@ e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
                data->phy_id = adapter->hw.phy_addr;
                break;
        case SIOCGMIIREG:
-               if(!capable(CAP_NET_ADMIN))
+               if (!capable(CAP_NET_ADMIN))
                        return -EPERM;
                spin_lock_irqsave(&adapter->stats_lock, flags);
-               if(e1000_read_phy_reg(&adapter->hw, data->reg_num & 0x1F,
+               if (e1000_read_phy_reg(&adapter->hw, data->reg_num & 0x1F,
                                   &data->val_out)) {
                        spin_unlock_irqrestore(&adapter->stats_lock, flags);
                        return -EIO;
@@ -3915,23 +4258,23 @@ e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
                spin_unlock_irqrestore(&adapter->stats_lock, flags);
                break;
        case SIOCSMIIREG:
-               if(!capable(CAP_NET_ADMIN))
+               if (!capable(CAP_NET_ADMIN))
                        return -EPERM;
-               if(data->reg_num & ~(0x1F))
+               if (data->reg_num & ~(0x1F))
                        return -EFAULT;
                mii_reg = data->val_in;
                spin_lock_irqsave(&adapter->stats_lock, flags);
-               if(e1000_write_phy_reg(&adapter->hw, data->reg_num,
+               if (e1000_write_phy_reg(&adapter->hw, data->reg_num,
                                        mii_reg)) {
                        spin_unlock_irqrestore(&adapter->stats_lock, flags);
                        return -EIO;
                }
-               if(adapter->hw.phy_type == e1000_phy_m88) {
+               if (adapter->hw.phy_type == e1000_phy_m88) {
                        switch (data->reg_num) {
                        case PHY_CTRL:
-                               if(mii_reg & MII_CR_POWER_DOWN)
+                               if (mii_reg & MII_CR_POWER_DOWN)
                                        break;
-                               if(mii_reg & MII_CR_AUTO_NEG_EN) {
+                               if (mii_reg & MII_CR_AUTO_NEG_EN) {
                                        adapter->hw.autoneg = 1;
                                        adapter->hw.autoneg_advertised = 0x2F;
                                } else {
@@ -3946,14 +4289,14 @@ e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
                                                   HALF_DUPLEX;
                                        retval = e1000_set_spd_dplx(adapter,
                                                                    spddplx);
-                                       if(retval) {
+                                       if (retval) {
                                                spin_unlock_irqrestore(
-                                                       &adapter->stats_lock, 
+                                                       &adapter->stats_lock,
                                                        flags);
                                                return retval;
                                        }
                                }
-                               if(netif_running(adapter->netdev)) {
+                               if (netif_running(adapter->netdev)) {
                                        e1000_down(adapter);
                                        e1000_up(adapter);
                                } else
@@ -3961,7 +4304,7 @@ e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
                                break;
                        case M88E1000_PHY_SPEC_CTRL:
                        case M88E1000_EXT_PHY_SPEC_CTRL:
-                               if(e1000_phy_reset(&adapter->hw)) {
+                               if (e1000_phy_reset(&adapter->hw)) {
                                        spin_unlock_irqrestore(
                                                &adapter->stats_lock, flags);
                                        return -EIO;
@@ -3971,9 +4314,9 @@ e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
                } else {
                        switch (data->reg_num) {
                        case PHY_CTRL:
-                               if(mii_reg & MII_CR_POWER_DOWN)
+                               if (mii_reg & MII_CR_POWER_DOWN)
                                        break;
-                               if(netif_running(adapter->netdev)) {
+                               if (netif_running(adapter->netdev)) {
                                        e1000_down(adapter);
                                        e1000_up(adapter);
                                } else
@@ -3995,7 +4338,7 @@ e1000_pci_set_mwi(struct e1000_hw *hw)
        struct e1000_adapter *adapter = hw->back;
        int ret_val = pci_set_mwi(adapter->pdev);
 
-       if(ret_val)
+       if (ret_val)
                DPRINTK(PROBE, ERR, "Error in setting MWI\n");
 }
 
@@ -4044,7 +4387,7 @@ e1000_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
        e1000_irq_disable(adapter);
        adapter->vlgrp = grp;
 
-       if(grp) {
+       if (grp) {
                /* enable VLAN tag insert/strip */
                ctrl = E1000_READ_REG(&adapter->hw, CTRL);
                ctrl |= E1000_CTRL_VME;
@@ -4066,7 +4409,7 @@ e1000_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
                rctl = E1000_READ_REG(&adapter->hw, RCTL);
                rctl &= ~E1000_RCTL_VFE;
                E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
-               if(adapter->mng_vlan_id != (uint16_t)E1000_MNG_VLAN_NONE) {
+               if (adapter->mng_vlan_id != (uint16_t)E1000_MNG_VLAN_NONE) {
                        e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
                        adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
                }
@@ -4080,9 +4423,10 @@ e1000_vlan_rx_add_vid(struct net_device *netdev, uint16_t vid)
 {
        struct e1000_adapter *adapter = netdev_priv(netdev);
        uint32_t vfta, index;
-       if((adapter->hw.mng_cookie.status &
-               E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
-               (vid == adapter->mng_vlan_id))
+
+       if ((adapter->hw.mng_cookie.status &
+            E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
+           (vid == adapter->mng_vlan_id))
                return;
        /* add VID to filter table */
        index = (vid >> 5) & 0x7F;
@@ -4099,15 +4443,19 @@ e1000_vlan_rx_kill_vid(struct net_device *netdev, uint16_t vid)
 
        e1000_irq_disable(adapter);
 
-       if(adapter->vlgrp)
+       if (adapter->vlgrp)
                adapter->vlgrp->vlan_devices[vid] = NULL;
 
        e1000_irq_enable(adapter);
 
-       if((adapter->hw.mng_cookie.status &
-               E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
-               (vid == adapter->mng_vlan_id))
+       if ((adapter->hw.mng_cookie.status &
+            E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
+           (vid == adapter->mng_vlan_id)) {
+               /* release control to f/w */
+               e1000_release_hw_control(adapter);
                return;
+       }
+
        /* remove VID from filter table */
        index = (vid >> 5) & 0x7F;
        vfta = E1000_READ_REG_ARRAY(&adapter->hw, VFTA, index);
@@ -4120,10 +4468,10 @@ e1000_restore_vlan(struct e1000_adapter *adapter)
 {
        e1000_vlan_rx_register(adapter->netdev, adapter->vlgrp);
 
-       if(adapter->vlgrp) {
+       if (adapter->vlgrp) {
                uint16_t vid;
-               for(vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
-                       if(!adapter->vlgrp->vlan_devices[vid])
+               for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
+                       if (!adapter->vlgrp->vlan_devices[vid])
                                continue;
                        e1000_vlan_rx_add_vid(adapter->netdev, vid);
                }
@@ -4136,13 +4484,13 @@ e1000_set_spd_dplx(struct e1000_adapter *adapter, uint16_t spddplx)
        adapter->hw.autoneg = 0;
 
        /* Fiber NICs only allow 1 Gbps Full duplex */
-       if((adapter->hw.media_type == e1000_media_type_fiber) &&
+       if ((adapter->hw.media_type == e1000_media_type_fiber) &&
                spddplx != (SPEED_1000 + DUPLEX_FULL)) {
                DPRINTK(PROBE, ERR, "Unsupported Speed/Duplex configuration\n");
                return -EINVAL;
        }
 
-       switch(spddplx) {
+       switch (spddplx) {
        case SPEED_10 + DUPLEX_HALF:
                adapter->hw.forced_speed_duplex = e1000_10_half;
                break;
@@ -4168,35 +4516,92 @@ e1000_set_spd_dplx(struct e1000_adapter *adapter, uint16_t spddplx)
 }
 
 #ifdef CONFIG_PM
+/* these functions save and restore 16 or 64 dwords (64-256 bytes) of config
+ * space versus the 64 bytes that pci_[save|restore]_state handle
+ */
+#define PCIE_CONFIG_SPACE_LEN 256
+#define PCI_CONFIG_SPACE_LEN 64
+static int
+e1000_pci_save_state(struct e1000_adapter *adapter)
+{
+       struct pci_dev *dev = adapter->pdev;
+       int size;
+       int i;
+       if (adapter->hw.mac_type >= e1000_82571)
+               size = PCIE_CONFIG_SPACE_LEN;
+       else
+               size = PCI_CONFIG_SPACE_LEN;
+
+       WARN_ON(adapter->config_space != NULL);
+
+       adapter->config_space = kmalloc(size, GFP_KERNEL);
+       if (!adapter->config_space) {
+               DPRINTK(PROBE, ERR, "unable to allocate %d bytes\n", size);
+               return -ENOMEM;
+       }
+       for (i = 0; i < (size / 4); i++)
+               pci_read_config_dword(dev, i * 4, &adapter->config_space[i]);
+       return 0;
+}
+
+static void
+e1000_pci_restore_state(struct e1000_adapter *adapter)
+{
+       struct pci_dev *dev = adapter->pdev;
+       int size;
+       int i;
+       if (adapter->config_space == NULL)
+               return;
+       if (adapter->hw.mac_type >= e1000_82571)
+               size = PCIE_CONFIG_SPACE_LEN;
+       else
+               size = PCI_CONFIG_SPACE_LEN;
+       for (i = 0; i < (size / 4); i++)
+               pci_write_config_dword(dev, i * 4, adapter->config_space[i]);
+       kfree(adapter->config_space);
+       adapter->config_space = NULL;
+       return;
+}
+#endif /* CONFIG_PM */
+
 static int
 e1000_suspend(struct pci_dev *pdev, pm_message_t state)
 {
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct e1000_adapter *adapter = netdev_priv(netdev);
-       uint32_t ctrl, ctrl_ext, rctl, manc, status, swsm;
+       uint32_t ctrl, ctrl_ext, rctl, manc, status;
        uint32_t wufc = adapter->wol;
+       int retval = 0;
 
        netif_device_detach(netdev);
 
-       if(netif_running(netdev))
+       if (netif_running(netdev))
                e1000_down(adapter);
 
+#ifdef CONFIG_PM
+       /* implement our own version of pci_save_state(pdev) because pci 
+        * express adapters have larger 256 byte config spaces */
+       retval = e1000_pci_save_state(adapter);
+       if (retval)
+               return retval;
+#endif
+
        status = E1000_READ_REG(&adapter->hw, STATUS);
-       if(status & E1000_STATUS_LU)
+       if (status & E1000_STATUS_LU)
                wufc &= ~E1000_WUFC_LNKC;
 
-       if(wufc) {
+       if (wufc) {
                e1000_setup_rctl(adapter);
                e1000_set_multi(netdev);
 
                /* turn on all-multi mode if wake on multicast is enabled */
-               if(adapter->wol & E1000_WUFC_MC) {
+               if (adapter->wol & E1000_WUFC_MC) {
                        rctl = E1000_READ_REG(&adapter->hw, RCTL);
                        rctl |= E1000_RCTL_MPE;
                        E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
                }
 
-               if(adapter->hw.mac_type >= e1000_82540) {
+               if (adapter->hw.mac_type >= e1000_82540) {
                        ctrl = E1000_READ_REG(&adapter->hw, CTRL);
                        /* advertise wake from D3Cold */
                        #define E1000_CTRL_ADVD3WUC 0x00100000
@@ -4207,7 +4612,7 @@ e1000_suspend(struct pci_dev *pdev, pm_message_t state)
                        E1000_WRITE_REG(&adapter->hw, CTRL, ctrl);
                }
 
-               if(adapter->hw.media_type == e1000_media_type_fiber ||
+               if (adapter->hw.media_type == e1000_media_type_fiber ||
                   adapter->hw.media_type == e1000_media_type_internal_serdes) {
                        /* keep the laser running in D3 */
                        ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT);
@@ -4220,96 +4625,96 @@ e1000_suspend(struct pci_dev *pdev, pm_message_t state)
 
                E1000_WRITE_REG(&adapter->hw, WUC, E1000_WUC_PME_EN);
                E1000_WRITE_REG(&adapter->hw, WUFC, wufc);
-               pci_enable_wake(pdev, 3, 1);
-               pci_enable_wake(pdev, 4, 1); /* 4 == D3 cold */
+               retval = pci_enable_wake(pdev, PCI_D3hot, 1);
+               if (retval)
+                       DPRINTK(PROBE, ERR, "Error enabling D3 wake\n");
+               retval = pci_enable_wake(pdev, PCI_D3cold, 1);
+               if (retval)
+                       DPRINTK(PROBE, ERR, "Error enabling D3 cold wake\n");
        } else {
                E1000_WRITE_REG(&adapter->hw, WUC, 0);
                E1000_WRITE_REG(&adapter->hw, WUFC, 0);
-               pci_enable_wake(pdev, 3, 0);
-               pci_enable_wake(pdev, 4, 0); /* 4 == D3 cold */
+               retval = pci_enable_wake(pdev, PCI_D3hot, 0);
+               if (retval)
+                       DPRINTK(PROBE, ERR, "Error enabling D3 wake\n");
+               retval = pci_enable_wake(pdev, PCI_D3cold, 0);
+               if (retval)
+                       DPRINTK(PROBE, ERR, "Error enabling D3 cold wake\n");
        }
 
-       pci_save_state(pdev);
-
-       if(adapter->hw.mac_type >= e1000_82540 &&
+       if (adapter->hw.mac_type >= e1000_82540 &&
           adapter->hw.media_type == e1000_media_type_copper) {
                manc = E1000_READ_REG(&adapter->hw, MANC);
-               if(manc & E1000_MANC_SMBUS_EN) {
+               if (manc & E1000_MANC_SMBUS_EN) {
                        manc |= E1000_MANC_ARP_EN;
                        E1000_WRITE_REG(&adapter->hw, MANC, manc);
-                       pci_enable_wake(pdev, 3, 1);
-                       pci_enable_wake(pdev, 4, 1); /* 4 == D3 cold */
+                       retval = pci_enable_wake(pdev, PCI_D3hot, 1);
+                       if (retval)
+                               DPRINTK(PROBE, ERR, "Error enabling D3 wake\n");
+                       retval = pci_enable_wake(pdev, PCI_D3cold, 1);
+                       if (retval)
+                               DPRINTK(PROBE, ERR, "Error enabling D3 cold wake\n");
                }
        }
 
-       switch(adapter->hw.mac_type) {
-       case e1000_82571:
-       case e1000_82572:
-               ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT);
-               E1000_WRITE_REG(&adapter->hw, CTRL_EXT,
-                               ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
-               break;
-       case e1000_82573:
-               swsm = E1000_READ_REG(&adapter->hw, SWSM);
-               E1000_WRITE_REG(&adapter->hw, SWSM,
-                               swsm & ~E1000_SWSM_DRV_LOAD);
-               break;
-       default:
-               break;
-       }
+       /* Release control of h/w to f/w.  If f/w is AMT enabled, this
+        * would have already happened in close and is redundant. */
+       e1000_release_hw_control(adapter);
 
        pci_disable_device(pdev);
-       pci_set_power_state(pdev, pci_choose_state(pdev, state));
+
+       retval = pci_set_power_state(pdev, pci_choose_state(pdev, state));
+       if (retval)
+               DPRINTK(PROBE, ERR, "Error in setting power state\n");
 
        return 0;
 }
 
+#ifdef CONFIG_PM
 static int
 e1000_resume(struct pci_dev *pdev)
 {
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct e1000_adapter *adapter = netdev_priv(netdev);
-       uint32_t manc, ret_val, swsm;
-       uint32_t ctrl_ext;
+       int retval;
+       uint32_t manc, ret_val;
 
-       pci_set_power_state(pdev, PCI_D0);
-       pci_restore_state(pdev);
+       retval = pci_set_power_state(pdev, PCI_D0);
+       if (retval)
+               DPRINTK(PROBE, ERR, "Error in setting power state\n");
+       e1000_pci_restore_state(adapter);
        ret_val = pci_enable_device(pdev);
        pci_set_master(pdev);
 
-       pci_enable_wake(pdev, PCI_D3hot, 0);
-       pci_enable_wake(pdev, PCI_D3cold, 0);
+       retval = pci_enable_wake(pdev, PCI_D3hot, 0);
+       if (retval)
+               DPRINTK(PROBE, ERR, "Error enabling D3 wake\n");
+       retval = pci_enable_wake(pdev, PCI_D3cold, 0);
+       if (retval)
+               DPRINTK(PROBE, ERR, "Error enabling D3 cold wake\n");
 
        e1000_reset(adapter);
        E1000_WRITE_REG(&adapter->hw, WUS, ~0);
 
-       if(netif_running(netdev))
+       if (netif_running(netdev))
                e1000_up(adapter);
 
        netif_device_attach(netdev);
 
-       if(adapter->hw.mac_type >= e1000_82540 &&
+       if (adapter->hw.mac_type >= e1000_82540 &&
           adapter->hw.media_type == e1000_media_type_copper) {
                manc = E1000_READ_REG(&adapter->hw, MANC);
                manc &= ~(E1000_MANC_ARP_EN);
                E1000_WRITE_REG(&adapter->hw, MANC, manc);
        }
 
-       switch(adapter->hw.mac_type) {
-       case e1000_82571:
-       case e1000_82572:
-               ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT);
-               E1000_WRITE_REG(&adapter->hw, CTRL_EXT,
-                               ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
-               break;
-       case e1000_82573:
-               swsm = E1000_READ_REG(&adapter->hw, SWSM);
-               E1000_WRITE_REG(&adapter->hw, SWSM,
-                               swsm | E1000_SWSM_DRV_LOAD);
-               break;
-       default:
-               break;
-       }
+       /* If the controller is 82573 and f/w is AMT, do not set
+        * DRV_LOAD until the interface is up.  For all other cases,
+        * let the f/w know that the h/w is now under the control
+        * of the driver. */
+       if (adapter->hw.mac_type != e1000_82573 ||
+           !e1000_check_mng_mode(&adapter->hw))
+               e1000_get_hw_control(adapter);
 
        return 0;
 }
@@ -4327,6 +4732,9 @@ e1000_netpoll(struct net_device *netdev)
        disable_irq(adapter->pdev->irq);
        e1000_intr(adapter->pdev->irq, netdev, NULL);
        e1000_clean_tx_irq(adapter, adapter->tx_ring);
+#ifndef CONFIG_E1000_NAPI
+       adapter->clean_rx(adapter, adapter->rx_ring);
+#endif
        enable_irq(adapter->pdev->irq);
 }
 #endif
index aac64de6143708b5c7aca85aaeb70a50f33188b4..9790db974dc1dcc3ff8f3dfc953727d07c8e14a2 100644 (file)
@@ -47,7 +47,7 @@
                                BUG(); \
                        } else { \
                                msleep(x); \
-                       } } while(0)
+                       } } while (0)
 
 /* Some workarounds require millisecond delays and are run during interrupt
  * context.  Most notably, when establishing link, the phy may need tweaking
index ccbbe5ad8e0fb230cd8477fe4df0b2907cea4756..3768d83cd5774e9c4c2c4ca9982f8f14adbd696b 100644 (file)
@@ -177,7 +177,7 @@ E1000_PARAM(RxAbsIntDelay, "Receive Absolute Interrupt Delay");
  *
  * Valid Range: 100-100000 (0=off, 1=dynamic)
  *
- * Default Value: 1
+ * Default Value: 8000
  */
 
 E1000_PARAM(InterruptThrottleRate, "Interrupt Throttling Rate");
@@ -227,7 +227,7 @@ static int __devinit
 e1000_validate_option(int *value, struct e1000_option *opt,
                struct e1000_adapter *adapter)
 {
-       if(*value == OPTION_UNSET) {
+       if (*value == OPTION_UNSET) {
                *value = opt->def;
                return 0;
        }
@@ -244,7 +244,7 @@ e1000_validate_option(int *value, struct e1000_option *opt,
                }
                break;
        case range_option:
-               if(*value >= opt->arg.r.min && *value <= opt->arg.r.max) {
+               if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) {
                        DPRINTK(PROBE, INFO,
                                        "%s set to %i\n", opt->name, *value);
                        return 0;
@@ -254,10 +254,10 @@ e1000_validate_option(int *value, struct e1000_option *opt,
                int i;
                struct e1000_opt_list *ent;
 
-               for(i = 0; i < opt->arg.l.nr; i++) {
+               for (i = 0; i < opt->arg.l.nr; i++) {
                        ent = &opt->arg.l.p[i];
-                       if(*value == ent->i) {
-                               if(ent->str[0] != '\0')
+                       if (*value == ent->i) {
+                               if (ent->str[0] != '\0')
                                        DPRINTK(PROBE, INFO, "%s\n", ent->str);
                                return 0;
                        }
@@ -291,7 +291,7 @@ void __devinit
 e1000_check_options(struct e1000_adapter *adapter)
 {
        int bd = adapter->bd_number;
-       if(bd >= E1000_MAX_NIC) {
+       if (bd >= E1000_MAX_NIC) {
                DPRINTK(PROBE, NOTICE,
                       "Warning: no configuration for board #%i\n", bd);
                DPRINTK(PROBE, NOTICE, "Using defaults for all values\n");
@@ -315,12 +315,12 @@ e1000_check_options(struct e1000_adapter *adapter)
                if (num_TxDescriptors > bd) {
                        tx_ring->count = TxDescriptors[bd];
                        e1000_validate_option(&tx_ring->count, &opt, adapter);
-                       E1000_ROUNDUP(tx_ring->count, 
+                       E1000_ROUNDUP(tx_ring->count,
                                                REQ_TX_DESCRIPTOR_MULTIPLE);
                } else {
                        tx_ring->count = opt.def;
                }
-               for (i = 0; i < adapter->num_queues; i++)
+               for (i = 0; i < adapter->num_tx_queues; i++)
                        tx_ring[i].count = tx_ring->count;
        }
        { /* Receive Descriptor Count */
@@ -341,12 +341,12 @@ e1000_check_options(struct e1000_adapter *adapter)
                if (num_RxDescriptors > bd) {
                        rx_ring->count = RxDescriptors[bd];
                        e1000_validate_option(&rx_ring->count, &opt, adapter);
-                       E1000_ROUNDUP(rx_ring->count, 
+                       E1000_ROUNDUP(rx_ring->count,
                                                REQ_RX_DESCRIPTOR_MULTIPLE);
                } else {
                        rx_ring->count = opt.def;
                }
-               for (i = 0; i < adapter->num_queues; i++)
+               for (i = 0; i < adapter->num_rx_queues; i++)
                        rx_ring[i].count = rx_ring->count;
        }
        { /* Checksum Offload Enable/Disable */
@@ -388,7 +388,7 @@ e1000_check_options(struct e1000_adapter *adapter)
                        e1000_validate_option(&fc, &opt, adapter);
                        adapter->hw.fc = adapter->hw.original_fc = fc;
                } else {
-                       adapter->hw.fc = opt.def;
+                       adapter->hw.fc = adapter->hw.original_fc = opt.def;
                }
        }
        { /* Transmit Interrupt Delay */
@@ -403,7 +403,7 @@ e1000_check_options(struct e1000_adapter *adapter)
 
                if (num_TxIntDelay > bd) {
                        adapter->tx_int_delay = TxIntDelay[bd];
-                       e1000_validate_option(&adapter->tx_int_delay, &opt, 
+                       e1000_validate_option(&adapter->tx_int_delay, &opt,
                                                                adapter);
                } else {
                        adapter->tx_int_delay = opt.def;
@@ -421,7 +421,7 @@ e1000_check_options(struct e1000_adapter *adapter)
 
                if (num_TxAbsIntDelay > bd) {
                        adapter->tx_abs_int_delay = TxAbsIntDelay[bd];
-                       e1000_validate_option(&adapter->tx_abs_int_delay, &opt, 
+                       e1000_validate_option(&adapter->tx_abs_int_delay, &opt,
                                                                adapter);
                } else {
                        adapter->tx_abs_int_delay = opt.def;
@@ -439,7 +439,7 @@ e1000_check_options(struct e1000_adapter *adapter)
 
                if (num_RxIntDelay > bd) {
                        adapter->rx_int_delay = RxIntDelay[bd];
-                       e1000_validate_option(&adapter->rx_int_delay, &opt, 
+                       e1000_validate_option(&adapter->rx_int_delay, &opt,
                                                                adapter);
                } else {
                        adapter->rx_int_delay = opt.def;
@@ -457,7 +457,7 @@ e1000_check_options(struct e1000_adapter *adapter)
 
                if (num_RxAbsIntDelay > bd) {
                        adapter->rx_abs_int_delay = RxAbsIntDelay[bd];
-                       e1000_validate_option(&adapter->rx_abs_int_delay, &opt, 
+                       e1000_validate_option(&adapter->rx_abs_int_delay, &opt,
                                                                adapter);
                } else {
                        adapter->rx_abs_int_delay = opt.def;
@@ -475,17 +475,17 @@ e1000_check_options(struct e1000_adapter *adapter)
 
                if (num_InterruptThrottleRate > bd) {
                        adapter->itr = InterruptThrottleRate[bd];
-                       switch(adapter->itr) {
+                       switch (adapter->itr) {
                        case 0:
-                               DPRINTK(PROBE, INFO, "%s turned off\n", 
+                               DPRINTK(PROBE, INFO, "%s turned off\n",
                                        opt.name);
                                break;
                        case 1:
-                               DPRINTK(PROBE, INFO, "%s set to dynamic mode\n", 
+                               DPRINTK(PROBE, INFO, "%s set to dynamic mode\n",
                                        opt.name);
                                break;
                        default:
-                               e1000_validate_option(&adapter->itr, &opt, 
+                               e1000_validate_option(&adapter->itr, &opt,
                                        adapter);
                                break;
                        }
@@ -494,7 +494,7 @@ e1000_check_options(struct e1000_adapter *adapter)
                }
        }
 
-       switch(adapter->hw.media_type) {
+       switch (adapter->hw.media_type) {
        case e1000_media_type_fiber:
        case e1000_media_type_internal_serdes:
                e1000_check_fiber_options(adapter);
@@ -518,17 +518,17 @@ static void __devinit
 e1000_check_fiber_options(struct e1000_adapter *adapter)
 {
        int bd = adapter->bd_number;
-       if(num_Speed > bd) {
+       if (num_Speed > bd) {
                DPRINTK(PROBE, INFO, "Speed not valid for fiber adapters, "
                       "parameter ignored\n");
        }
 
-       if(num_Duplex > bd) {
+       if (num_Duplex > bd) {
                DPRINTK(PROBE, INFO, "Duplex not valid for fiber adapters, "
                       "parameter ignored\n");
        }
 
-       if((num_AutoNeg > bd) && (AutoNeg[bd] != 0x20)) {
+       if ((num_AutoNeg > bd) && (AutoNeg[bd] != 0x20)) {
                DPRINTK(PROBE, INFO, "AutoNeg other than 1000/Full is "
                                 "not valid for fiber adapters, "
                                 "parameter ignored\n");
@@ -584,6 +584,12 @@ e1000_check_copper_options(struct e1000_adapter *adapter)
                                         .p = dplx_list }}
                };
 
+               if (e1000_check_phy_reset_block(&adapter->hw)) {
+                       DPRINTK(PROBE, INFO,
+                               "Link active due to SoL/IDER Session. "
+                               "Speed/Duplex/AutoNeg parameter ignored.\n");
+                       return;
+               }
                if (num_Duplex > bd) {
                        dplx = Duplex[bd];
                        e1000_validate_option(&dplx, &opt, adapter);
@@ -592,7 +598,7 @@ e1000_check_copper_options(struct e1000_adapter *adapter)
                }
        }
 
-       if((num_AutoNeg > bd) && (speed != 0 || dplx != 0)) {
+       if ((num_AutoNeg > bd) && (speed != 0 || dplx != 0)) {
                DPRINTK(PROBE, INFO,
                       "AutoNeg specified along with Speed or Duplex, "
                       "parameter ignored\n");
@@ -653,7 +659,7 @@ e1000_check_copper_options(struct e1000_adapter *adapter)
        switch (speed + dplx) {
        case 0:
                adapter->hw.autoneg = adapter->fc_autoneg = 1;
-               if((num_Speed > bd) && (speed != 0 || dplx != 0))
+               if ((num_Speed > bd) && (speed != 0 || dplx != 0))
                        DPRINTK(PROBE, INFO,
                               "Speed and duplex autonegotiation enabled\n");
                break;
index 22c3a37bba5a3eaed8e8117e7045efe5257bcfa4..7ef4b0434a3fa74cd438ce5d8abb45cc8ed5df22 100644 (file)
@@ -35,6 +35,8 @@
 #include <linux/tcp.h>
 #include <linux/udp.h>
 #include <linux/etherdevice.h>
+#include <linux/in.h>
+#include <linux/ip.h>
 
 #include <linux/bitops.h>
 #include <linux/delay.h>
 /* Constants */
 #define VLAN_HLEN              4
 #define FCS_LEN                        4
-#define WRAP                   NET_IP_ALIGN + ETH_HLEN + VLAN_HLEN + FCS_LEN
+#define DMA_ALIGN              8       /* hw requires 8-byte alignment */
+#define HW_IP_ALIGN            2       /* hw aligns IP header */
+#define WRAP                   HW_IP_ALIGN + ETH_HLEN + VLAN_HLEN + FCS_LEN
 #define RX_SKB_SIZE            ((dev->mtu + WRAP + 7) & ~0x7)
 
-#define INT_CAUSE_UNMASK_ALL           0x0007ffff
-#define INT_CAUSE_UNMASK_ALL_EXT       0x0011ffff
-#define INT_CAUSE_MASK_ALL             0x00000000
-#define INT_CAUSE_MASK_ALL_EXT         0x00000000
+#define INT_UNMASK_ALL                 0x0007ffff
+#define INT_UNMASK_ALL_EXT             0x0011ffff
+#define INT_MASK_ALL                   0x00000000
+#define INT_MASK_ALL_EXT               0x00000000
 #define INT_CAUSE_CHECK_BITS           INT_CAUSE_UNMASK_ALL
 #define INT_CAUSE_CHECK_BITS_EXT       INT_CAUSE_UNMASK_ALL_EXT
 
@@ -78,8 +82,9 @@
 static int eth_port_link_is_up(unsigned int eth_port_num);
 static void eth_port_uc_addr_get(struct net_device *dev,
                                                unsigned char *MacAddr);
-static int mv643xx_eth_real_open(struct net_device *);
-static int mv643xx_eth_real_stop(struct net_device *);
+static void eth_port_set_multicast_list(struct net_device *);
+static int mv643xx_eth_open(struct net_device *);
+static int mv643xx_eth_stop(struct net_device *);
 static int mv643xx_eth_change_mtu(struct net_device *, int);
 static struct net_device_stats *mv643xx_eth_get_stats(struct net_device *);
 static void eth_port_init_mac_tables(unsigned int eth_port_num);
@@ -124,15 +129,8 @@ static inline void mv_write(int offset, u32 data)
  */
 static int mv643xx_eth_change_mtu(struct net_device *dev, int new_mtu)
 {
-       struct mv643xx_private *mp = netdev_priv(dev);
-       unsigned long flags;
-
-       spin_lock_irqsave(&mp->lock, flags);
-
-       if ((new_mtu > 9500) || (new_mtu < 64)) {
-               spin_unlock_irqrestore(&mp->lock, flags);
+       if ((new_mtu > 9500) || (new_mtu < 64))
                return -EINVAL;
-       }
 
        dev->mtu = new_mtu;
        /*
@@ -142,17 +140,13 @@ static int mv643xx_eth_change_mtu(struct net_device *dev, int new_mtu)
         * to memory is full, which might fail the open function.
         */
        if (netif_running(dev)) {
-               if (mv643xx_eth_real_stop(dev))
-                       printk(KERN_ERR
-                               "%s: Fatal error on stopping device\n",
-                               dev->name);
-               if (mv643xx_eth_real_open(dev))
+               mv643xx_eth_stop(dev);
+               if (mv643xx_eth_open(dev))
                        printk(KERN_ERR
                                "%s: Fatal error on opening device\n",
                                dev->name);
        }
 
-       spin_unlock_irqrestore(&mp->lock, flags);
        return 0;
 }
 
@@ -170,15 +164,19 @@ static void mv643xx_eth_rx_task(void *data)
        struct mv643xx_private *mp = netdev_priv(dev);
        struct pkt_info pkt_info;
        struct sk_buff *skb;
+       int unaligned;
 
        if (test_and_set_bit(0, &mp->rx_task_busy))
                panic("%s: Error in test_set_bit / clear_bit", dev->name);
 
        while (mp->rx_ring_skbs < (mp->rx_ring_size - 5)) {
-               skb = dev_alloc_skb(RX_SKB_SIZE);
+               skb = dev_alloc_skb(RX_SKB_SIZE + DMA_ALIGN);
                if (!skb)
                        break;
                mp->rx_ring_skbs++;
+               unaligned = (u32)skb->data & (DMA_ALIGN - 1);
+               if (unaligned)
+                       skb_reserve(skb, DMA_ALIGN - unaligned);
                pkt_info.cmd_sts = ETH_RX_ENABLE_INTERRUPT;
                pkt_info.byte_cnt = RX_SKB_SIZE;
                pkt_info.buf_ptr = dma_map_single(NULL, skb->data, RX_SKB_SIZE,
@@ -189,7 +187,7 @@ static void mv643xx_eth_rx_task(void *data)
                                "%s: Error allocating RX Ring\n", dev->name);
                        break;
                }
-               skb_reserve(skb, 2);
+               skb_reserve(skb, HW_IP_ALIGN);
        }
        clear_bit(0, &mp->rx_task_busy);
        /*
@@ -207,7 +205,7 @@ static void mv643xx_eth_rx_task(void *data)
        else {
                /* Return interrupts */
                mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(mp->port_num),
-                                                       INT_CAUSE_UNMASK_ALL);
+                                                       INT_UNMASK_ALL);
        }
 #endif
 }
@@ -267,6 +265,8 @@ static void mv643xx_eth_set_rx_mode(struct net_device *dev)
                mp->port_config &= ~(u32) MV643XX_ETH_UNICAST_PROMISCUOUS_MODE;
 
        mv_write(MV643XX_ETH_PORT_CONFIG_REG(mp->port_num), mp->port_config);
+
+       eth_port_set_multicast_list(dev);
 }
 
 /*
@@ -342,8 +342,6 @@ static int mv643xx_eth_free_tx_queue(struct net_device *dev,
        if (!(eth_int_cause_ext & (BIT0 | BIT8)))
                return released;
 
-       spin_lock(&mp->lock);
-
        /* Check only queue 0 */
        while (eth_tx_return_desc(mp, &pkt_info) == ETH_OK) {
                if (pkt_info.cmd_sts & BIT0) {
@@ -351,31 +349,21 @@ static int mv643xx_eth_free_tx_queue(struct net_device *dev,
                        stats->tx_errors++;
                }
 
-               /*
-                * If return_info is different than 0, release the skb.
-                * The case where return_info is not 0 is only in case
-                * when transmitted a scatter/gather packet, where only
-                * last skb releases the whole chain.
-                */
-               if (pkt_info.return_info) {
-                       if (skb_shinfo(pkt_info.return_info)->nr_frags)
-                               dma_unmap_page(NULL, pkt_info.buf_ptr,
-                                               pkt_info.byte_cnt,
-                                               DMA_TO_DEVICE);
-                       else
-                               dma_unmap_single(NULL, pkt_info.buf_ptr,
-                                               pkt_info.byte_cnt,
-                                               DMA_TO_DEVICE);
+               if (pkt_info.cmd_sts & ETH_TX_FIRST_DESC)
+                       dma_unmap_single(NULL, pkt_info.buf_ptr,
+                                       pkt_info.byte_cnt,
+                                       DMA_TO_DEVICE);
+               else
+                       dma_unmap_page(NULL, pkt_info.buf_ptr,
+                                       pkt_info.byte_cnt,
+                                       DMA_TO_DEVICE);
 
+               if (pkt_info.return_info) {
                        dev_kfree_skb_irq(pkt_info.return_info);
                        released = 0;
-               } else
-                       dma_unmap_page(NULL, pkt_info.buf_ptr,
-                                       pkt_info.byte_cnt, DMA_TO_DEVICE);
+               }
        }
 
-       spin_unlock(&mp->lock);
-
        return released;
 }
 
@@ -456,6 +444,7 @@ static int mv643xx_eth_receive_queue(struct net_device *dev)
                        netif_rx(skb);
 #endif
                }
+               dev->last_rx = jiffies;
        }
 
        return received_packets;
@@ -473,7 +462,7 @@ static int mv643xx_eth_receive_queue(struct net_device *dev)
  */
 
 static irqreturn_t mv643xx_eth_int_handler(int irq, void *dev_id,
-                                                       struct pt_regs *regs)
+                                               struct pt_regs *regs)
 {
        struct net_device *dev = (struct net_device *)dev_id;
        struct mv643xx_private *mp = netdev_priv(dev);
@@ -482,12 +471,12 @@ static irqreturn_t mv643xx_eth_int_handler(int irq, void *dev_id,
 
        /* Read interrupt cause registers */
        eth_int_cause = mv_read(MV643XX_ETH_INTERRUPT_CAUSE_REG(port_num)) &
-                                               INT_CAUSE_UNMASK_ALL;
+                                               INT_UNMASK_ALL;
 
        if (eth_int_cause & BIT1)
                eth_int_cause_ext = mv_read(
                        MV643XX_ETH_INTERRUPT_CAUSE_EXTEND_REG(port_num)) &
-                                               INT_CAUSE_UNMASK_ALL_EXT;
+                                               INT_UNMASK_ALL_EXT;
 
 #ifdef MV643XX_NAPI
        if (!(eth_int_cause & 0x0007fffd)) {
@@ -512,9 +501,10 @@ static irqreturn_t mv643xx_eth_int_handler(int irq, void *dev_id,
        } else {
                if (netif_rx_schedule_prep(dev)) {
                        /* Mask all the interrupts */
-                       mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num), 0);
-                       mv_write(MV643XX_ETH_INTERRUPT_EXTEND_MASK_REG
-                                                               (port_num), 0);
+                       mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num),
+                                                               INT_MASK_ALL);
+                       /* wait for previous write to complete */
+                       mv_read(MV643XX_ETH_INTERRUPT_MASK_REG(port_num));
                        __netif_rx_schedule(dev);
                }
 #else
@@ -527,9 +517,12 @@ static irqreturn_t mv643xx_eth_int_handler(int irq, void *dev_id,
                 * with skb's.
                 */
 #ifdef MV643XX_RX_QUEUE_FILL_ON_TASK
-               /* Unmask all interrupts on ethernet port */
+               /* Mask all interrupts on ethernet port */
                mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num),
-                                                       INT_CAUSE_MASK_ALL);
+                                                       INT_MASK_ALL);
+               /* wait for previous write to take effect */
+               mv_read(MV643XX_ETH_INTERRUPT_MASK_REG(port_num));
+
                queue_task(&mp->rx_task, &tq_immediate);
                mark_bh(IMMEDIATE_BH);
 #else
@@ -635,56 +628,6 @@ static unsigned int eth_port_set_tx_coal(unsigned int eth_port_num,
        return coal;
 }
 
-/*
- * mv643xx_eth_open
- *
- * This function is called when openning the network device. The function
- * should initialize all the hardware, initialize cyclic Rx/Tx
- * descriptors chain and buffers and allocate an IRQ to the network
- * device.
- *
- * Input :     a pointer to the network device structure
- *
- * Output :    zero of success , nonzero if fails.
- */
-
-static int mv643xx_eth_open(struct net_device *dev)
-{
-       struct mv643xx_private *mp = netdev_priv(dev);
-       unsigned int port_num = mp->port_num;
-       int err;
-
-       spin_lock_irq(&mp->lock);
-
-       err = request_irq(dev->irq, mv643xx_eth_int_handler,
-                       SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
-
-       if (err) {
-               printk(KERN_ERR "Can not assign IRQ number to MV643XX_eth%d\n",
-                                                               port_num);
-               err = -EAGAIN;
-               goto out;
-       }
-
-       if (mv643xx_eth_real_open(dev)) {
-               printk("%s: Error opening interface\n", dev->name);
-               err = -EBUSY;
-               goto out_free;
-       }
-
-       spin_unlock_irq(&mp->lock);
-
-       return 0;
-
-out_free:
-       free_irq(dev->irq, dev);
-
-out:
-       spin_unlock_irq(&mp->lock);
-
-       return err;
-}
-
 /*
  * ether_init_rx_desc_ring - Carve out an Rx descriptor chain and its buffers in memory.
  *
@@ -777,28 +720,37 @@ static void ether_init_tx_desc_ring(struct mv643xx_private *mp)
        mp->port_tx_queue_command |= 1;
 }
 
-/* Helper function for mv643xx_eth_open */
-static int mv643xx_eth_real_open(struct net_device *dev)
+/*
+ * mv643xx_eth_open
+ *
+ * This function is called when opening the network device. It should
+ * initialize all the hardware, set up the cyclic Rx/Tx descriptor
+ * chains and buffers, and allocate an IRQ for the network device.
+ *
+ * Input :     a pointer to the network device structure
+ *
+ * Output :    zero on success, nonzero on failure.
+ */
+
+static int mv643xx_eth_open(struct net_device *dev)
 {
        struct mv643xx_private *mp = netdev_priv(dev);
        unsigned int port_num = mp->port_num;
        unsigned int size;
+       int err;
+
+       err = request_irq(dev->irq, mv643xx_eth_int_handler,
+                       SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
+       if (err) {
+               printk(KERN_ERR "Can not assign IRQ number to MV643XX_eth%d\n",
+                                                               port_num);
+               return -EAGAIN;
+       }
 
        /* Stop RX Queues */
        mv_write(MV643XX_ETH_RECEIVE_QUEUE_COMMAND_REG(port_num), 0x0000ff00);
 
-       /* Clear the ethernet port interrupts */
-       mv_write(MV643XX_ETH_INTERRUPT_CAUSE_REG(port_num), 0);
-       mv_write(MV643XX_ETH_INTERRUPT_CAUSE_EXTEND_REG(port_num), 0);
-
-       /* Unmask RX buffer and TX end interrupt */
-       mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num),
-                                               INT_CAUSE_UNMASK_ALL);
-
-       /* Unmask phy and link status changes interrupts */
-       mv_write(MV643XX_ETH_INTERRUPT_EXTEND_MASK_REG(port_num),
-                                               INT_CAUSE_UNMASK_ALL_EXT);
-
        /* Set the MAC Address */
        memcpy(mp->port_mac_addr, dev->dev_addr, 6);
 
@@ -818,14 +770,15 @@ static int mv643xx_eth_real_open(struct net_device *dev)
                                                                GFP_KERNEL);
        if (!mp->rx_skb) {
                printk(KERN_ERR "%s: Cannot allocate Rx skb ring\n", dev->name);
-               return -ENOMEM;
+               err = -ENOMEM;
+               goto out_free_irq;
        }
        mp->tx_skb = kmalloc(sizeof(*mp->tx_skb) * mp->tx_ring_size,
                                                                GFP_KERNEL);
        if (!mp->tx_skb) {
                printk(KERN_ERR "%s: Cannot allocate Tx skb ring\n", dev->name);
-               kfree(mp->rx_skb);
-               return -ENOMEM;
+               err = -ENOMEM;
+               goto out_free_rx_skb;
        }
 
        /* Allocate TX ring */
@@ -845,9 +798,8 @@ static int mv643xx_eth_real_open(struct net_device *dev)
        if (!mp->p_tx_desc_area) {
                printk(KERN_ERR "%s: Cannot allocate Tx Ring (size %d bytes)\n",
                                                        dev->name, size);
-               kfree(mp->rx_skb);
-               kfree(mp->tx_skb);
-               return -ENOMEM;
+               err = -ENOMEM;
+               goto out_free_tx_skb;
        }
        BUG_ON((u32) mp->p_tx_desc_area & 0xf); /* check 16-byte alignment */
        memset((void *)mp->p_tx_desc_area, 0, mp->tx_desc_area_size);
@@ -874,13 +826,12 @@ static int mv643xx_eth_real_open(struct net_device *dev)
                printk(KERN_ERR "%s: Freeing previously allocated TX queues...",
                                                        dev->name);
                if (mp->rx_sram_size)
-                       iounmap(mp->p_rx_desc_area);
+                       iounmap(mp->p_tx_desc_area);
                else
                        dma_free_coherent(NULL, mp->tx_desc_area_size,
                                        mp->p_tx_desc_area, mp->tx_desc_dma);
-               kfree(mp->rx_skb);
-               kfree(mp->tx_skb);
-               return -ENOMEM;
+               err = -ENOMEM;
+               goto out_free_tx_skb;
        }
        memset((void *)mp->p_rx_desc_area, 0, size);
 
@@ -900,9 +851,26 @@ static int mv643xx_eth_real_open(struct net_device *dev)
        mp->tx_int_coal =
                eth_port_set_tx_coal(port_num, 133000000, MV643XX_TX_COAL);
 
-       netif_start_queue(dev);
+       /* Clear any pending ethernet port interrupts */
+       mv_write(MV643XX_ETH_INTERRUPT_CAUSE_REG(port_num), 0);
+       mv_write(MV643XX_ETH_INTERRUPT_CAUSE_EXTEND_REG(port_num), 0);
+
+       /* Unmask phy and link status changes interrupts */
+       mv_write(MV643XX_ETH_INTERRUPT_EXTEND_MASK_REG(port_num),
+                                               INT_UNMASK_ALL_EXT);
 
+       /* Unmask RX buffer and TX end interrupt */
+       mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num), INT_UNMASK_ALL);
        return 0;
+
+out_free_tx_skb:
+       kfree(mp->tx_skb);
+out_free_rx_skb:
+       kfree(mp->rx_skb);
+out_free_irq:
+       free_irq(dev->irq, dev);
+
+       return err;
 }
 
 static void mv643xx_eth_free_tx_rings(struct net_device *dev)
@@ -910,14 +878,17 @@ static void mv643xx_eth_free_tx_rings(struct net_device *dev)
        struct mv643xx_private *mp = netdev_priv(dev);
        unsigned int port_num = mp->port_num;
        unsigned int curr;
+       struct sk_buff *skb;
 
        /* Stop Tx Queues */
        mv_write(MV643XX_ETH_TRANSMIT_QUEUE_COMMAND_REG(port_num), 0x0000ff00);
 
        /* Free outstanding skb's on TX rings */
        for (curr = 0; mp->tx_ring_skbs && curr < mp->tx_ring_size; curr++) {
-               if (mp->tx_skb[curr]) {
-                       dev_kfree_skb(mp->tx_skb[curr]);
+               skb = mp->tx_skb[curr];
+               if (skb) {
+                       mp->tx_ring_skbs -= skb_shinfo(skb)->nr_frags;
+                       dev_kfree_skb(skb);
                        mp->tx_ring_skbs--;
                }
        }
@@ -973,44 +944,32 @@ static void mv643xx_eth_free_rx_rings(struct net_device *dev)
  * Output :    zero on success, nonzero on failure
  */
 
-/* Helper function for mv643xx_eth_stop */
-
-static int mv643xx_eth_real_stop(struct net_device *dev)
+static int mv643xx_eth_stop(struct net_device *dev)
 {
        struct mv643xx_private *mp = netdev_priv(dev);
        unsigned int port_num = mp->port_num;
 
+       /* Mask all interrupts on ethernet port */
+       mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num), INT_MASK_ALL);
+       /* wait for previous write to complete */
+       mv_read(MV643XX_ETH_INTERRUPT_MASK_REG(port_num));
+
+#ifdef MV643XX_NAPI
+       netif_poll_disable(dev);
+#endif
        netif_carrier_off(dev);
        netif_stop_queue(dev);
 
-       mv643xx_eth_free_tx_rings(dev);
-       mv643xx_eth_free_rx_rings(dev);
-
        eth_port_reset(mp->port_num);
 
-       /* Disable ethernet port interrupts */
-       mv_write(MV643XX_ETH_INTERRUPT_CAUSE_REG(port_num), 0);
-       mv_write(MV643XX_ETH_INTERRUPT_CAUSE_EXTEND_REG(port_num), 0);
-
-       /* Mask RX buffer and TX end interrupt */
-       mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num), 0);
-
-       /* Mask phy and link status changes interrupts */
-       mv_write(MV643XX_ETH_INTERRUPT_EXTEND_MASK_REG(port_num), 0);
-
-       return 0;
-}
-
-static int mv643xx_eth_stop(struct net_device *dev)
-{
-       struct mv643xx_private *mp = netdev_priv(dev);
-
-       spin_lock_irq(&mp->lock);
+       mv643xx_eth_free_tx_rings(dev);
+       mv643xx_eth_free_rx_rings(dev);
 
-       mv643xx_eth_real_stop(dev);
+#ifdef MV643XX_NAPI
+       netif_poll_enable(dev);
+#endif
 
        free_irq(dev->irq, dev);
-       spin_unlock_irq(&mp->lock);
 
        return 0;
 }
@@ -1022,20 +981,17 @@ static void mv643xx_tx(struct net_device *dev)
        struct pkt_info pkt_info;
 
        while (eth_tx_return_desc(mp, &pkt_info) == ETH_OK) {
-               if (pkt_info.return_info) {
-                       if (skb_shinfo(pkt_info.return_info)->nr_frags)
-                               dma_unmap_page(NULL, pkt_info.buf_ptr,
-                                               pkt_info.byte_cnt,
-                                               DMA_TO_DEVICE);
-                       else
-                               dma_unmap_single(NULL, pkt_info.buf_ptr,
-                                               pkt_info.byte_cnt,
-                                               DMA_TO_DEVICE);
+               if (pkt_info.cmd_sts & ETH_TX_FIRST_DESC)
+                       dma_unmap_single(NULL, pkt_info.buf_ptr,
+                                       pkt_info.byte_cnt,
+                                       DMA_TO_DEVICE);
+               else
+                       dma_unmap_page(NULL, pkt_info.buf_ptr,
+                                       pkt_info.byte_cnt,
+                                       DMA_TO_DEVICE);
 
+               if (pkt_info.return_info)
                        dev_kfree_skb_irq(pkt_info.return_info);
-               } else
-                       dma_unmap_page(NULL, pkt_info.buf_ptr,
-                                       pkt_info.byte_cnt, DMA_TO_DEVICE);
        }
 
        if (netif_queue_stopped(dev) &&
@@ -1053,14 +1009,11 @@ static int mv643xx_poll(struct net_device *dev, int *budget)
        struct mv643xx_private *mp = netdev_priv(dev);
        int done = 1, orig_budget, work_done;
        unsigned int port_num = mp->port_num;
-       unsigned long flags;
 
 #ifdef MV643XX_TX_FAST_REFILL
        if (++mp->tx_clean_threshold > 5) {
-               spin_lock_irqsave(&mp->lock, flags);
                mv643xx_tx(dev);
                mp->tx_clean_threshold = 0;
-               spin_unlock_irqrestore(&mp->lock, flags);
        }
 #endif
 
@@ -1078,21 +1031,35 @@ static int mv643xx_poll(struct net_device *dev, int *budget)
        }
 
        if (done) {
-               spin_lock_irqsave(&mp->lock, flags);
-               __netif_rx_complete(dev);
+               netif_rx_complete(dev);
                mv_write(MV643XX_ETH_INTERRUPT_CAUSE_REG(port_num), 0);
                mv_write(MV643XX_ETH_INTERRUPT_CAUSE_EXTEND_REG(port_num), 0);
                mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num),
-                                               INT_CAUSE_UNMASK_ALL);
-               mv_write(MV643XX_ETH_INTERRUPT_EXTEND_MASK_REG(port_num),
-                                               INT_CAUSE_UNMASK_ALL_EXT);
-               spin_unlock_irqrestore(&mp->lock, flags);
+                                               INT_UNMASK_ALL);
        }
 
        return done ? 0 : 1;
 }
 #endif
 
+/* Hardware can't handle unaligned fragments smaller than 9 bytes.
+ * This helper function detects that case.
+ */
+
+static inline unsigned int has_tiny_unaligned_frags(struct sk_buff *skb)
+{
+       unsigned int frag;
+       skb_frag_t *fragp;
+
+       for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
+               fragp = &skb_shinfo(skb)->frags[frag];
+               if (fragp->size <= 8 && fragp->page_offset & 0x7)
+                       return 1;
+       }
+       return 0;
+}
+
+
 /*
  * mv643xx_eth_start_xmit
  *
@@ -1136,12 +1103,19 @@ static int mv643xx_eth_start_xmit(struct sk_buff *skb, struct net_device *dev)
                return 1;
        }
 
+#ifdef MV643XX_CHECKSUM_OFFLOAD_TX
+       if (has_tiny_unaligned_frags(skb)) {
+               if ((skb_linearize(skb, GFP_ATOMIC) != 0)) {
+                       stats->tx_dropped++;
+                       printk(KERN_DEBUG "%s: failed to linearize tiny "
+                                       "unaligned fragment\n", dev->name);
+                       return 1;
+               }
+       }
+
        spin_lock_irqsave(&mp->lock, flags);
 
-       /* Update packet info data structure -- DMA owned, first last */
-#ifdef MV643XX_CHECKSUM_OFFLOAD_TX
        if (!skb_shinfo(skb)->nr_frags) {
-linear:
                if (skb->ip_summed != CHECKSUM_HW) {
                        /* Errata BTS #50, IHL must be 5 if no HW checksum */
                        pkt_info.cmd_sts = ETH_TX_ENABLE_INTERRUPT |
@@ -1150,7 +1124,6 @@ linear:
                                           5 << ETH_TX_IHL_SHIFT;
                        pkt_info.l4i_chk = 0;
                } else {
-
                        pkt_info.cmd_sts = ETH_TX_ENABLE_INTERRUPT |
                                           ETH_TX_FIRST_DESC |
                                           ETH_TX_LAST_DESC |
@@ -1158,14 +1131,16 @@ linear:
                                           ETH_GEN_IP_V_4_CHECKSUM |
                                           skb->nh.iph->ihl << ETH_TX_IHL_SHIFT;
                        /* CPU already calculated pseudo header checksum. */
-                       if (skb->nh.iph->protocol == IPPROTO_UDP) {
+                       if ((skb->protocol == ETH_P_IP) &&
+                           (skb->nh.iph->protocol == IPPROTO_UDP) ) {
                                pkt_info.cmd_sts |= ETH_UDP_FRAME;
                                pkt_info.l4i_chk = skb->h.uh->check;
-                       } else if (skb->nh.iph->protocol == IPPROTO_TCP)
+                       } else if ((skb->protocol == ETH_P_IP) &&
+                                  (skb->nh.iph->protocol == IPPROTO_TCP))
                                pkt_info.l4i_chk = skb->h.th->check;
                        else {
                                printk(KERN_ERR
-                                       "%s: chksum proto != TCP or UDP\n",
+                                       "%s: chksum proto != IPv4 TCP or UDP\n",
                                        dev->name);
                                spin_unlock_irqrestore(&mp->lock, flags);
                                return 1;
@@ -1183,26 +1158,6 @@ linear:
        } else {
                unsigned int frag;
 
-               /* Since hardware can't handle unaligned fragments smaller
-                * than 9 bytes, if we find any, we linearize the skb
-                * and start again.  When I've seen it, it's always been
-                * the first frag (probably near the end of the page),
-                * but we check all frags to be safe.
-                */
-               for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
-                       skb_frag_t *fragp;
-
-                       fragp = &skb_shinfo(skb)->frags[frag];
-                       if (fragp->size <= 8 && fragp->page_offset & 0x7) {
-                               skb_linearize(skb, GFP_ATOMIC);
-                               printk(KERN_DEBUG "%s: unaligned tiny fragment"
-                                               "%d of %d, fixed\n",
-                                               dev->name, frag,
-                                               skb_shinfo(skb)->nr_frags);
-                               goto linear;
-                       }
-               }
-
                /* first frag which is skb header */
                pkt_info.byte_cnt = skb_headlen(skb);
                pkt_info.buf_ptr = dma_map_single(NULL, skb->data,
@@ -1221,14 +1176,16 @@ linear:
                                           ETH_GEN_IP_V_4_CHECKSUM |
                                           skb->nh.iph->ihl << ETH_TX_IHL_SHIFT;
                        /* CPU already calculated pseudo header checksum. */
-                       if (skb->nh.iph->protocol == IPPROTO_UDP) {
+                       if ((skb->protocol == ETH_P_IP) &&
+                           (skb->nh.iph->protocol == IPPROTO_UDP)) {
                                pkt_info.cmd_sts |= ETH_UDP_FRAME;
                                pkt_info.l4i_chk = skb->h.uh->check;
-                       } else if (skb->nh.iph->protocol == IPPROTO_TCP)
+                       } else if ((skb->protocol == ETH_P_IP) &&
+                                  (skb->nh.iph->protocol == IPPROTO_TCP))
                                pkt_info.l4i_chk = skb->h.th->check;
                        else {
                                printk(KERN_ERR
-                                       "%s: chksum proto != TCP or UDP\n",
+                                       "%s: chksum proto != IPv4 TCP or UDP\n",
                                        dev->name);
                                spin_unlock_irqrestore(&mp->lock, flags);
                                return 1;
@@ -1288,6 +1245,8 @@ linear:
                }
        }
 #else
+       spin_lock_irqsave(&mp->lock, flags);
+
        pkt_info.cmd_sts = ETH_TX_ENABLE_INTERRUPT | ETH_TX_FIRST_DESC |
                                                        ETH_TX_LAST_DESC;
        pkt_info.l4i_chk = 0;
@@ -1340,39 +1299,18 @@ static struct net_device_stats *mv643xx_eth_get_stats(struct net_device *dev)
 }
 
 #ifdef CONFIG_NET_POLL_CONTROLLER
-static inline void mv643xx_enable_irq(struct mv643xx_private *mp)
-{
-       int port_num = mp->port_num;
-       unsigned long flags;
-
-       spin_lock_irqsave(&mp->lock, flags);
-       mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num),
-                                       INT_CAUSE_UNMASK_ALL);
-       mv_write(MV643XX_ETH_INTERRUPT_EXTEND_MASK_REG(port_num),
-                                       INT_CAUSE_UNMASK_ALL_EXT);
-       spin_unlock_irqrestore(&mp->lock, flags);
-}
-
-static inline void mv643xx_disable_irq(struct mv643xx_private *mp)
-{
-       int port_num = mp->port_num;
-       unsigned long flags;
-
-       spin_lock_irqsave(&mp->lock, flags);
-       mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num),
-                                       INT_CAUSE_MASK_ALL);
-       mv_write(MV643XX_ETH_INTERRUPT_EXTEND_MASK_REG(port_num),
-                                       INT_CAUSE_MASK_ALL_EXT);
-       spin_unlock_irqrestore(&mp->lock, flags);
-}
-
 static void mv643xx_netpoll(struct net_device *netdev)
 {
        struct mv643xx_private *mp = netdev_priv(netdev);
+       int port_num = mp->port_num;
+
+       mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num), INT_MASK_ALL);
+       /* wait for previous write to complete */
+       mv_read(MV643XX_ETH_INTERRUPT_MASK_REG(port_num));
 
-       mv643xx_disable_irq(mp);
        mv643xx_eth_int_handler(netdev->irq, netdev, NULL);
-       mv643xx_enable_irq(mp);
+
+       mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num), INT_UNMASK_ALL);
 }
 #endif
 
@@ -1441,7 +1379,7 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
         * Zero copy can only work if we use Discovery II memory. Else, we will
         * have to map the buffers to ISA memory which is only 16 MB
         */
-       dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_HW_CSUM;
+       dev->features = NETIF_F_SG | NETIF_F_IP_CSUM;
 #endif
 #endif
 
@@ -2053,6 +1991,196 @@ static int eth_port_uc_addr(unsigned int eth_port_num, unsigned char uc_nibble,
        return 1;
 }
 
+/*
+ * The entries in each table are indexed by a hash of a packet's MAC
+ * address.  One bit in each entry determines whether the packet is
+ * accepted.  There are 4 entries (each 8 bits wide) in each register
+ * of the table.  The bits in each entry are defined as follows:
+ *     0       Accept=1, Drop=0
+ *     3-1     Queue                   (ETH_Q0=0)
+ *     7-4     Reserved = 0;
+ */
+static void eth_port_set_filter_table_entry(int table, unsigned char entry)
+{
+       unsigned int table_reg;
+       unsigned int tbl_offset;
+       unsigned int reg_offset;
+
+       tbl_offset = (entry / 4) * 4;   /* Register offset of DA table entry */
+       reg_offset = entry % 4;         /* Entry offset within the register */
+
+       /* Set "accepts frame bit" at specified table entry */
+       table_reg = mv_read(table + tbl_offset);
+       table_reg |= 0x01 << (8 * reg_offset);
+       mv_write(table + tbl_offset, table_reg);
+}
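
To make the indexing above concrete, here is a small, hedged illustration with a hypothetical entry value (0x2a): each 32-bit table register holds four one-byte entries, so entry 0x2a lives in the register at offset 0x28, byte 2, and its accept bit is bit 16 of that register.

    #include <stdio.h>

    /* Hedged illustration of the DA-filter table layout described above:
     * four one-byte entries per 32-bit register, bit 0 of each byte is
     * the accept bit. The entry value is hypothetical.
     */
    int main(void)
    {
            unsigned char entry = 0x2a;
            unsigned int tbl_offset = (entry / 4) * 4;            /* register offset: 0x28 */
            unsigned int reg_offset = entry % 4;                  /* byte within register: 2 */
            unsigned int accept_mask = 0x01u << (8 * reg_offset); /* bit 16 -> 0x10000 */

            printf("offset=0x%x byte=%u mask=0x%x\n",
                   tbl_offset, reg_offset, accept_mask);
            return 0;
    }
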
+
+/*
+ * eth_port_mc_addr - Multicast address settings.
+ *
+ * The MV device supports multicast using two tables:
+ * 1) Special Multicast Table for MAC addresses of the form
+ *    0x01-00-5E-00-00-XX (where XX is between 0x00 and 0xFF).
+ *    The MAC DA[7:0] bits are used as a pointer to the Special Multicast
+ *    Table entries in the DA-Filter table.
+ * 2) Other Multicast Table, for all other multicast addresses. An 8-bit
+ *    CRC of the address is used as an index into the Other Multicast
+ *    Table entries in the DA-Filter table; this function calculates that
+ *    CRC.
+ * In either case, eth_port_set_filter_table_entry() is then called
+ * to set the actual table entry.
+ */
+static void eth_port_mc_addr(unsigned int eth_port_num, unsigned char *p_addr)
+{
+       unsigned int mac_h;
+       unsigned int mac_l;
+       unsigned char crc_result = 0;
+       int table;
+       int mac_array[48];
+       int crc[8];
+       int i;
+
+       if ((p_addr[0] == 0x01) && (p_addr[1] == 0x00) &&
+           (p_addr[2] == 0x5E) && (p_addr[3] == 0x00) && (p_addr[4] == 0x00)) {
+               table = MV643XX_ETH_DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE
+                                       (eth_port_num);
+               eth_port_set_filter_table_entry(table, p_addr[5]);
+               return;
+       }
+
+       /* Calculate CRC-8 out of the given address */
+       mac_h = (p_addr[0] << 8) | (p_addr[1]);
+       mac_l = (p_addr[2] << 24) | (p_addr[3] << 16) |
+                       (p_addr[4] << 8) | (p_addr[5] << 0);
+
+       for (i = 0; i < 32; i++)
+               mac_array[i] = (mac_l >> i) & 0x1;
+       for (i = 32; i < 48; i++)
+               mac_array[i] = (mac_h >> (i - 32)) & 0x1;
+
+       crc[0] = mac_array[45] ^ mac_array[43] ^ mac_array[40] ^ mac_array[39] ^
+                mac_array[35] ^ mac_array[34] ^ mac_array[31] ^ mac_array[30] ^
+                mac_array[28] ^ mac_array[23] ^ mac_array[21] ^ mac_array[19] ^
+                mac_array[18] ^ mac_array[16] ^ mac_array[14] ^ mac_array[12] ^
+                mac_array[8]  ^ mac_array[7]  ^ mac_array[6]  ^ mac_array[0];
+
+       crc[1] = mac_array[46] ^ mac_array[45] ^ mac_array[44] ^ mac_array[43] ^
+                mac_array[41] ^ mac_array[39] ^ mac_array[36] ^ mac_array[34] ^
+                mac_array[32] ^ mac_array[30] ^ mac_array[29] ^ mac_array[28] ^
+                mac_array[24] ^ mac_array[23] ^ mac_array[22] ^ mac_array[21] ^
+                mac_array[20] ^ mac_array[18] ^ mac_array[17] ^ mac_array[16] ^
+                mac_array[15] ^ mac_array[14] ^ mac_array[13] ^ mac_array[12] ^
+                mac_array[9]  ^ mac_array[6]  ^ mac_array[1]  ^ mac_array[0];
+
+       crc[2] = mac_array[47] ^ mac_array[46] ^ mac_array[44] ^ mac_array[43] ^
+                mac_array[42] ^ mac_array[39] ^ mac_array[37] ^ mac_array[34] ^
+                mac_array[33] ^ mac_array[29] ^ mac_array[28] ^ mac_array[25] ^
+                mac_array[24] ^ mac_array[22] ^ mac_array[17] ^ mac_array[15] ^
+                mac_array[13] ^ mac_array[12] ^ mac_array[10] ^ mac_array[8]  ^
+                mac_array[6]  ^ mac_array[2]  ^ mac_array[1]  ^ mac_array[0];
+
+       crc[3] = mac_array[47] ^ mac_array[45] ^ mac_array[44] ^ mac_array[43] ^
+                mac_array[40] ^ mac_array[38] ^ mac_array[35] ^ mac_array[34] ^
+                mac_array[30] ^ mac_array[29] ^ mac_array[26] ^ mac_array[25] ^
+                mac_array[23] ^ mac_array[18] ^ mac_array[16] ^ mac_array[14] ^
+                mac_array[13] ^ mac_array[11] ^ mac_array[9]  ^ mac_array[7]  ^
+                mac_array[3]  ^ mac_array[2]  ^ mac_array[1];
+
+       crc[4] = mac_array[46] ^ mac_array[45] ^ mac_array[44] ^ mac_array[41] ^
+                mac_array[39] ^ mac_array[36] ^ mac_array[35] ^ mac_array[31] ^
+                mac_array[30] ^ mac_array[27] ^ mac_array[26] ^ mac_array[24] ^
+                mac_array[19] ^ mac_array[17] ^ mac_array[15] ^ mac_array[14] ^
+                mac_array[12] ^ mac_array[10] ^ mac_array[8]  ^ mac_array[4]  ^
+                mac_array[3]  ^ mac_array[2];
+
+       crc[5] = mac_array[47] ^ mac_array[46] ^ mac_array[45] ^ mac_array[42] ^
+                mac_array[40] ^ mac_array[37] ^ mac_array[36] ^ mac_array[32] ^
+                mac_array[31] ^ mac_array[28] ^ mac_array[27] ^ mac_array[25] ^
+                mac_array[20] ^ mac_array[18] ^ mac_array[16] ^ mac_array[15] ^
+                mac_array[13] ^ mac_array[11] ^ mac_array[9]  ^ mac_array[5]  ^
+                mac_array[4]  ^ mac_array[3];
+
+       crc[6] = mac_array[47] ^ mac_array[46] ^ mac_array[43] ^ mac_array[41] ^
+                mac_array[38] ^ mac_array[37] ^ mac_array[33] ^ mac_array[32] ^
+                mac_array[29] ^ mac_array[28] ^ mac_array[26] ^ mac_array[21] ^
+                mac_array[19] ^ mac_array[17] ^ mac_array[16] ^ mac_array[14] ^
+                mac_array[12] ^ mac_array[10] ^ mac_array[6]  ^ mac_array[5]  ^
+                mac_array[4];
+
+       crc[7] = mac_array[47] ^ mac_array[44] ^ mac_array[42] ^ mac_array[39] ^
+                mac_array[38] ^ mac_array[34] ^ mac_array[33] ^ mac_array[30] ^
+                mac_array[29] ^ mac_array[27] ^ mac_array[22] ^ mac_array[20] ^
+                mac_array[18] ^ mac_array[17] ^ mac_array[15] ^ mac_array[13] ^
+                mac_array[11] ^ mac_array[7]  ^ mac_array[6]  ^ mac_array[5];
+
+       for (i = 0; i < 8; i++)
+               crc_result = crc_result | (crc[i] << i);
+
+       table = MV643XX_ETH_DA_FILTER_OTHER_MULTICAST_TABLE_BASE(eth_port_num);
+       eth_port_set_filter_table_entry(table, crc_result);
+}
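
As a hedged sketch of the classification described above (the sample address is made up): an IPv4-range multicast MAC of the form 01:00:5e:00:00:xx indexes the special table directly with xx, while any other multicast address goes through the CRC-8 hash into the other table.

    #include <stdbool.h>
    #include <stdio.h>

    /* Hedged sketch of the two-table multicast classification above; the
     * CRC-8 path is only named, not reimplemented, and the address below
     * is a hypothetical example.
     */
    static bool is_special_mcast(const unsigned char *a)
    {
            return a[0] == 0x01 && a[1] == 0x00 && a[2] == 0x5e &&
                   a[3] == 0x00 && a[4] == 0x00;
    }

    int main(void)
    {
            const unsigned char addr[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x42 };

            if (is_special_mcast(addr))
                    printf("special multicast table, entry 0x%02x\n", addr[5]);
            else
                    printf("other multicast table, indexed by CRC-8 of the address\n");
            return 0;
    }
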
+
+/*
+ * Set the entire multicast list based on dev->mc_list.
+ */
+static void eth_port_set_multicast_list(struct net_device *dev)
+{
+
+       struct dev_mc_list      *mc_list;
+       int                     i;
+       int                     table_index;
+       struct mv643xx_private  *mp = netdev_priv(dev);
+       unsigned int            eth_port_num = mp->port_num;
+
+       /* If the device is in promiscuous mode or in all multicast mode,
+        * we will fully populate both multicast tables with accept.
+        * This is guaranteed to yield a match on all multicast addresses...
+        */
+       if ((dev->flags & IFF_PROMISC) || (dev->flags & IFF_ALLMULTI)) {
+               for (table_index = 0; table_index <= 0xFC; table_index += 4) {
+                       /* Set all entries in DA filter special multicast
+                        * table (Ex_dFSMT)
+                        * Set for ETH_Q0 for now
+                        * Bits
+                        * 0      Accept=1, Drop=0
+                        * 3-1  Queue    ETH_Q0=0
+                        * 7-4  Reserved = 0;
+                        */
+                       mv_write(MV643XX_ETH_DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE(eth_port_num) + table_index, 0x01010101);
+
+                       /* Set all entries in DA filter other multicast
+                        * table (Ex_dFOMT)
+                        * Set for ETH_Q0 for now
+                        * Bits
+                        * 0      Accept=1, Drop=0
+                        * 3-1  Queue    ETH_Q0=0
+                        * 7-4  Reserved = 0;
+                        */
+                       mv_write(MV643XX_ETH_DA_FILTER_OTHER_MULTICAST_TABLE_BASE(eth_port_num) + table_index, 0x01010101);
+               }
+               return;
+       }
+
+       /* We will clear out multicast tables every time we get the list.
+        * Then add the entire new list...
+        */
+       for (table_index = 0; table_index <= 0xFC; table_index += 4) {
+               /* Clear DA filter special multicast table (Ex_dFSMT) */
+               mv_write(MV643XX_ETH_DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE
+                               (eth_port_num) + table_index, 0);
+
+               /* Clear DA filter other multicast table (Ex_dFOMT) */
+               mv_write(MV643XX_ETH_DA_FILTER_OTHER_MULTICAST_TABLE_BASE
+                               (eth_port_num) + table_index, 0);
+       }
+
+       /* Get pointer to net_device multicast list and add each one... */
+       for (i = 0, mc_list = dev->mc_list;
+                       (i < 256) && (mc_list != NULL) && (i < dev->mc_count);
+                       i++, mc_list = mc_list->next)
+               if (mc_list->dmi_addrlen == 6)
+                       eth_port_mc_addr(eth_port_num, mc_list->dmi_addr);
+}
+
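Each 32-bit filter register written above packs four one-byte table entries; per the bit layout noted in the comments (bit 0 accept, bits 3-1 queue, bits 7-4 reserved), the 0x01010101 write marks all four entries as accept on queue 0. A hypothetical helper showing how such a word is built:

static inline u32 example_accept_word(unsigned int queue)
{
        /* accept = 1 in bit 0, queue number in bits 3-1, bits 7-4 reserved */
        u8 entry = 0x01 | ((queue & 0x07) << 1);

        /* replicate the one-byte entry into all four bytes of the register */
        return entry * 0x01010101U;
}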
 /*
  * eth_port_init_mac_tables - Clear all entrance in the UC, SMC and OMC tables
  *
@@ -2080,11 +2208,11 @@ static void eth_port_init_mac_tables(unsigned int eth_port_num)
 
        for (table_index = 0; table_index <= 0xFC; table_index += 4) {
                /* Clear DA filter special multicast table (Ex_dFSMT) */
-               mv_write((MV643XX_ETH_DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE
-                                       (eth_port_num) + table_index), 0);
+               mv_write(MV643XX_ETH_DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE
+                                       (eth_port_num) + table_index, 0);
                /* Clear DA filter other multicast table (Ex_dFOMT) */
-               mv_write((MV643XX_ETH_DA_FILTER_OTHER_MULTICAST_TABLE_BASE
-                                       (eth_port_num) + table_index), 0);
+               mv_write(MV643XX_ETH_DA_FILTER_OTHER_MULTICAST_TABLE_BASE
+                                       (eth_port_num) + table_index, 0);
        }
 }
 
@@ -2629,23 +2757,27 @@ static ETH_FUNC_RET_STATUS eth_port_send(struct mv643xx_private *mp,
  *     Tx ring 'first' and 'used' indexes are updated.
  *
  * RETURN:
- *     ETH_ERROR in case the routine can not access Tx desc ring.
- *     ETH_RETRY in case there is transmission in process.
- *     ETH_END_OF_JOB if the routine has nothing to release.
- *     ETH_OK otherwise.
+ *     ETH_OK on success
+ *     ETH_ERROR otherwise.
  *
  */
 static ETH_FUNC_RET_STATUS eth_tx_return_desc(struct mv643xx_private *mp,
                                                struct pkt_info *p_pkt_info)
 {
        int tx_desc_used;
+       int tx_busy_desc;
+       struct eth_tx_desc *p_tx_desc_used;
+       unsigned int command_status;
+       unsigned long flags;
+       int err = ETH_OK;
+
+       spin_lock_irqsave(&mp->lock, flags);
+
 #ifdef MV643XX_CHECKSUM_OFFLOAD_TX
-       int tx_busy_desc = mp->tx_first_desc_q;
+       tx_busy_desc = mp->tx_first_desc_q;
 #else
-       int tx_busy_desc = mp->tx_curr_desc_q;
+       tx_busy_desc = mp->tx_curr_desc_q;
 #endif
-       struct eth_tx_desc *p_tx_desc_used;
-       unsigned int command_status;
 
        /* Get the Tx Desc ring indexes */
        tx_desc_used = mp->tx_used_desc_q;
@@ -2653,22 +2785,30 @@ static ETH_FUNC_RET_STATUS eth_tx_return_desc(struct mv643xx_private *mp,
        p_tx_desc_used = &mp->p_tx_desc_area[tx_desc_used];
 
        /* Sanity check */
-       if (p_tx_desc_used == NULL)
-               return ETH_ERROR;
+       if (p_tx_desc_used == NULL) {
+               err = ETH_ERROR;
+               goto out;
+       }
 
        /* Stop release. About to overlap the current available Tx descriptor */
-       if (tx_desc_used == tx_busy_desc && !mp->tx_resource_err)
-               return ETH_END_OF_JOB;
+       if (tx_desc_used == tx_busy_desc && !mp->tx_resource_err) {
+               err = ETH_ERROR;
+               goto out;
+       }
 
        command_status = p_tx_desc_used->cmd_sts;
 
        /* Still transmitting... */
-       if (command_status & (ETH_BUFFER_OWNED_BY_DMA))
-               return ETH_RETRY;
+       if (command_status & (ETH_BUFFER_OWNED_BY_DMA)) {
+               err = ETH_ERROR;
+               goto out;
+       }
 
        /* Pass the packet information to the caller */
        p_pkt_info->cmd_sts = command_status;
        p_pkt_info->return_info = mp->tx_skb[tx_desc_used];
+       p_pkt_info->buf_ptr = p_tx_desc_used->buf_ptr;
+       p_pkt_info->byte_cnt = p_tx_desc_used->byte_cnt;
        mp->tx_skb[tx_desc_used] = NULL;
 
        /* Update the next descriptor to release. */
@@ -2680,7 +2820,10 @@ static ETH_FUNC_RET_STATUS eth_tx_return_desc(struct mv643xx_private *mp,
        BUG_ON(mp->tx_ring_skbs == 0);
        mp->tx_ring_skbs--;
 
-       return ETH_OK;
+out:
+       spin_unlock_irqrestore(&mp->lock, flags);
+
+       return err;
 }
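The rework above takes mp->lock once and funnels every early return through a single out: label; stripped to its skeleton (the helper name and the early-exit condition are illustrative only), the pattern is:

static ETH_FUNC_RET_STATUS example_locked_op(struct mv643xx_private *mp)
{
        unsigned long flags;
        ETH_FUNC_RET_STATUS err = ETH_OK;

        spin_lock_irqsave(&mp->lock, flags);

        if (mp->tx_resource_err) {      /* illustrative early exit */
                err = ETH_ERROR;
                goto out;
        }

        /* ... descriptor bookkeeping would go here ... */
out:
        spin_unlock_irqrestore(&mp->lock, flags);

        return err;
}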
 
 /*
@@ -2712,11 +2855,14 @@ static ETH_FUNC_RET_STATUS eth_port_receive(struct mv643xx_private *mp,
        int rx_next_curr_desc, rx_curr_desc, rx_used_desc;
        volatile struct eth_rx_desc *p_rx_desc;
        unsigned int command_status;
+       unsigned long flags;
 
        /* Do not process Rx ring in case of Rx ring resource error */
        if (mp->rx_resource_err)
                return ETH_QUEUE_FULL;
 
+       spin_lock_irqsave(&mp->lock, flags);
+
        /* Get the Rx Desc ring 'curr and 'used' indexes */
        rx_curr_desc = mp->rx_curr_desc_q;
        rx_used_desc = mp->rx_used_desc_q;
@@ -2728,8 +2874,10 @@ static ETH_FUNC_RET_STATUS eth_port_receive(struct mv643xx_private *mp,
        rmb();
 
        /* Nothing to receive... */
-       if (command_status & (ETH_BUFFER_OWNED_BY_DMA))
+       if (command_status & (ETH_BUFFER_OWNED_BY_DMA)) {
+               spin_unlock_irqrestore(&mp->lock, flags);
                return ETH_END_OF_JOB;
+       }
 
        p_pkt_info->byte_cnt = (p_rx_desc->byte_cnt) - RX_BUF_OFFSET;
        p_pkt_info->cmd_sts = command_status;
@@ -2737,8 +2885,10 @@ static ETH_FUNC_RET_STATUS eth_port_receive(struct mv643xx_private *mp,
        p_pkt_info->return_info = mp->rx_skb[rx_curr_desc];
        p_pkt_info->l4i_chk = p_rx_desc->buf_size;
 
-       /* Clean the return info field to indicate that the packet has been */
-       /* moved to the upper layers                                        */
+       /*
+        * Clean the return info field to indicate that the
+        * packet has been moved to the upper layers
+        */
        mp->rx_skb[rx_curr_desc] = NULL;
 
        /* Update current index in data structure */
@@ -2749,6 +2899,8 @@ static ETH_FUNC_RET_STATUS eth_port_receive(struct mv643xx_private *mp,
        if (rx_next_curr_desc == rx_used_desc)
                mp->rx_resource_err = 1;
 
+       spin_unlock_irqrestore(&mp->lock, flags);
+
        return ETH_OK;
 }
 
@@ -2777,6 +2929,9 @@ static ETH_FUNC_RET_STATUS eth_rx_return_buff(struct mv643xx_private *mp,
 {
        int used_rx_desc;       /* Where to return Rx resource */
        volatile struct eth_rx_desc *p_used_rx_desc;
+       unsigned long flags;
+
+       spin_lock_irqsave(&mp->lock, flags);
 
        /* Get 'used' Rx descriptor */
        used_rx_desc = mp->rx_used_desc_q;
@@ -2800,6 +2955,8 @@ static ETH_FUNC_RET_STATUS eth_rx_return_buff(struct mv643xx_private *mp,
        /* Any Rx return cancels the Rx resource error status */
        mp->rx_resource_err = 0;
 
+       spin_unlock_irqrestore(&mp->lock, flags);
+
        return ETH_OK;
 }
 
@@ -2812,7 +2969,7 @@ struct mv643xx_stats {
 };
 
 #define MV643XX_STAT(m) sizeof(((struct mv643xx_private *)0)->m), \
-                     offsetof(struct mv643xx_private, m)
+                                       offsetof(struct mv643xx_private, m)
 
 static const struct mv643xx_stats mv643xx_gstrings_stats[] = {
        { "rx_packets", MV643XX_STAT(stats.rx_packets) },
@@ -2963,9 +3120,8 @@ mv643xx_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
        return 0;
 }
 
-static void
-mv643xx_get_drvinfo(struct net_device *netdev,
-                       struct ethtool_drvinfo *drvinfo)
+static void mv643xx_get_drvinfo(struct net_device *netdev,
+                               struct ethtool_drvinfo *drvinfo)
 {
        strncpy(drvinfo->driver,  mv643xx_driver_name, 32);
        strncpy(drvinfo->version, mv643xx_driver_version, 32);
@@ -2974,39 +3130,37 @@ mv643xx_get_drvinfo(struct net_device *netdev,
        drvinfo->n_stats = MV643XX_STATS_LEN;
 }
 
-static int 
-mv643xx_get_stats_count(struct net_device *netdev)
+static int mv643xx_get_stats_count(struct net_device *netdev)
 {
        return MV643XX_STATS_LEN;
 }
 
-static void 
-mv643xx_get_ethtool_stats(struct net_device *netdev, 
-               struct ethtool_stats *stats, uint64_t *data)
+static void mv643xx_get_ethtool_stats(struct net_device *netdev,
+                               struct ethtool_stats *stats, uint64_t *data)
 {
        struct mv643xx_private *mp = netdev->priv;
        int i;
 
        eth_update_mib_counters(mp);
 
-       for(i = 0; i < MV643XX_STATS_LEN; i++) {
+       for (i = 0; i < MV643XX_STATS_LEN; i++) {
                char *p = (char *)mp+mv643xx_gstrings_stats[i].stat_offset;     
-               data[i] = (mv643xx_gstrings_stats[i].sizeof_stat == 
+               data[i] = (mv643xx_gstrings_stats[i].sizeof_stat ==
                        sizeof(uint64_t)) ? *(uint64_t *)p : *(uint32_t *)p;
        }
 }
 
-static void 
-mv643xx_get_strings(struct net_device *netdev, uint32_t stringset, uint8_t *data)
+static void mv643xx_get_strings(struct net_device *netdev, uint32_t stringset,
+                               uint8_t *data)
 {
        int i;
 
        switch(stringset) {
        case ETH_SS_STATS:
                for (i=0; i < MV643XX_STATS_LEN; i++) {
-                       memcpy(data + i * ETH_GSTRING_LEN, 
-                       mv643xx_gstrings_stats[i].stat_string,
-                       ETH_GSTRING_LEN);
+                       memcpy(data + i * ETH_GSTRING_LEN,
+                                       mv643xx_gstrings_stats[i].stat_string,
+                                       ETH_GSTRING_LEN);
                }
                break;
        }
index 89c46787676c9d9d3249597b38ce788a56fb5bc4..49b597cbc19a076e1ed2e737ff68b7b653922044 100644
@@ -3586,7 +3586,7 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
                txdp->Buffer_Pointer = (u64) pci_map_page
                    (sp->pdev, frag->page, frag->page_offset,
                     frag->size, PCI_DMA_TODEVICE);
-               txdp->Control_1 |= TXD_BUFFER0_SIZE(frag->size);
+               txdp->Control_1 = TXD_BUFFER0_SIZE(frag->size);
                if (skb_shinfo(skb)->ufo_size)
                        txdp->Control_1 |= TXD_UFO_EN;
        }
index b538e3038058a7afe022a83fbadf9876dac0ffbb..bf55a4cfb3d25e6a8c401f7518b98bb5e9b74f56 100644
@@ -3243,12 +3243,22 @@ static int __devinit skge_probe(struct pci_dev *pdev,
 
        pci_set_master(pdev);
 
-       if (!(err = pci_set_dma_mask(pdev, DMA_64BIT_MASK)))
+       if (sizeof(dma_addr_t) > sizeof(u32) &&
+           !(err = pci_set_dma_mask(pdev, DMA_64BIT_MASK))) {
                using_dac = 1;
-       else if (!(err = pci_set_dma_mask(pdev, DMA_32BIT_MASK))) {
-               printk(KERN_ERR PFX "%s no usable DMA configuration\n",
-                      pci_name(pdev));
-               goto err_out_free_regions;
+               err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
+               if (err < 0) {
+                       printk(KERN_ERR PFX "%s unable to obtain 64 bit DMA "
+                              "for consistent allocations\n", pci_name(pdev));
+                       goto err_out_free_regions;
+               }
+       } else {
+               err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
+               if (err) {
+                       printk(KERN_ERR PFX "%s no usable DMA configuration\n",
+                              pci_name(pdev));
+                       goto err_out_free_regions;
+               }
        }
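This skge probe hunk (and the matching sky2_probe change further down) prefers 64-bit DMA only when dma_addr_t can hold it, then falls back to 32-bit; condensed into a hypothetical helper, the decision is:

static int example_set_dma_masks(struct pci_dev *pdev, int *using_dac)
{
        /* only attempt 64-bit DMA when dma_addr_t can actually represent it */
        if (sizeof(dma_addr_t) > sizeof(u32) &&
            !pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
                *using_dac = 1;
                /* coherent allocations must honour the same mask */
                return pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
        }

        *using_dac = 0;
        return pci_set_dma_mask(pdev, DMA_32BIT_MASK);
}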
 
 #ifdef __BIG_ENDIAN
index f5d697c0c0315e6fa0594a4bb216a5ab89b234e1..f8b973a04b657adcaaaaaf4c8103c9ecea2ce4d2 100644
@@ -57,7 +57,7 @@
 #include "sky2.h"
 
 #define DRV_NAME               "sky2"
-#define DRV_VERSION            "0.11"
+#define DRV_VERSION            "0.13"
 #define PFX                    DRV_NAME " "
 
 /*
@@ -75,6 +75,7 @@
 #define RX_LE_BYTES            (RX_LE_SIZE*sizeof(struct sky2_rx_le))
 #define RX_MAX_PENDING         (RX_LE_SIZE/2 - 2)
 #define RX_DEF_PENDING         RX_MAX_PENDING
+#define RX_SKB_ALIGN           8
 
 #define TX_RING_SIZE           512
 #define TX_DEF_PENDING         (TX_RING_SIZE - 1)
@@ -91,7 +92,7 @@
 static const u32 default_msg =
     NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK
     | NETIF_MSG_TIMER | NETIF_MSG_TX_ERR | NETIF_MSG_RX_ERR
-    | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN | NETIF_MSG_INTR;
+    | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN;
 
 static int debug = -1;         /* defaults above */
 module_param(debug, int, 0);
@@ -624,13 +625,16 @@ static void sky2_mac_init(struct sky2_hw *hw, unsigned port)
 
 }
 
-static void sky2_ramset(struct sky2_hw *hw, u16 q, u32 start, size_t len)
+/* Assign Ram Buffer allocation.
+ * start and end are in units of 4k bytes
+ * ram registers are in units of 64bit words
+ */
+static void sky2_ramset(struct sky2_hw *hw, u16 q, u8 startk, u8 endk)
 {
-       u32 end;
+       u32 start, end;
 
-       start /= 8;
-       len /= 8;
-       end = start + len - 1;
+       start = startk * 4096/8;
+       end = (endk * 4096/8) - 1;
 
        sky2_write8(hw, RB_ADDR(q, RB_CTRL), RB_RST_CLR);
        sky2_write32(hw, RB_ADDR(q, RB_START), start);
@@ -639,14 +643,19 @@ static void sky2_ramset(struct sky2_hw *hw, u16 q, u32 start, size_t len)
        sky2_write32(hw, RB_ADDR(q, RB_RP), start);
 
        if (q == Q_R1 || q == Q_R2) {
-               u32 rxup, rxlo;
+               u32 space = (endk - startk) * 4096/8;
+               u32 tp = space - space/4;
 
-               rxlo = len/2;
-               rxup = rxlo + len/4;
+               /* On receive queues, set the thresholds
+                * give receiver priority when > 3/4 full
+                * send pause when down to 2K
+                */
+               sky2_write32(hw, RB_ADDR(q, RB_RX_UTHP), tp);
+               sky2_write32(hw, RB_ADDR(q, RB_RX_LTHP), space/2);
 
-               /* Set thresholds on receive queue's */
-               sky2_write32(hw, RB_ADDR(q, RB_RX_UTPP), rxup);
-               sky2_write32(hw, RB_ADDR(q, RB_RX_LTPP), rxlo);
+               tp = space - 2048/8;
+               sky2_write32(hw, RB_ADDR(q, RB_RX_UTPP), tp);
+               sky2_write32(hw, RB_ADDR(q, RB_RX_LTPP), space/4);
        } else {
                /* Enable store & forward on Tx queue's because
                 * Tx FIFO is only 1K on Yukon
@@ -695,9 +704,10 @@ static inline struct sky2_tx_le *get_tx_le(struct sky2_port *sky2)
  * This is a workaround code taken from SysKonnect sk98lin driver
  * to deal with chip bug on Yukon EC rev 0 in the wraparound case.
  */
-static inline void sky2_put_idx(struct sky2_hw *hw, unsigned q,
+static void sky2_put_idx(struct sky2_hw *hw, unsigned q,
                                u16 idx, u16 *last, u16 size)
 {
+       wmb();
        if (is_ec_a1(hw) && idx < *last) {
                u16 hwget = sky2_read16(hw, Y2_QADDR(q, PREF_UNIT_GET_IDX));
 
@@ -721,6 +731,7 @@ setnew:
                sky2_write16(hw, Y2_QADDR(q, PREF_UNIT_PUT_IDX), idx);
        }
        *last = idx;
+       mmiowb();
 }
 
 
@@ -734,11 +745,11 @@ static inline struct sky2_rx_le *sky2_next_rx(struct sky2_port *sky2)
 /* Return high part of DMA address (could be 32 or 64 bit) */
 static inline u32 high32(dma_addr_t a)
 {
-       return (a >> 16) >> 16;
+       return sizeof(a) > sizeof(u32) ? (a >> 16) >> 16 : 0;
 }
 
 /* Build description to hardware about buffer */
-static inline void sky2_rx_add(struct sky2_port *sky2, dma_addr_t map)
+static void sky2_rx_add(struct sky2_port *sky2, dma_addr_t map)
 {
        struct sky2_rx_le *le;
        u32 hi = high32(map);
@@ -878,13 +889,13 @@ static void sky2_vlan_rx_register(struct net_device *dev, struct vlan_group *grp
        struct sky2_hw *hw = sky2->hw;
        u16 port = sky2->port;
 
-       spin_lock(&sky2->tx_lock);
+       spin_lock_bh(&sky2->tx_lock);
 
        sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T), RX_VLAN_STRIP_ON);
        sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), TX_VLAN_TAG_ON);
        sky2->vlgrp = grp;
 
-       spin_unlock(&sky2->tx_lock);
+       spin_unlock_bh(&sky2->tx_lock);
 }
 
 static void sky2_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
@@ -893,27 +904,42 @@ static void sky2_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
        struct sky2_hw *hw = sky2->hw;
        u16 port = sky2->port;
 
-       spin_lock(&sky2->tx_lock);
+       spin_lock_bh(&sky2->tx_lock);
 
        sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T), RX_VLAN_STRIP_OFF);
        sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), TX_VLAN_TAG_OFF);
        if (sky2->vlgrp)
                sky2->vlgrp->vlan_devices[vid] = NULL;
 
-       spin_unlock(&sky2->tx_lock);
+       spin_unlock_bh(&sky2->tx_lock);
 }
 #endif
 
+/*
+ * It appears the hardware has a bug in the FIFO logic that
+ * causes it to hang if the FIFO gets overrun and the receive buffer
+ * is not aligned. Also, alloc_skb() won't align properly if slab
+ * debugging is enabled.
+ */
+static inline struct sk_buff *sky2_alloc_skb(unsigned int size, gfp_t gfp_mask)
+{
+       struct sk_buff *skb;
+
+       skb = alloc_skb(size + RX_SKB_ALIGN, gfp_mask);
+       if (likely(skb)) {
+               unsigned long p = (unsigned long) skb->data;
+               skb_reserve(skb,
+                       ((p + RX_SKB_ALIGN - 1) & ~(RX_SKB_ALIGN - 1)) - p);
+       }
+
+       return skb;
+}
+
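The skb_reserve() arithmetic above rounds skb->data up to the next RX_SKB_ALIGN boundary; as a standalone helper (hypothetical name), a pointer ending in ...5 with RX_SKB_ALIGN == 8 yields a pad of 3 bytes:

static inline unsigned int example_align_pad(unsigned long p)
{
        /* bytes needed to advance p to the next RX_SKB_ALIGN boundary */
        return (unsigned int)(((p + RX_SKB_ALIGN - 1) &
                               ~(unsigned long)(RX_SKB_ALIGN - 1)) - p);
}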
 /*
  * Allocate and setup receiver buffer pool.
  * In case of 64 bit dma, there are 2X as many list elements
  * available as ring entries
  * and need to reserve one list element so we don't wrap around.
- *
- * It appears the hardware has a bug in the FIFO logic that
- * cause it to hang if the FIFO gets overrun and the receive buffer
- * is not aligned.  This means we can't use skb_reserve to align
- * the IP header.
  */
 static int sky2_rx_start(struct sky2_port *sky2)
 {
@@ -929,7 +955,7 @@ static int sky2_rx_start(struct sky2_port *sky2)
        for (i = 0; i < sky2->rx_pending; i++) {
                struct ring_info *re = sky2->rx_ring + i;
 
-               re->skb = dev_alloc_skb(sky2->rx_bufsize);
+               re->skb = sky2_alloc_skb(sky2->rx_bufsize, GFP_KERNEL);
                if (!re->skb)
                        goto nomem;
 
@@ -986,19 +1012,19 @@ static int sky2_up(struct net_device *dev)
 
        sky2_mac_init(hw, port);
 
-       /* Configure RAM buffers */
-       if (hw->chip_id == CHIP_ID_YUKON_FE ||
-           (hw->chip_id == CHIP_ID_YUKON_EC && hw->chip_rev == 2))
-               ramsize = 4096;
-       else {
-               u8 e0 = sky2_read8(hw, B2_E_0);
-               ramsize = (e0 == 0) ? (128 * 1024) : (e0 * 4096);
-       }
+       /* Determine available ram buffer space (in 4K blocks).
+        * Note: not sure about the FE setting below yet
+        */
+       if (hw->chip_id == CHIP_ID_YUKON_FE)
+               ramsize = 4;
+       else
+               ramsize = sky2_read8(hw, B2_E_0);
+
+       /* Give transmitter one third (rounded up) */
+       rxspace = ramsize - (ramsize + 2) / 3;
 
-       /* 2/3 for Rx */
-       rxspace = (2 * ramsize) / 3;
        sky2_ramset(hw, rxqaddr[port], 0, rxspace);
-       sky2_ramset(hw, txqaddr[port], rxspace, ramsize - rxspace);
+       sky2_ramset(hw, txqaddr[port], rxspace, ramsize);
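A worked example of the new split (the ramsize value is an assumption, not taken from the patch):

        /*
         * With ramsize = 13 blocks of 4K:
         *      rxspace = 13 - (13 + 2) / 3 = 13 - 5 = 8
         * so the receiver gets blocks 0-7 and the transmitter blocks 8-12,
         * i.e. one third of the RAM, rounded up, goes to the transmitter.
         */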
 
        /* Make sure SyncQ is disabled */
        sky2_write8(hw, RB_ADDR(port == 0 ? Q_XS1 : Q_XS2, RB_CTRL),
@@ -1054,7 +1080,7 @@ static inline int tx_avail(const struct sky2_port *sky2)
 }
 
 /* Estimate of number of transmit list elements required */
-static inline unsigned tx_le_req(const struct sk_buff *skb)
+static unsigned tx_le_req(const struct sk_buff *skb)
 {
        unsigned count;
 
@@ -1090,6 +1116,10 @@ static int sky2_xmit_frame(struct sk_buff *skb, struct net_device *dev)
        u16 mss;
        u8 ctrl;
 
+       /* No BH disabling for tx_lock here.  We are running in BH disabled
+        * context and TX reclaim runs via poll inside of a software
+        * interrupt, and no related locks in IRQ processing.
+        */
        if (!spin_trylock(&sky2->tx_lock))
                return NETDEV_TX_LOCKED;
 
@@ -1099,8 +1129,9 @@ static int sky2_xmit_frame(struct sk_buff *skb, struct net_device *dev)
                 */
                if (!netif_queue_stopped(dev)) {
                        netif_stop_queue(dev);
-                       printk(KERN_WARNING PFX "%s: ring full when queue awake!\n",
-                              dev->name);
+                       if (net_ratelimit())
+                               printk(KERN_WARNING PFX "%s: ring full when queue awake!\n",
+                                      dev->name);
                }
                spin_unlock(&sky2->tx_lock);
 
@@ -1199,7 +1230,7 @@ static int sky2_xmit_frame(struct sk_buff *skb, struct net_device *dev)
 
                mapping = pci_map_page(hw->pdev, frag->page, frag->page_offset,
                                       frag->size, PCI_DMA_TODEVICE);
-               addr64 = (mapping >> 16) >> 16;
+               addr64 = high32(mapping);
                if (addr64 != sky2->tx_addr64) {
                        le = get_tx_le(sky2);
                        le->tx.addr = cpu_to_le32(addr64);
@@ -1229,7 +1260,6 @@ static int sky2_xmit_frame(struct sk_buff *skb, struct net_device *dev)
                netif_stop_queue(dev);
 
 out_unlock:
-       mmiowb();
        spin_unlock(&sky2->tx_lock);
 
        dev->trans_start = jiffies;
@@ -1282,17 +1312,17 @@ static void sky2_tx_complete(struct sky2_port *sky2, u16 done)
                dev_kfree_skb_any(skb);
        }
 
-       spin_lock(&sky2->tx_lock);
        sky2->tx_cons = put;
        if (netif_queue_stopped(dev) && tx_avail(sky2) > MAX_SKB_TX_LE)
                netif_wake_queue(dev);
-       spin_unlock(&sky2->tx_lock);
 }
 
 /* Cleanup all untransmitted buffers, assume transmitter not running */
 static void sky2_tx_clean(struct sky2_port *sky2)
 {
+       spin_lock_bh(&sky2->tx_lock);
        sky2_tx_complete(sky2, sky2->tx_prod);
+       spin_unlock_bh(&sky2->tx_lock);
 }
 
 /* Network shutdown */
@@ -1582,28 +1612,40 @@ out:
        local_irq_enable();
 }
 
+
+/* Transmit timeout is only called if we are running, carrier is up
+ * and tx queue is full (stopped).
+ */
 static void sky2_tx_timeout(struct net_device *dev)
 {
        struct sky2_port *sky2 = netdev_priv(dev);
        struct sky2_hw *hw = sky2->hw;
        unsigned txq = txqaddr[sky2->port];
+       u16 ridx;
+
+       /* Maybe we just missed a status interrupt */
+       spin_lock(&sky2->tx_lock);
+       ridx = sky2_read16(hw,
+                          sky2->port == 0 ? STAT_TXA1_RIDX : STAT_TXA2_RIDX);
+       sky2_tx_complete(sky2, ridx);
+       spin_unlock(&sky2->tx_lock);
+
+       if (!netif_queue_stopped(dev)) {
+               if (net_ratelimit())
+                       pr_info(PFX "transmit interrupt missed? recovered\n");
+               return;
+       }
 
        if (netif_msg_timer(sky2))
                printk(KERN_ERR PFX "%s: tx timeout\n", dev->name);
 
-       netif_stop_queue(dev);
-
        sky2_write32(hw, Q_ADDR(txq, Q_CSR), BMU_STOP);
-       sky2_read32(hw, Q_ADDR(txq, Q_CSR));
-
        sky2_write32(hw, Y2_QADDR(txq, PREF_UNIT_CTRL), PREF_UNIT_RST_SET);
 
        sky2_tx_clean(sky2);
 
        sky2_qset(hw, txq);
        sky2_prefetch_init(hw, txq, sky2->tx_le_map, TX_RING_SIZE - 1);
-
-       netif_wake_queue(dev);
 }
 
 
@@ -1713,7 +1755,7 @@ static struct sk_buff *sky2_receive(struct sky2_port *sky2,
        } else {
                struct sk_buff *nskb;
 
-               nskb = dev_alloc_skb(sky2->rx_bufsize);
+               nskb = sky2_alloc_skb(sky2->rx_bufsize, GFP_ATOMIC);
                if (!nskb)
                        goto resubmit;
 
@@ -1745,7 +1787,7 @@ oversize:
 error:
        ++sky2->net_stats.rx_errors;
 
-       if (netif_msg_rx_err(sky2))
+       if (netif_msg_rx_err(sky2) && net_ratelimit())
                printk(KERN_INFO PFX "%s: rx error, status 0x%x length %d\n",
                       sky2->netdev->name, status, length);
 
@@ -1766,13 +1808,16 @@ error:
  */
 #define TX_NO_STATUS   0xffff
 
-static inline void sky2_tx_check(struct sky2_hw *hw, int port, u16 last)
+static void sky2_tx_check(struct sky2_hw *hw, int port, u16 last)
 {
        if (last != TX_NO_STATUS) {
                struct net_device *dev = hw->dev[port];
                if (dev && netif_running(dev)) {
                        struct sky2_port *sky2 = netdev_priv(dev);
+
+                       spin_lock(&sky2->tx_lock);
                        sky2_tx_complete(sky2, last);
+                       spin_unlock(&sky2->tx_lock);
                }
        }
 }
@@ -1800,7 +1845,6 @@ static int sky2_poll(struct net_device *dev0, int *budget)
                struct sk_buff *skb;
                u32 status;
                u16 length;
-               u8 op;
 
                le = hw->st_le + hw->st_idx;
                hw->st_idx = (hw->st_idx + 1) % STATUS_RING_SIZE;
@@ -1814,10 +1858,8 @@ static int sky2_poll(struct net_device *dev0, int *budget)
                sky2 = netdev_priv(dev);
                status = le32_to_cpu(le->status);
                length = le16_to_cpu(le->length);
-               op = le->opcode & ~HW_OWNER;
-               le->opcode = 0;
 
-               switch (op) {
+               switch (le->opcode & ~HW_OWNER) {
                case OP_RXSTAT:
                        skb = sky2_receive(sky2, length, status);
                        if (!skb)
@@ -1865,14 +1907,13 @@ static int sky2_poll(struct net_device *dev0, int *budget)
                default:
                        if (net_ratelimit())
                                printk(KERN_WARNING PFX
-                                      "unknown status opcode 0x%x\n", op);
+                                      "unknown status opcode 0x%x\n", le->opcode);
                        break;
                }
        }
 
 exit_loop:
        sky2_write32(hw, STAT_CTRL, SC_STAT_CLR_IRQ);
-       mmiowb();
 
        sky2_tx_check(hw, 0, tx_done[0]);
        sky2_tx_check(hw, 1, tx_done[1]);
@@ -1887,7 +1928,6 @@ exit_loop:
                netif_rx_complete(dev0);
                hw->intr_mask |= Y2_IS_STAT_BMU;
                sky2_write32(hw, B0_IMSK, hw->intr_mask);
-               mmiowb();
                return 0;
        } else {
                *budget -= work_done;
@@ -1900,35 +1940,42 @@ static void sky2_hw_error(struct sky2_hw *hw, unsigned port, u32 status)
 {
        struct net_device *dev = hw->dev[port];
 
-       printk(KERN_INFO PFX "%s: hw error interrupt status 0x%x\n",
-              dev->name, status);
+       if (net_ratelimit())
+               printk(KERN_INFO PFX "%s: hw error interrupt status 0x%x\n",
+                      dev->name, status);
 
        if (status & Y2_IS_PAR_RD1) {
-               printk(KERN_ERR PFX "%s: ram data read parity error\n",
-                      dev->name);
+               if (net_ratelimit())
+                       printk(KERN_ERR PFX "%s: ram data read parity error\n",
+                              dev->name);
                /* Clear IRQ */
                sky2_write16(hw, RAM_BUFFER(port, B3_RI_CTRL), RI_CLR_RD_PERR);
        }
 
        if (status & Y2_IS_PAR_WR1) {
-               printk(KERN_ERR PFX "%s: ram data write parity error\n",
-                      dev->name);
+               if (net_ratelimit())
+                       printk(KERN_ERR PFX "%s: ram data write parity error\n",
+                              dev->name);
 
                sky2_write16(hw, RAM_BUFFER(port, B3_RI_CTRL), RI_CLR_WR_PERR);
        }
 
        if (status & Y2_IS_PAR_MAC1) {
-               printk(KERN_ERR PFX "%s: MAC parity error\n", dev->name);
+               if (net_ratelimit())
+                       printk(KERN_ERR PFX "%s: MAC parity error\n", dev->name);
                sky2_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_CLI_TX_PE);
        }
 
        if (status & Y2_IS_PAR_RX1) {
-               printk(KERN_ERR PFX "%s: RX parity error\n", dev->name);
+               if (net_ratelimit())
+                       printk(KERN_ERR PFX "%s: RX parity error\n", dev->name);
                sky2_write32(hw, Q_ADDR(rxqaddr[port], Q_CSR), BMU_CLR_IRQ_PAR);
        }
 
        if (status & Y2_IS_TCP_TXA1) {
-               printk(KERN_ERR PFX "%s: TCP segmentation error\n", dev->name);
+               if (net_ratelimit())
+                       printk(KERN_ERR PFX "%s: TCP segmentation error\n",
+                              dev->name);
                sky2_write32(hw, Q_ADDR(txqaddr[port], Q_CSR), BMU_CLR_IRQ_TCP);
        }
 }
@@ -1944,8 +1991,9 @@ static void sky2_hw_intr(struct sky2_hw *hw)
                u16 pci_err;
 
                pci_read_config_word(hw->pdev, PCI_STATUS, &pci_err);
-               printk(KERN_ERR PFX "%s: pci hw error (0x%x)\n",
-                      pci_name(hw->pdev), pci_err);
+               if (net_ratelimit())
+                       printk(KERN_ERR PFX "%s: pci hw error (0x%x)\n",
+                              pci_name(hw->pdev), pci_err);
 
                sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
                pci_write_config_word(hw->pdev, PCI_STATUS,
@@ -1959,8 +2007,9 @@ static void sky2_hw_intr(struct sky2_hw *hw)
 
                pci_read_config_dword(hw->pdev, PEX_UNC_ERR_STAT, &pex_err);
 
-               printk(KERN_ERR PFX "%s: pci express error (0x%x)\n",
-                      pci_name(hw->pdev), pex_err);
+               if (net_ratelimit())
+                       printk(KERN_ERR PFX "%s: pci express error (0x%x)\n",
+                              pci_name(hw->pdev), pex_err);
 
                /* clear the interrupt */
                sky2_write32(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
@@ -2250,7 +2299,7 @@ static int sky2_reset(struct sky2_hw *hw)
        return 0;
 }
 
-static inline u32 sky2_supported_modes(const struct sky2_hw *hw)
+static u32 sky2_supported_modes(const struct sky2_hw *hw)
 {
        u32 modes;
        if (hw->copper) {
@@ -2995,7 +3044,7 @@ static __devinit struct net_device *sky2_init_netdev(struct sky2_hw *hw,
        return dev;
 }
 
-static inline void sky2_show_addr(struct net_device *dev)
+static void __devinit sky2_show_addr(struct net_device *dev)
 {
        const struct sky2_port *sky2 = netdev_priv(dev);
 
@@ -3038,13 +3087,17 @@ static int __devinit sky2_probe(struct pci_dev *pdev,
                goto err_out_free_regions;
        }
 
-       if (sizeof(dma_addr_t) > sizeof(u32)) {
-               err = pci_set_dma_mask(pdev, DMA_64BIT_MASK);
-               if (!err)
-                       using_dac = 1;
-       }
+       if (sizeof(dma_addr_t) > sizeof(u32) &&
+           !(err = pci_set_dma_mask(pdev, DMA_64BIT_MASK))) {
+               using_dac = 1;
+               err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
+               if (err < 0) {
+                       printk(KERN_ERR PFX "%s unable to obtain 64 bit DMA "
+                              "for consistent allocations\n", pci_name(pdev));
+                       goto err_out_free_regions;
+               }
 
-       if (!using_dac) {
+       } else {
                err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
                if (err) {
                        printk(KERN_ERR PFX "%s no usable DMA configuration\n",
@@ -3052,6 +3105,7 @@ static int __devinit sky2_probe(struct pci_dev *pdev,
                        goto err_out_free_regions;
                }
        }
+
 #ifdef __BIG_ENDIAN
        /* byte swap descriptors in hardware */
        {
@@ -3064,14 +3118,13 @@ static int __devinit sky2_probe(struct pci_dev *pdev,
 #endif
 
        err = -ENOMEM;
-       hw = kmalloc(sizeof(*hw), GFP_KERNEL);
+       hw = kzalloc(sizeof(*hw), GFP_KERNEL);
        if (!hw) {
                printk(KERN_ERR PFX "%s: cannot allocate hardware struct\n",
                       pci_name(pdev));
                goto err_out_free_regions;
        }
 
-       memset(hw, 0, sizeof(*hw));
        hw->pdev = pdev;
 
        hw->regs = ioremap_nocache(pci_resource_start(pdev, 0), 0x4000);
index 0d765f1733b5cd637e720f2c16369573b4762caf..1f5975a61e1f1526d8a43869c760ccd96e47753b 100644
@@ -22,7 +22,6 @@
  */
 
 #include <linux/config.h>
-
 #include <linux/compiler.h>
 #include <linux/crc32.h>
 #include <linux/delay.h>
@@ -30,6 +29,7 @@
 #include <linux/ethtool.h>
 #include <linux/firmware.h>
 #include <linux/if_vlan.h>
+#include <linux/in.h>
 #include <linux/init.h>
 #include <linux/ioport.h>
 #include <linux/ip.h>
@@ -43,6 +43,7 @@
 #include <linux/slab.h>
 #include <linux/tcp.h>
 #include <linux/types.h>
+#include <linux/vmalloc.h>
 #include <linux/wait.h>
 #include <linux/workqueue.h>
 #include <asm/bitops.h>
@@ -108,42 +109,6 @@ spider_net_write_reg(struct spider_net_card *card, u32 reg, u32 value)
        writel(value, card->regs + reg);
 }
 
-/**
- * spider_net_write_reg_sync - writes to an SMMIO register of a card
- * @card: device structure
- * @reg: register to write to
- * @value: value to write into the specified SMMIO register
- *
- * Unlike spider_net_write_reg, this will also make sure the
- * data arrives on the card by reading the reg again.
- */
-static void
-spider_net_write_reg_sync(struct spider_net_card *card, u32 reg, u32 value)
-{
-       value = cpu_to_le32(value);
-       writel(value, card->regs + reg);
-       (void)readl(card->regs + reg);
-}
-
-/**
- * spider_net_rx_irq_off - switch off rx irq on this spider card
- * @card: device structure
- *
- * switches off rx irq by masking them out in the GHIINTnMSK register
- */
-static void
-spider_net_rx_irq_off(struct spider_net_card *card)
-{
-       u32 regvalue;
-       unsigned long flags;
-
-       spin_lock_irqsave(&card->intmask_lock, flags);
-       regvalue = spider_net_read_reg(card, SPIDER_NET_GHIINT0MSK);
-       regvalue &= ~SPIDER_NET_RXINT;
-       spider_net_write_reg_sync(card, SPIDER_NET_GHIINT0MSK, regvalue);
-       spin_unlock_irqrestore(&card->intmask_lock, flags);
-}
-
 /** spider_net_write_phy - write to phy register
  * @netdev: adapter to be written to
  * @mii_id: id of MII
@@ -199,60 +164,33 @@ spider_net_read_phy(struct net_device *netdev, int mii_id, int reg)
 }
 
 /**
- * spider_net_rx_irq_on - switch on rx irq on this spider card
- * @card: device structure
- *
- * switches on rx irq by enabling them in the GHIINTnMSK register
- */
-static void
-spider_net_rx_irq_on(struct spider_net_card *card)
-{
-       u32 regvalue;
-       unsigned long flags;
-
-       spin_lock_irqsave(&card->intmask_lock, flags);
-       regvalue = spider_net_read_reg(card, SPIDER_NET_GHIINT0MSK);
-       regvalue |= SPIDER_NET_RXINT;
-       spider_net_write_reg_sync(card, SPIDER_NET_GHIINT0MSK, regvalue);
-       spin_unlock_irqrestore(&card->intmask_lock, flags);
-}
-
-/**
- * spider_net_tx_irq_off - switch off tx irq on this spider card
+ * spider_net_rx_irq_off - switch off rx irq on this spider card
  * @card: device structure
  *
- * switches off tx irq by masking them out in the GHIINTnMSK register
+ * switches off rx irq by masking them out in the GHIINTnMSK register
  */
 static void
-spider_net_tx_irq_off(struct spider_net_card *card)
+spider_net_rx_irq_off(struct spider_net_card *card)
 {
        u32 regvalue;
-       unsigned long flags;
 
-       spin_lock_irqsave(&card->intmask_lock, flags);
-       regvalue = spider_net_read_reg(card, SPIDER_NET_GHIINT0MSK);
-       regvalue &= ~SPIDER_NET_TXINT;
-       spider_net_write_reg_sync(card, SPIDER_NET_GHIINT0MSK, regvalue);
-       spin_unlock_irqrestore(&card->intmask_lock, flags);
+       regvalue = SPIDER_NET_INT0_MASK_VALUE & (~SPIDER_NET_RXINT);
+       spider_net_write_reg(card, SPIDER_NET_GHIINT0MSK, regvalue);
 }
 
 /**
- * spider_net_tx_irq_on - switch on tx irq on this spider card
+ * spider_net_rx_irq_on - switch on rx irq on this spider card
  * @card: device structure
  *
- * switches on tx irq by enabling them in the GHIINTnMSK register
+ * switches on rx irq by enabling them in the GHIINTnMSK register
  */
 static void
-spider_net_tx_irq_on(struct spider_net_card *card)
+spider_net_rx_irq_on(struct spider_net_card *card)
 {
        u32 regvalue;
-       unsigned long flags;
 
-       spin_lock_irqsave(&card->intmask_lock, flags);
-       regvalue = spider_net_read_reg(card, SPIDER_NET_GHIINT0MSK);
-       regvalue |= SPIDER_NET_TXINT;
-       spider_net_write_reg_sync(card, SPIDER_NET_GHIINT0MSK, regvalue);
-       spin_unlock_irqrestore(&card->intmask_lock, flags);
+       regvalue = SPIDER_NET_INT0_MASK_VALUE | SPIDER_NET_RXINT;
+       spider_net_write_reg(card, SPIDER_NET_GHIINT0MSK, regvalue);
 }
 
 /**
@@ -326,9 +264,8 @@ static enum spider_net_descr_status
 spider_net_get_descr_status(struct spider_net_descr *descr)
 {
        u32 cmd_status;
-       rmb();
+
        cmd_status = descr->dmac_cmd_status;
-       rmb();
        cmd_status >>= SPIDER_NET_DESCR_IND_PROC_SHIFT;
        /* no need to mask out any bits, as cmd_status is 32 bits wide only
         * (and unsigned) */
@@ -349,7 +286,6 @@ spider_net_set_descr_status(struct spider_net_descr *descr,
 {
        u32 cmd_status;
        /* read the status */
-       mb();
        cmd_status = descr->dmac_cmd_status;
        /* clean the upper 4 bits */
        cmd_status &= SPIDER_NET_DESCR_IND_PROC_MASKO;
@@ -357,7 +293,6 @@ spider_net_set_descr_status(struct spider_net_descr *descr,
        cmd_status |= ((u32)status)<<SPIDER_NET_DESCR_IND_PROC_SHIFT;
        /* and write it back */
        descr->dmac_cmd_status = cmd_status;
-       wmb();
 }
 
 /**
@@ -398,8 +333,9 @@ spider_net_init_chain(struct spider_net_card *card,
 {
        int i;
        struct spider_net_descr *descr;
+       dma_addr_t buf;
 
-       spin_lock_init(&card->chain_lock);
+       atomic_set(&card->rx_chain_refill,0);
 
        descr = start_descr;
        memset(descr, 0, sizeof(*descr) * no);
@@ -408,14 +344,14 @@ spider_net_init_chain(struct spider_net_card *card,
        for (i=0; i<no; i++, descr++) {
                spider_net_set_descr_status(descr, SPIDER_NET_DESCR_NOT_IN_USE);
 
-               descr->bus_addr =
-                       pci_map_single(card->pdev, descr,
-                                      SPIDER_NET_DESCR_SIZE,
-                                      PCI_DMA_BIDIRECTIONAL);
+               buf = pci_map_single(card->pdev, descr,
+                                    SPIDER_NET_DESCR_SIZE,
+                                    PCI_DMA_BIDIRECTIONAL);
 
-               if (descr->bus_addr == DMA_ERROR_CODE)
+               if (buf == DMA_ERROR_CODE)
                        goto iommu_error;
 
+               descr->bus_addr = buf;
                descr->next = descr + 1;
                descr->prev = descr - 1;
 
@@ -439,7 +375,8 @@ iommu_error:
        for (i=0; i < no; i++, descr++)
                if (descr->bus_addr)
                        pci_unmap_single(card->pdev, descr->bus_addr,
-                                        SPIDER_NET_DESCR_SIZE, PCI_DMA_BIDIRECTIONAL);
+                                        SPIDER_NET_DESCR_SIZE,
+                                        PCI_DMA_BIDIRECTIONAL);
        return -ENOMEM;
 }
 
@@ -459,7 +396,7 @@ spider_net_free_rx_chain_contents(struct spider_net_card *card)
                if (descr->skb) {
                        dev_kfree_skb(descr->skb);
                        pci_unmap_single(card->pdev, descr->buf_addr,
-                                        SPIDER_NET_MAX_MTU,
+                                        SPIDER_NET_MAX_FRAME,
                                         PCI_DMA_BIDIRECTIONAL);
                }
                descr = descr->next;
@@ -480,12 +417,13 @@ static int
 spider_net_prepare_rx_descr(struct spider_net_card *card,
                            struct spider_net_descr *descr)
 {
+       dma_addr_t buf;
        int error = 0;
        int offset;
        int bufsize;
 
        /* we need to round up the buffer size to a multiple of 128 */
-       bufsize = (SPIDER_NET_MAX_MTU + SPIDER_NET_RXBUF_ALIGN - 1) &
+       bufsize = (SPIDER_NET_MAX_FRAME + SPIDER_NET_RXBUF_ALIGN - 1) &
                (~(SPIDER_NET_RXBUF_ALIGN - 1));
 
        /* and we need to have it 128 byte aligned, therefore we allocate a
@@ -493,10 +431,8 @@ spider_net_prepare_rx_descr(struct spider_net_card *card,
        /* allocate an skb */
        descr->skb = dev_alloc_skb(bufsize + SPIDER_NET_RXBUF_ALIGN - 1);
        if (!descr->skb) {
-               if (net_ratelimit())
-                       if (netif_msg_rx_err(card))
-                               pr_err("Not enough memory to allocate "
-                                       "rx buffer\n");
+               if (netif_msg_rx_err(card) && net_ratelimit())
+                       pr_err("Not enough memory to allocate rx buffer\n");
                return -ENOMEM;
        }
        descr->buf_size = bufsize;
@@ -510,12 +446,12 @@ spider_net_prepare_rx_descr(struct spider_net_card *card,
        if (offset)
                skb_reserve(descr->skb, SPIDER_NET_RXBUF_ALIGN - offset);
        /* io-mmu-map the skb */
-       descr->buf_addr = pci_map_single(card->pdev, descr->skb->data,
-                                        SPIDER_NET_MAX_MTU,
-                                        PCI_DMA_BIDIRECTIONAL);
-       if (descr->buf_addr == DMA_ERROR_CODE) {
+       buf = pci_map_single(card->pdev, descr->skb->data,
+                            SPIDER_NET_MAX_FRAME, PCI_DMA_BIDIRECTIONAL);
+       descr->buf_addr = buf;
+       if (buf == DMA_ERROR_CODE) {
                dev_kfree_skb_any(descr->skb);
-               if (netif_msg_rx_err(card))
+               if (netif_msg_rx_err(card) && net_ratelimit())
                        pr_err("Could not iommu-map rx buffer\n");
                spider_net_set_descr_status(descr, SPIDER_NET_DESCR_NOT_IN_USE);
        } else {
@@ -526,10 +462,10 @@ spider_net_prepare_rx_descr(struct spider_net_card *card,
 }
 
 /**
- * spider_net_enable_rxctails - sets RX dmac chain tail addresses
+ * spider_net_enable_rxchtails - sets RX dmac chain tail addresses
  * @card: card structure
  *
- * spider_net_enable_rxctails sets the RX DMAC chain tail adresses in the
+ * spider_net_enable_rxchtails sets the RX DMAC chain tail addresses in the
  * chip by writing to the appropriate register. DMA is enabled in
  * spider_net_enable_rxdmac.
  */
@@ -551,6 +487,7 @@ spider_net_enable_rxchtails(struct spider_net_card *card)
 static void
 spider_net_enable_rxdmac(struct spider_net_card *card)
 {
+       wmb();
        spider_net_write_reg(card, SPIDER_NET_GDADMACCNTR,
                             SPIDER_NET_DMA_RX_VALUE);
 }
@@ -559,32 +496,28 @@ spider_net_enable_rxdmac(struct spider_net_card *card)
  * spider_net_refill_rx_chain - refills descriptors/skbs in the rx chains
  * @card: card structure
  *
- * refills descriptors in all chains (last used chain first): allocates skbs
- * and iommu-maps them.
+ * refills descriptors in the rx chain: allocates skbs and iommu-maps them.
  */
 static void
 spider_net_refill_rx_chain(struct spider_net_card *card)
 {
        struct spider_net_descr_chain *chain;
-       int count = 0;
-       unsigned long flags;
 
        chain = &card->rx_chain;
 
-       spin_lock_irqsave(&card->chain_lock, flags);
-       while (spider_net_get_descr_status(chain->head) ==
-                               SPIDER_NET_DESCR_NOT_IN_USE) {
-               if (spider_net_prepare_rx_descr(card, chain->head))
-                       break;
-               count++;
-               chain->head = chain->head->next;
-       }
-       spin_unlock_irqrestore(&card->chain_lock, flags);
+       /* one context doing the refill (and a second context seeing that
+        * and omitting it) is ok. If called by NAPI, we'll be called again
+        * as spider_net_decode_one_descr is called several times. If some
+        * interrupt calls us, the NAPI is about to clean up anyway. */
+       if (atomic_inc_return(&card->rx_chain_refill) == 1)
+               while (spider_net_get_descr_status(chain->head) ==
+                      SPIDER_NET_DESCR_NOT_IN_USE) {
+                       if (spider_net_prepare_rx_descr(card, chain->head))
+                               break;
+                       chain->head = chain->head->next;
+               }
 
-       /* could be optimized, only do that, if we know the DMA processing
-        * has terminated */
-       if (count)
-               spider_net_enable_rxdmac(card);
+       atomic_dec(&card->rx_chain_refill);
 }
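The atomic counter above acts as a single-enterer guard; reduced to its skeleton (hypothetical helper, the refill work itself elided):

static void example_single_enterer(atomic_t *busy)
{
        /* only the caller that takes the counter from 0 to 1 does the work */
        if (atomic_inc_return(busy) == 1) {
                /* ... refill loop would run here ... */
        }
        atomic_dec(busy);
}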
 
 /**
@@ -613,6 +546,7 @@ spider_net_alloc_rx_skbs(struct spider_net_card *card)
        /* this will allocate the rest of the rx buffers; if not, it's
         * business as usual later on */
        spider_net_refill_rx_chain(card);
+       spider_net_enable_rxdmac(card);
        return 0;
 
 error:
@@ -649,24 +583,30 @@ spider_net_release_tx_descr(struct spider_net_card *card,
  * @card: adapter structure
  * @brutal: if set, don't care about whether descriptor seems to be in use
  *
- * releases the tx descriptors that spider has finished with (if non-brutal)
- * or simply release tx descriptors (if brutal)
+ * returns 0 if the tx ring is empty, otherwise 1.
+ *
+ * spider_net_release_tx_chain releases the tx descriptors that spider has
+ * finished with (if non-brutal) or simply release tx descriptors (if brutal).
+ * If some other context is calling this function, we return 1 so that we're
+ * scheduled again (if we were scheduled) and will not lose the initiative.
  */
-static void
+static int
 spider_net_release_tx_chain(struct spider_net_card *card, int brutal)
 {
        struct spider_net_descr_chain *tx_chain = &card->tx_chain;
        enum spider_net_descr_status status;
 
-       spider_net_tx_irq_off(card);
+       if (atomic_inc_return(&card->tx_chain_release) != 1) {
+               atomic_dec(&card->tx_chain_release);
+               return 1;
+       }
 
-       /* no lock for chain needed, if this is only executed once at a time */
-again:
        for (;;) {
                status = spider_net_get_descr_status(tx_chain->tail);
                switch (status) {
                case SPIDER_NET_DESCR_CARDOWNED:
-                       if (!brutal) goto out;
+                       if (!brutal)
+                               goto out;
                        /* fallthrough, if we release the descriptors
                         * brutally (then we don't care about
                         * SPIDER_NET_DESCR_CARDOWNED) */
@@ -693,25 +633,30 @@ again:
                tx_chain->tail = tx_chain->tail->next;
        }
 out:
+       atomic_dec(&card->tx_chain_release);
+
        netif_wake_queue(card->netdev);
 
-       if (!brutal) {
-               /* switch on tx irqs (while we are still in the interrupt
-                * handler, so we don't get an interrupt), check again
-                * for done descriptors. This results in fewer interrupts */
-               spider_net_tx_irq_on(card);
-               status = spider_net_get_descr_status(tx_chain->tail);
-               switch (status) {
-                       case SPIDER_NET_DESCR_RESPONSE_ERROR:
-                       case SPIDER_NET_DESCR_PROTECTION_ERROR:
-                       case SPIDER_NET_DESCR_FORCE_END:
-                       case SPIDER_NET_DESCR_COMPLETE:
-                               goto again;
-                       default:
-                               break;
-               }
-       }
+       if (status == SPIDER_NET_DESCR_CARDOWNED)
+               return 1;
+       return 0;
+}
 
+/**
+ * spider_net_cleanup_tx_ring - cleans up the TX ring
+ * @card: card structure
+ *
+ * spider_net_cleanup_tx_ring is called by the tx_timer (as we don't use
+ * interrupts to clean up our TX ring) and returns sent packets to the stack
+ * by freeing them
+ */
+static void
+spider_net_cleanup_tx_ring(struct spider_net_card *card)
+{
+       if ( (spider_net_release_tx_chain(card, 0)) &&
+             (card->netdev->flags & IFF_UP) ) {
+               mod_timer(&card->tx_timer, jiffies + SPIDER_NET_TX_TIMER);
+       }
 }
 
 /**
@@ -726,16 +671,22 @@ out:
 static u8
 spider_net_get_multicast_hash(struct net_device *netdev, __u8 *addr)
 {
-       /* FIXME: an addr of 01:00:5e:00:00:01 must result in 0xa9,
-        * ff:ff:ff:ff:ff:ff must result in 0xfd */
        u32 crc;
        u8 hash;
+       char addr_for_crc[ETH_ALEN] = { 0, };
+       int i, bit;
 
-       crc = crc32_be(~0, addr, netdev->addr_len);
+       for (i = 0; i < ETH_ALEN * 8; i++) {
+               bit = (addr[i / 8] >> (i % 8)) & 1;
+               addr_for_crc[ETH_ALEN - 1 - i / 8] += bit << (7 - (i % 8));
+       }
+
+       crc = crc32_be(~0, addr_for_crc, netdev->addr_len);
 
        hash = (crc >> 27);
        hash <<= 3;
        hash |= crc & 7;
+       hash &= 0xff;
 
        return hash;
 }
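The FIXME deleted above recorded the two values the hash must produce; a hypothetical self-check built on those constants (function name and the use of WARN_ON are illustrative, not part of the patch) could read:

static void example_check_mc_hash(struct net_device *netdev)
{
        u8 mc[ETH_ALEN]    = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
        u8 bcast[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };

        /* expected values taken from the FIXME removed in this hunk */
        WARN_ON(spider_net_get_multicast_hash(netdev, mc) != 0xa9);
        WARN_ON(spider_net_get_multicast_hash(netdev, bcast) != 0xfd);
}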
@@ -821,9 +772,11 @@ spider_net_stop(struct net_device *netdev)
 {
        struct spider_net_card *card = netdev_priv(netdev);
 
+       tasklet_kill(&card->rxram_full_tl);
        netif_poll_disable(netdev);
        netif_carrier_off(netdev);
        netif_stop_queue(netdev);
+       del_timer_sync(&card->tx_timer);
 
        /* disable/mask all interrupts */
        spider_net_write_reg(card, SPIDER_NET_GHIINT0MSK, 0);
@@ -872,13 +825,15 @@ spider_net_get_next_tx_descr(struct spider_net_card *card)
  * @skb: packet to consider
  *
  * fills out the command and status field of the descriptor structure,
- * depending on hardware checksum settings. This function assumes a wmb()
- * has executed before.
+ * depending on hardware checksum settings.
  */
 static void
 spider_net_set_txdescr_cmdstat(struct spider_net_descr *descr,
                               struct sk_buff *skb)
 {
+       /* make sure the other fields in the descriptor are written */
+       wmb();
+
        if (skb->ip_summed != CHECKSUM_HW) {
                descr->dmac_cmd_status = SPIDER_NET_DMAC_CMDSTAT_NOCS;
                return;
@@ -887,14 +842,13 @@ spider_net_set_txdescr_cmdstat(struct spider_net_descr *descr,
        /* is packet ip?
         * if yes: tcp? udp? */
        if (skb->protocol == htons(ETH_P_IP)) {
-               if (skb->nh.iph->protocol == IPPROTO_TCP) {
+               if (skb->nh.iph->protocol == IPPROTO_TCP)
                        descr->dmac_cmd_status = SPIDER_NET_DMAC_CMDSTAT_TCPCS;
-               } else if (skb->nh.iph->protocol == IPPROTO_UDP) {
+               else if (skb->nh.iph->protocol == IPPROTO_UDP)
                        descr->dmac_cmd_status = SPIDER_NET_DMAC_CMDSTAT_UDPCS;
-               } else { /* the stack should checksum non-tcp and non-udp
-                           packets on his own: NETIF_F_IP_CSUM */
+               else /* the stack should checksum non-tcp and non-udp
+                       packets on its own: NETIF_F_IP_CSUM */
                        descr->dmac_cmd_status = SPIDER_NET_DMAC_CMDSTAT_NOCS;
-               }
        }
 }
 
@@ -914,23 +868,22 @@ spider_net_prepare_tx_descr(struct spider_net_card *card,
                            struct spider_net_descr *descr,
                            struct sk_buff *skb)
 {
-       descr->buf_addr = pci_map_single(card->pdev, skb->data,
-                                        skb->len, PCI_DMA_BIDIRECTIONAL);
-       if (descr->buf_addr == DMA_ERROR_CODE) {
-               if (netif_msg_tx_err(card))
+       dma_addr_t buf;
+
+       buf = pci_map_single(card->pdev, skb->data,
+                            skb->len, PCI_DMA_BIDIRECTIONAL);
+       if (buf == DMA_ERROR_CODE) {
+               if (netif_msg_tx_err(card) && net_ratelimit())
                        pr_err("could not iommu-map packet (%p, %i). "
                                  "Dropping packet\n", skb->data, skb->len);
                return -ENOMEM;
        }
 
+       descr->buf_addr = buf;
        descr->buf_size = skb->len;
        descr->skb = skb;
        descr->data_status = 0;
 
-       /* make sure the above values are in memory before we change the
-        * status */
-       wmb();
-
        spider_net_set_txdescr_cmdstat(descr,skb);
 
        return 0;
@@ -972,17 +925,12 @@ spider_net_xmit(struct sk_buff *skb, struct net_device *netdev)
        struct spider_net_descr *descr;
        int result;
 
-       descr = spider_net_get_next_tx_descr(card);
+       spider_net_release_tx_chain(card, 0);
 
-       if (!descr) {
-               netif_stop_queue(netdev);
+       descr = spider_net_get_next_tx_descr(card);
 
-               descr = spider_net_get_next_tx_descr(card);
-               if (!descr)
-                       goto error;
-               else
-                       netif_start_queue(netdev);
-       }
+       if (!descr)
+               goto error;
 
        result = spider_net_prepare_tx_descr(card, descr, skb);
        if (result)
@@ -990,19 +938,25 @@ spider_net_xmit(struct sk_buff *skb, struct net_device *netdev)
 
        card->tx_chain.head = card->tx_chain.head->next;
 
-       /* make sure the status from spider_net_prepare_tx_descr is in
-        * memory before we check out the previous descriptor */
-       wmb();
-
        if (spider_net_get_descr_status(descr->prev) !=
-           SPIDER_NET_DESCR_CARDOWNED)
-               spider_net_kick_tx_dma(card, descr);
+           SPIDER_NET_DESCR_CARDOWNED) {
+               /* make sure the current descriptor is in memory. Then
+                * kicking it on again makes sense, if the previous is not
+                * card-owned anymore. Check the previous descriptor twice
+                * to omit an mb() in heavy traffic cases */
+               mb();
+               if (spider_net_get_descr_status(descr->prev) !=
+                   SPIDER_NET_DESCR_CARDOWNED)
+                       spider_net_kick_tx_dma(card, descr);
+       }
+
+       mod_timer(&card->tx_timer, jiffies + SPIDER_NET_TX_TIMER);
 
        return NETDEV_TX_OK;
 
 error:
        card->netdev_stats.tx_dropped++;
-       return NETDEV_TX_LOCKED;
+       return NETDEV_TX_BUSY;
 }
 
 /**
@@ -1027,6 +981,7 @@ spider_net_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
  * spider_net_pass_skb_up - takes an skb from a descriptor and passes it on
  * @descr: descriptor to process
  * @card: card structure
+ * @napi: whether caller is in NAPI context
  *
  * returns 1 on success, 0 if no packet was passed to the stack
  *
@@ -1035,7 +990,7 @@ spider_net_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
  */
 static int
 spider_net_pass_skb_up(struct spider_net_descr *descr,
-                      struct spider_net_card *card)
+                      struct spider_net_card *card, int napi)
 {
        struct sk_buff *skb;
        struct net_device *netdev;
@@ -1046,22 +1001,20 @@ spider_net_pass_skb_up(struct spider_net_descr *descr,
 
        netdev = card->netdev;
 
-       /* check for errors in the data_error flag */
-       if ((data_error & SPIDER_NET_DATA_ERROR_MASK) &&
-           netif_msg_rx_err(card))
-               pr_err("error in received descriptor found, "
-                      "data_status=x%08x, data_error=x%08x\n",
-                      data_status, data_error);
-
-       /* prepare skb, unmap descriptor */
-       skb = descr->skb;
-       pci_unmap_single(card->pdev, descr->buf_addr, SPIDER_NET_MAX_MTU,
+       /* unmap descriptor */
+       pci_unmap_single(card->pdev, descr->buf_addr, SPIDER_NET_MAX_FRAME,
                         PCI_DMA_BIDIRECTIONAL);
 
        /* the cases we'll throw away the packet immediately */
-       if (data_error & SPIDER_NET_DESTROY_RX_FLAGS)
+       if (data_error & SPIDER_NET_DESTROY_RX_FLAGS) {
+               if (netif_msg_rx_err(card))
+                       pr_err("error in received descriptor found, "
+                              "data_status=x%08x, data_error=x%08x\n",
+                              data_status, data_error);
                return 0;
+       }
 
+       skb = descr->skb;
        skb->dev = netdev;
        skb_put(skb, descr->valid_size);
 
@@ -1073,14 +1026,14 @@ spider_net_pass_skb_up(struct spider_net_descr *descr,
 
        /* checksum offload */
        if (card->options.rx_csum) {
-               if ( (data_status & SPIDER_NET_DATA_STATUS_CHK_MASK) &&
-                    (!(data_error & SPIDER_NET_DATA_ERROR_CHK_MASK)) )
+               if ( ( (data_status & SPIDER_NET_DATA_STATUS_CKSUM_MASK) ==
+                      SPIDER_NET_DATA_STATUS_CKSUM_MASK) &&
+                    !(data_error & SPIDER_NET_DATA_ERR_CKSUM_MASK))
                        skb->ip_summed = CHECKSUM_UNNECESSARY;
                else
                        skb->ip_summed = CHECKSUM_NONE;
-       } else {
+       } else
                skb->ip_summed = CHECKSUM_NONE;
-       }
 
        if (data_status & SPIDER_NET_VLAN_PACKET) {
                /* further enhancements: HW-accel VLAN
@@ -1089,7 +1042,10 @@ spider_net_pass_skb_up(struct spider_net_descr *descr,
        }
 
        /* pass skb up to stack */
-       netif_receive_skb(skb);
+       if (napi)
+               netif_receive_skb(skb);
+       else
+               netif_rx_ni(skb);
 
        /* update netdevice statistics */
        card->netdev_stats.rx_packets++;
@@ -1099,16 +1055,18 @@ spider_net_pass_skb_up(struct spider_net_descr *descr,
 }
 
 /**
- * spider_net_decode_descr - processes an rx descriptor
+ * spider_net_decode_one_descr - processes an rx descriptor
  * @card: card structure
+ * @napi: whether caller is in NAPI context
  *
  * returns 1 if a packet has been sent to the stack, otherwise 0
  *
  * processes an rx descriptor by iommu-unmapping the data buffer and passing
- * the packet up to the stack
+ * the packet up to the stack. This function is called in softirq
+ * context, i.e. either from the bottom half or from the NAPI poll routine
  */
 static int
-spider_net_decode_one_descr(struct spider_net_card *card)
+spider_net_decode_one_descr(struct spider_net_card *card, int napi)
 {
        enum spider_net_descr_status status;
        struct spider_net_descr *descr;
@@ -1122,17 +1080,19 @@ spider_net_decode_one_descr(struct spider_net_card *card)
 
        if (status == SPIDER_NET_DESCR_CARDOWNED) {
                /* nothing in the descriptor yet */
-               return 0;
+               result = 0;
+               goto out;
        }
 
        if (status == SPIDER_NET_DESCR_NOT_IN_USE) {
-               /* not initialized yet, I bet chain->tail == chain->head
-                * and the ring is empty */
+               /* not initialized yet, the ring must be empty */
                spider_net_refill_rx_chain(card);
-               return 0;
+               spider_net_enable_rxdmac(card);
+               result = 0;
+               goto out;
        }
 
-       /* descriptor definitively used -- move on head */
+       /* descriptor definitively used -- move on tail */
        chain->tail = descr->next;
 
        result = 0;
@@ -1143,6 +1103,9 @@ spider_net_decode_one_descr(struct spider_net_card *card)
                        pr_err("%s: dropping RX descriptor with state %d\n",
                               card->netdev->name, status);
                card->netdev_stats.rx_dropped++;
+               pci_unmap_single(card->pdev, descr->buf_addr,
+                                SPIDER_NET_MAX_FRAME, PCI_DMA_BIDIRECTIONAL);
+               dev_kfree_skb_irq(descr->skb);
                goto refill;
        }
 
@@ -1155,12 +1118,13 @@ spider_net_decode_one_descr(struct spider_net_card *card)
        }
 
        /* ok, we've got a packet in descr */
-       result = spider_net_pass_skb_up(descr, card);
+       result = spider_net_pass_skb_up(descr, card, napi);
 refill:
        spider_net_set_descr_status(descr, SPIDER_NET_DESCR_NOT_IN_USE);
        /* change the descriptor state: */
-       spider_net_refill_rx_chain(card);
-
+       if (!napi)
+               spider_net_refill_rx_chain(card);
+out:
        return result;
 }
 
@@ -1186,7 +1150,7 @@ spider_net_poll(struct net_device *netdev, int *budget)
        packets_to_do = min(*budget, netdev->quota);
 
        while (packets_to_do) {
-               if (spider_net_decode_one_descr(card)) {
+               if (spider_net_decode_one_descr(card, 1)) {
                        packets_done++;
                        packets_to_do--;
                } else {
@@ -1198,6 +1162,7 @@ spider_net_poll(struct net_device *netdev, int *budget)
 
        netdev->quota -= packets_done;
        *budget -= packets_done;
+       spider_net_refill_rx_chain(card);
 
        /* if all packets are in the stack, enable interrupts and return 0 */
        /* if not, return 1 */
@@ -1341,6 +1306,24 @@ spider_net_enable_txdmac(struct spider_net_card *card)
                             card->tx_chain.tail->bus_addr);
 }
 
+/**
+ * spider_net_handle_rxram_full - cleans up RX ring upon RX RAM full interrupt
+ * @card: card structure
+ *
+ * spider_net_handle_rxram_full empties the RX ring so that spider can put
+ * more packets in it and empty its RX RAM. This is called in bottom-half
+ * (tasklet) context.
+ */
+static void
+spider_net_handle_rxram_full(struct spider_net_card *card)
+{
+       while (spider_net_decode_one_descr(card, 0))
+               ;
+       spider_net_enable_rxchtails(card);
+       spider_net_enable_rxdmac(card);
+       netif_rx_schedule(card->netdev);
+}
+
 /**
  * spider_net_handle_error_irq - handles errors raised by an interrupt
  * @card: card structure
@@ -1449,17 +1432,21 @@ spider_net_handle_error_irq(struct spider_net_card *card, u32 status_reg)
                                switch (i)
        {
        case SPIDER_NET_GTMFLLINT:
-               if (netif_msg_intr(card))
+               if (netif_msg_intr(card) && net_ratelimit())
                        pr_err("Spider TX RAM full\n");
                show_error = 0;
                break;
+       case SPIDER_NET_GRFDFLLINT: /* fallthrough */
+       case SPIDER_NET_GRFCFLLINT: /* fallthrough */
+       case SPIDER_NET_GRFBFLLINT: /* fallthrough */
+       case SPIDER_NET_GRFAFLLINT: /* fallthrough */
        case SPIDER_NET_GRMFLLINT:
-               if (netif_msg_intr(card))
+               if (netif_msg_intr(card) && net_ratelimit())
                        pr_err("Spider RX RAM full, incoming packets "
-                              "might be discarded !\n");
-               netif_rx_schedule(card->netdev);
-               spider_net_enable_rxchtails(card);
-               spider_net_enable_rxdmac(card);
+                              "might be discarded!\n");
+               spider_net_rx_irq_off(card);
+               tasklet_schedule(&card->rxram_full_tl);
+               show_error = 0;
                break;
 
        /* case SPIDER_NET_GTMSHTINT: problem, print a message */
@@ -1467,10 +1454,6 @@ spider_net_handle_error_irq(struct spider_net_card *card, u32 status_reg)
                /* allrighty. tx from previous descr ok */
                show_error = 0;
                break;
-       /* case SPIDER_NET_GRFDFLLINT: print a message down there */
-       /* case SPIDER_NET_GRFCFLLINT: print a message down there */
-       /* case SPIDER_NET_GRFBFLLINT: print a message down there */
-       /* case SPIDER_NET_GRFAFLLINT: print a message down there */
 
        /* chain end */
        case SPIDER_NET_GDDDCEINT: /* fallthrough */
@@ -1482,6 +1465,7 @@ spider_net_handle_error_irq(struct spider_net_card *card, u32 status_reg)
                               "restarting DMAC %c.\n",
                               'D'+i-SPIDER_NET_GDDDCEINT);
                spider_net_refill_rx_chain(card);
+               spider_net_enable_rxdmac(card);
                show_error = 0;
                break;
 
@@ -1492,6 +1476,7 @@ spider_net_handle_error_irq(struct spider_net_card *card, u32 status_reg)
        case SPIDER_NET_GDAINVDINT:
                /* could happen when rx chain is full */
                spider_net_refill_rx_chain(card);
+               spider_net_enable_rxdmac(card);
                show_error = 0;
                break;
 
@@ -1580,17 +1565,13 @@ spider_net_interrupt(int irq, void *ptr, struct pt_regs *regs)
        if (!status_reg)
                return IRQ_NONE;
 
-       if (status_reg & SPIDER_NET_TXINT)
-               spider_net_release_tx_chain(card, 0);
-
        if (status_reg & SPIDER_NET_RXINT ) {
                spider_net_rx_irq_off(card);
                netif_rx_schedule(netdev);
        }
 
-       /* we do this after rx and tx processing, as we want the tx chain
-        * processed to see, whether we should restart tx dma processing */
-       spider_net_handle_error_irq(card, status_reg);
+       if (status_reg & SPIDER_NET_ERRINT )
+               spider_net_handle_error_irq(card, status_reg);
 
        /* clear interrupt sources */
        spider_net_write_reg(card, SPIDER_NET_GHIINT0STS, status_reg);
@@ -1831,34 +1812,40 @@ spider_net_setup_phy(struct spider_net_card *card)
 /**
  * spider_net_download_firmware - loads firmware into the adapter
  * @card: card structure
- * @firmware: firmware pointer
+ * @firmware_ptr: pointer to firmware data
  *
- * spider_net_download_firmware loads the firmware opened by
- * spider_net_init_firmware into the adapter.
+ * spider_net_download_firmware loads the firmware data into the
+ * adapter. It assumes the length of the data has already been checked.
  */
-static void
+static int
 spider_net_download_firmware(struct spider_net_card *card,
-                            const struct firmware *firmware)
+                            u8 *firmware_ptr)
 {
        int sequencer, i;
-       u32 *fw_ptr = (u32 *)firmware->data;
+       u32 *fw_ptr = (u32 *)firmware_ptr;
 
        /* stop sequencers */
        spider_net_write_reg(card, SPIDER_NET_GSINIT,
                             SPIDER_NET_STOP_SEQ_VALUE);
 
-       for (sequencer = 0; sequencer < 6; sequencer++) {
+       for (sequencer = 0; sequencer < SPIDER_NET_FIRMWARE_SEQS;
+            sequencer++) {
                spider_net_write_reg(card,
                                     SPIDER_NET_GSnPRGADR + sequencer * 8, 0);
-               for (i = 0; i < SPIDER_NET_FIRMWARE_LEN; i++) {
+               for (i = 0; i < SPIDER_NET_FIRMWARE_SEQWORDS; i++) {
                        spider_net_write_reg(card, SPIDER_NET_GSnPRGDAT +
                                             sequencer * 8, *fw_ptr);
                        fw_ptr++;
                }
        }
 
+       if (spider_net_read_reg(card, SPIDER_NET_GSINIT))
+               return -EIO;
+
        spider_net_write_reg(card, SPIDER_NET_GSINIT,
                             SPIDER_NET_RUN_SEQ_VALUE);
+
+       return 0;
 }
 
 /**
@@ -1890,31 +1877,53 @@ spider_net_download_firmware(struct spider_net_card *card,
 static int
 spider_net_init_firmware(struct spider_net_card *card)
 {
-       const struct firmware *firmware;
-       int err = -EIO;
+       struct firmware *firmware = NULL;
+       struct device_node *dn;
+       u8 *fw_prop = NULL;
+       int err = -ENOENT;
+       int fw_size;
+
+       if (request_firmware((const struct firmware **)&firmware,
+                            SPIDER_NET_FIRMWARE_NAME, &card->pdev->dev) == 0) {
+               if (firmware->size != SPIDER_NET_FIRMWARE_LEN) {
+                       if (netif_msg_probe(card))
+                               pr_err("Incorrect size of spidernet firmware in "
+                                      "filesystem. Looking in host firmware...\n");
+                       /* don't leak the requested blob before falling back */
+                       release_firmware(firmware);
+                       goto try_host_fw;
+               }
+               err = spider_net_download_firmware(card, firmware->data);
 
-       if (request_firmware(&firmware,
-                            SPIDER_NET_FIRMWARE_NAME, &card->pdev->dev) < 0) {
-               if (netif_msg_probe(card))
-                       pr_err("Couldn't read in sequencer data file %s.\n",
-                              SPIDER_NET_FIRMWARE_NAME);
-               firmware = NULL;
-               goto out;
-       }
+               release_firmware(firmware);
+               if (err)
+                       goto try_host_fw;
 
-       if (firmware->size != 6 * SPIDER_NET_FIRMWARE_LEN * sizeof(u32)) {
-               if (netif_msg_probe(card))
-                       pr_err("Invalid size of sequencer data file %s.\n",
-                              SPIDER_NET_FIRMWARE_NAME);
-               goto out;
+               goto done;
        }
 
-       spider_net_download_firmware(card, firmware);
+try_host_fw:
+       dn = pci_device_to_OF_node(card->pdev);
+       if (!dn)
+               goto out_err;
 
-       err = 0;
-out:
-       release_firmware(firmware);
+       fw_prop = (u8 *)get_property(dn, "firmware", &fw_size);
+       if (!fw_prop)
+               goto out_err;
+
+       if (fw_size != SPIDER_NET_FIRMWARE_LEN) {
+               if (netif_msg_probe(card))
+                       pr_err("Incorrect size of spidernet firmware in "
+                              "host firmware\n");
+               goto done;
+       }
 
+       err = spider_net_download_firmware(card, fw_prop);
+
+done:
+       return err;
+out_err:
+       if (netif_msg_probe(card))
+               pr_err("Couldn't find spidernet firmware in filesystem " \
+                      "or host firmware\n");
        return err;
 }
 
@@ -1934,10 +1943,11 @@ spider_net_workaround_rxramfull(struct spider_net_card *card)
                             SPIDER_NET_CKRCTRL_RUN_VALUE);
 
        /* empty sequencer data */
-       for (sequencer = 0; sequencer < 6; sequencer++) {
+       for (sequencer = 0; sequencer < SPIDER_NET_FIRMWARE_SEQS;
+            sequencer++) {
                spider_net_write_reg(card, SPIDER_NET_GSnPRGDAT +
                                     sequencer * 8, 0x0);
-               for (i = 0; i < SPIDER_NET_FIRMWARE_LEN; i++) {
+               for (i = 0; i < SPIDER_NET_FIRMWARE_SEQWORDS; i++) {
                        spider_net_write_reg(card, SPIDER_NET_GSnPRGDAT +
                                             sequencer * 8, 0x0);
                }
@@ -2061,7 +2071,15 @@ spider_net_setup_netdev(struct spider_net_card *card)
        SET_NETDEV_DEV(netdev, &card->pdev->dev);
 
        pci_set_drvdata(card->pdev, netdev);
-       spin_lock_init(&card->intmask_lock);
+
+       atomic_set(&card->tx_chain_release, 0);
+       card->rxram_full_tl.data = (unsigned long) card;
+       card->rxram_full_tl.func =
+               (void (*)(unsigned long)) spider_net_handle_rxram_full;
+       init_timer(&card->tx_timer);
+       card->tx_timer.function =
+               (void (*)(unsigned long)) spider_net_cleanup_tx_ring;
+       card->tx_timer.data = (unsigned long) card;
        netdev->irq = card->pdev->irq;
 
        card->options.rx_csum = SPIDER_NET_RX_CSUM_DEFAULT;
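
The field assignments above are one way to wire up the deferred work; a hedged sketch of the equivalent setup using tasklet_init() and the classic timer API follows. The wrapper functions are illustrative (not in the patch) and exist only to avoid the function-pointer casts; struct spider_net_card, the rxram_full_tl and tx_timer fields, spider_net_handle_rxram_full() and spider_net_cleanup_tx_ring() are the driver's own names, and the sketch assumes spider_net_cleanup_tx_ring() takes the card pointer, as the cast in the patch suggests.

        /* Sketch only -- assumes the driver's own spidernet header is
         * included so that struct spider_net_card is visible. */
        #include <linux/interrupt.h>
        #include <linux/timer.h>

        static void spider_net_rxram_full_task(unsigned long data)
        {
                spider_net_handle_rxram_full((struct spider_net_card *) data);
        }

        static void spider_net_tx_timer_task(unsigned long data)
        {
                spider_net_cleanup_tx_ring((struct spider_net_card *) data);
        }

        static void spider_net_init_deferred_work(struct spider_net_card *card)
        {
                tasklet_init(&card->rxram_full_tl,
                             spider_net_rxram_full_task, (unsigned long) card);

                init_timer(&card->tx_timer);
                card->tx_timer.function = spider_net_tx_timer_task;
                card->tx_timer.data = (unsigned long) card;
        }
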
index 22b2f2347351a281e80723ea4b748d16940ee3b1..5922b529a04866991ed3dfeb875c46396e0962ee 100644 (file)
@@ -33,25 +33,32 @@ extern struct ethtool_ops spider_net_ethtool_ops;
 
 extern char spider_net_driver_name[];
 
-#define SPIDER_NET_MAX_MTU                     2308
+#define SPIDER_NET_MAX_FRAME                   2312
+#define SPIDER_NET_MAX_MTU                     2294
 #define SPIDER_NET_MIN_MTU                     64
 
 #define SPIDER_NET_RXBUF_ALIGN                 128
 
-#define SPIDER_NET_RX_DESCRIPTORS_DEFAULT      64
+#define SPIDER_NET_RX_DESCRIPTORS_DEFAULT      256
 #define SPIDER_NET_RX_DESCRIPTORS_MIN          16
-#define SPIDER_NET_RX_DESCRIPTORS_MAX          256
+#define SPIDER_NET_RX_DESCRIPTORS_MAX          512
 
-#define SPIDER_NET_TX_DESCRIPTORS_DEFAULT      64
+#define SPIDER_NET_TX_DESCRIPTORS_DEFAULT      256
 #define SPIDER_NET_TX_DESCRIPTORS_MIN          16
-#define SPIDER_NET_TX_DESCRIPTORS_MAX          256
+#define SPIDER_NET_TX_DESCRIPTORS_MAX          512
+
+#define SPIDER_NET_TX_TIMER                    20
 
 #define SPIDER_NET_RX_CSUM_DEFAULT             1
 
-#define SPIDER_NET_WATCHDOG_TIMEOUT 5*HZ
-#define SPIDER_NET_NAPI_WEIGHT 64
+#define SPIDER_NET_WATCHDOG_TIMEOUT            50*HZ
+#define SPIDER_NET_NAPI_WEIGHT                 64
 
-#define SPIDER_NET_FIRMWARE_LEN                1024
+#define SPIDER_NET_FIRMWARE_SEQS       6
+#define SPIDER_NET_FIRMWARE_SEQWORDS   1024
+#define SPIDER_NET_FIRMWARE_LEN                (SPIDER_NET_FIRMWARE_SEQS * \
+                                        SPIDER_NET_FIRMWARE_SEQWORDS * \
+                                        sizeof(u32))
 #define SPIDER_NET_FIRMWARE_NAME       "spider_fw.bin"
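
For reference, the reworked constants keep the total firmware size unchanged:

        SPIDER_NET_FIRMWARE_LEN = SPIDER_NET_FIRMWARE_SEQS * SPIDER_NET_FIRMWARE_SEQWORDS * sizeof(u32)
                                = 6 * 1024 * 4 = 24576 bytes

which is the same value the old code computed as 6 * 1024 * sizeof(u32), and is what spider_net_init_firmware() now compares against both firmware->size and the length of the device-tree "firmware" property.
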
 
 /** spider_net SMMIO registers */
@@ -142,14 +149,12 @@ extern char spider_net_driver_name[];
 /** SCONFIG registers */
 #define SPIDER_NET_SCONFIG_IOACTE      0x00002810
 
-/** hardcoded register values */
-#define SPIDER_NET_INT0_MASK_VALUE     0x3f7fe3ff
-#define SPIDER_NET_INT1_MASK_VALUE     0xffffffff
+/** interrupt mask registers */
+#define SPIDER_NET_INT0_MASK_VALUE     0x3f7fe2c7
+#define SPIDER_NET_INT1_MASK_VALUE     0xffff7ff7
 /* no MAC aborts -> auto retransmission */
-#define SPIDER_NET_INT2_MASK_VALUE     0xfffffff1
+#define SPIDER_NET_INT2_MASK_VALUE     0xffef7ff1
 
-/* clear counter when interrupt sources are cleared
-#define SPIDER_NET_FRAMENUM_VALUE      0x0001f001 */
 /* we rely on flagged descriptor interrupts */
 #define SPIDER_NET_FRAMENUM_VALUE      0x00000000
 /* set this first, then the FRAMENUM_VALUE */
@@ -168,7 +173,7 @@ extern char spider_net_driver_name[];
 #if 0
 #define SPIDER_NET_WOL_VALUE           0x00000000
 #endif
-#define SPIDER_NET_IPSECINIT_VALUE     0x00f000f8
+#define SPIDER_NET_IPSECINIT_VALUE     0x6f716f71
 
 /* pause frames: automatic, no upper retransmission count */
 /* outside loopback mode: ETOMOD signal dont matter, not connected */
@@ -318,6 +323,10 @@ enum spider_net_int2_status {
 #define SPIDER_NET_RXINT       ( (1 << SPIDER_NET_GDAFDCINT) | \
                                  (1 << SPIDER_NET_GRMFLLINT) )
 
+#define SPIDER_NET_ERRINT      ( 0xffffffff & \
+                                 (~SPIDER_NET_TXINT) & \
+                                 (~SPIDER_NET_RXINT) )
+
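
By construction SPIDER_NET_ERRINT contains exactly the bits that are in neither SPIDER_NET_TXINT nor SPIDER_NET_RXINT, so the three masks together cover all 32 status bits and an error bit can never also be a TX or RX bit:

        (SPIDER_NET_TXINT | SPIDER_NET_RXINT | SPIDER_NET_ERRINT) == 0xffffffff

This is why spider_net_interrupt() above can call spider_net_handle_error_irq() only when a SPIDER_NET_ERRINT bit is set instead of unconditionally.
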
 #define SPIDER_NET_GPREXEC             0x80000000
 #define SPIDER_NET_GPRDAT_MASK         0x0000ffff
 
@@ -358,9 +367,6 @@ enum spider_net_int2_status {
 /* descr ready, descr is in middle of chain, get interrupt on completion */
 #define SPIDER_NET_DMAC_RX_CARDOWNED   0xa0800000
 
-/* multicast is no problem */
-#define SPIDER_NET_DATA_ERROR_MASK     0xffffbfff
-
 enum spider_net_descr_status {
        SPIDER_NET_DESCR_COMPLETE               = 0x00, /* used in rx and tx */
        SPIDER_NET_DESCR_RESPONSE_ERROR         = 0x01, /* used in rx and tx */
@@ -373,9 +379,9 @@ enum spider_net_descr_status {
 
 struct spider_net_descr {
        /* as defined by the hardware */
-       dma_addr_t buf_addr;
+       u32 buf_addr;
        u32 buf_size;
-       dma_addr_t next_descr_addr;
+       u32 next_descr_addr;
        u32 dmac_cmd_status;
        u32 result_size;
        u32 valid_size; /* all zeroes for tx */
@@ -384,7 +390,7 @@ struct spider_net_descr {
 
        /* used in the driver */
        struct sk_buff *skb;
-       dma_addr_t bus_addr;
+       u32 bus_addr;
        struct spider_net_descr *next;
        struct spider_net_descr *prev;
 } __attribute__((aligned(32)));
@@ -396,21 +402,21 @@ struct spider_net_descr_chain {
 };
 
 /* descriptor data_status bits */
-#define SPIDER_NET_RXIPCHK             29
-#define SPIDER_NET_TCPUDPIPCHK         28
-#define SPIDER_NET_DATA_STATUS_CHK_MASK        (1 << SPIDER_NET_RXIPCHK | \
-                                        1 << SPIDER_NET_TCPUDPIPCHK)
-
+#define SPIDER_NET_RX_IPCHK            29
+#define SPIDER_NET_RX_TCPCHK           28
 #define SPIDER_NET_VLAN_PACKET         21
+#define SPIDER_NET_DATA_STATUS_CKSUM_MASK ( (1 << SPIDER_NET_RX_IPCHK) | \
+                                         (1 << SPIDER_NET_RX_TCPCHK) )
 
 /* descriptor data_error bits */
-#define SPIDER_NET_RXIPCHKERR          27
-#define SPIDER_NET_RXTCPCHKERR         26
-#define SPIDER_NET_DATA_ERROR_CHK_MASK (1 << SPIDER_NET_RXIPCHKERR | \
-                                        1 << SPIDER_NET_RXTCPCHKERR)
+#define SPIDER_NET_RX_IPCHKERR         27
+#define SPIDER_NET_RX_RXTCPCHKERR      28
+
+#define SPIDER_NET_DATA_ERR_CKSUM_MASK (1 << SPIDER_NET_RX_IPCHKERR)
 
-/* the cases we don't pass the packet to the stack */
-#define SPIDER_NET_DESTROY_RX_FLAGS    0x70138000
+/* the cases we don't pass the packet to the stack.
+ * 701b8000 would be correct, but every packet gets that flag */
+#define SPIDER_NET_DESTROY_RX_FLAGS    0x700b8000
 
 #define SPIDER_NET_DESCR_SIZE          32
 
@@ -445,13 +451,16 @@ struct spider_net_card {
 
        struct spider_net_descr_chain tx_chain;
        struct spider_net_descr_chain rx_chain;
-       spinlock_t chain_lock;
+       atomic_t rx_chain_refill;
+       atomic_t tx_chain_release;
 
        struct net_device_stats netdev_stats;
 
        struct spider_net_options options;
 
        spinlock_t intmask_lock;
+       struct tasklet_struct rxram_full_tl;
+       struct timer_list tx_timer;
 
        struct work_struct tx_timeout_task;
        atomic_t tx_timeout_task_counter;
index d42e60ba74ceb3013b2b80170aeb9ceede60c352..a5bb0b7633af2576db48f9a347962b9394506db5 100644 (file)
@@ -113,6 +113,23 @@ spider_net_ethtool_set_rx_csum(struct net_device *netdev, u32 n)
        return 0;
 }
 
+static uint32_t
+spider_net_ethtool_get_tx_csum(struct net_device *netdev)
+{
+        return (netdev->features & NETIF_F_HW_CSUM) != 0;
+}
+
+static int
+spider_net_ethtool_set_tx_csum(struct net_device *netdev, uint32_t data)
+{
+        if (data)
+                netdev->features |= NETIF_F_HW_CSUM;
+        else
+                netdev->features &= ~NETIF_F_HW_CSUM;
+
+        return 0;
+}
+
 struct ethtool_ops spider_net_ethtool_ops = {
        .get_settings           = spider_net_ethtool_get_settings,
        .get_drvinfo            = spider_net_ethtool_get_drvinfo,
@@ -122,5 +139,7 @@ struct ethtool_ops spider_net_ethtool_ops = {
        .nway_reset             = spider_net_ethtool_nway_reset,
        .get_rx_csum            = spider_net_ethtool_get_rx_csum,
        .set_rx_csum            = spider_net_ethtool_set_rx_csum,
+       .get_tx_csum            = spider_net_ethtool_get_tx_csum,
+       .set_tx_csum            = spider_net_ethtool_set_tx_csum,
 };
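
These two hooks simply report and toggle NETIF_F_HW_CSUM on the net device; from user space they are exercised with the standard ethtool offload commands, e.g. (assuming the interface is named eth0) 'ethtool -k eth0' to show the current tx-checksumming state and 'ethtool -K eth0 tx on' (or 'tx off') to change it through .set_tx_csum.
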
 
index 28ce47a02408e9946c2e3941c491b5313ca56db2..55f3b856236e13262387bbcd0df3d05846b86c8d 100644 (file)
@@ -1653,36 +1653,40 @@ static void gem_init_rings(struct gem *gp)
 /* Init PHY interface and start link poll state machine */
 static void gem_init_phy(struct gem *gp)
 {
-       u32 mifcfg;
+       u32 mif_cfg;
 
        /* Revert MIF CFG setting done on stop_phy */
-       mifcfg = readl(gp->regs + MIF_CFG);
-       mifcfg &= ~MIF_CFG_BBMODE;
-       writel(mifcfg, gp->regs + MIF_CFG);
+       mif_cfg = readl(gp->regs + MIF_CFG);
+       mif_cfg &= ~(MIF_CFG_PSELECT|MIF_CFG_POLL|MIF_CFG_BBMODE|MIF_CFG_MDI1);
+       mif_cfg |= MIF_CFG_MDI0;
+       writel(mif_cfg, gp->regs + MIF_CFG);
+       writel(PCS_DMODE_MGM, gp->regs + PCS_DMODE);
+       writel(MAC_XIFCFG_OE, gp->regs + MAC_XIFCFG);
        
        if (gp->pdev->vendor == PCI_VENDOR_ID_APPLE) {
                int i;
+               u16 ctrl;
 
-               /* Those delay sucks, the HW seem to love them though, I'll
-                * serisouly consider breaking some locks here to be able
-                * to schedule instead
-                */
-               for (i = 0; i < 3; i++) {
 #ifdef CONFIG_PPC_PMAC
-                       pmac_call_feature(PMAC_FTR_GMAC_PHY_RESET, gp->of_node, 0, 0);
-                       msleep(20);
+               pmac_call_feature(PMAC_FTR_GMAC_PHY_RESET, gp->of_node, 0, 0);
 #endif
-                       /* Some PHYs used by apple have problem getting back to us,
-                        * we do an additional reset here
-                        */
-                       phy_write(gp, MII_BMCR, BMCR_RESET);
-                       msleep(20);
-                       if (phy_read(gp, MII_BMCR) != 0xffff)
+
+               /* Some PHYs used by apple have problem getting back
+                * to us, we do an additional reset here
+                */
+               phy_write(gp, MII_BMCR, BMCR_RESET);
+               for (i = 0; i < 50; i++) {
+                       if ((phy_read(gp, MII_BMCR) & BMCR_RESET) == 0)
                                break;
-                       if (i == 2)
-                               printk(KERN_WARNING "%s: GMAC PHY not responding !\n",
-                                      gp->dev->name);
+                       msleep(10);
                }
+               if (i == 50)
+                       printk(KERN_WARNING "%s: GMAC PHY not responding !\n",
+                              gp->dev->name);
+               /* Make sure isolate is off */
+               ctrl = phy_read(gp, MII_BMCR);
+               if (ctrl & BMCR_ISOLATE)
+                       phy_write(gp, MII_BMCR, ctrl & ~BMCR_ISOLATE);
        }
 
        if (gp->pdev->vendor == PCI_VENDOR_ID_SUN &&
@@ -2119,7 +2123,7 @@ static void gem_reinit_chip(struct gem *gp)
 /* Must be invoked with no lock held. */
 static void gem_stop_phy(struct gem *gp, int wol)
 {
-       u32 mifcfg;
+       u32 mif_cfg;
        unsigned long flags;
 
        /* Let the chip settle down a bit, it seems that helps
@@ -2130,9 +2134,9 @@ static void gem_stop_phy(struct gem *gp, int wol)
        /* Make sure we aren't polling PHY status change. We
         * don't currently use that feature though
         */
-       mifcfg = readl(gp->regs + MIF_CFG);
-       mifcfg &= ~MIF_CFG_POLL;
-       writel(mifcfg, gp->regs + MIF_CFG);
+       mif_cfg = readl(gp->regs + MIF_CFG);
+       mif_cfg &= ~MIF_CFG_POLL;
+       writel(mif_cfg, gp->regs + MIF_CFG);
 
        if (wol && gp->has_wol) {
                unsigned char *e = &gp->dev->dev_addr[0];
@@ -2182,7 +2186,8 @@ static void gem_stop_phy(struct gem *gp, int wol)
                /* According to Apple, we must set the MDIO pins to this benign
                 * state or we may 1) eat more current, 2) damage some PHYs
                 */
-               writel(mifcfg | MIF_CFG_BBMODE, gp->regs + MIF_CFG);
+               mif_cfg = 0;
+               writel(mif_cfg | MIF_CFG_BBMODE, gp->regs + MIF_CFG);
                writel(0, gp->regs + MIF_BBCLK);
                writel(0, gp->regs + MIF_BBDATA);
                writel(0, gp->regs + MIF_BBOENAB);
index eb86b059809b6ac4242bbf91d16d258e7e9a7ab5..f2d1dafde08773ad74107309b5e74ed9a0b60f16 100644 (file)
@@ -69,8 +69,8 @@
 
 #define DRV_MODULE_NAME                "tg3"
 #define PFX DRV_MODULE_NAME    ": "
-#define DRV_MODULE_VERSION     "3.47"
-#define DRV_MODULE_RELDATE     "Dec 28, 2005"
+#define DRV_MODULE_VERSION     "3.48"
+#define DRV_MODULE_RELDATE     "Jan 16, 2006"
 
 #define TG3_DEF_MAC_MODE       0
 #define TG3_DEF_RX_MODE                0
@@ -1325,10 +1325,12 @@ static int tg3_set_power_state(struct tg3 *tp, int state)
                val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
                tw32(0x7d00, val);
                if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
-                       tg3_nvram_lock(tp);
+                       int err;
+
+                       err = tg3_nvram_lock(tp);
                        tg3_halt_cpu(tp, RX_CPU_BASE);
-                       tw32_f(NVRAM_SWARB, SWARB_REQ_CLR0);
-                       tg3_nvram_unlock(tp);
+                       if (!err)
+                               tg3_nvram_unlock(tp);
                }
        }
 
@@ -4193,14 +4195,19 @@ static int tg3_nvram_lock(struct tg3 *tp)
        if (tp->tg3_flags & TG3_FLAG_NVRAM) {
                int i;
 
-               tw32(NVRAM_SWARB, SWARB_REQ_SET1);
-               for (i = 0; i < 8000; i++) {
-                       if (tr32(NVRAM_SWARB) & SWARB_GNT1)
-                               break;
-                       udelay(20);
+               if (tp->nvram_lock_cnt == 0) {
+                       tw32(NVRAM_SWARB, SWARB_REQ_SET1);
+                       for (i = 0; i < 8000; i++) {
+                               if (tr32(NVRAM_SWARB) & SWARB_GNT1)
+                                       break;
+                               udelay(20);
+                       }
+                       if (i == 8000) {
+                               tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
+                               return -ENODEV;
+                       }
                }
-               if (i == 8000)
-                       return -ENODEV;
+               tp->nvram_lock_cnt++;
        }
        return 0;
 }
@@ -4208,8 +4215,12 @@ static int tg3_nvram_lock(struct tg3 *tp)
 /* tp->lock is held. */
 static void tg3_nvram_unlock(struct tg3 *tp)
 {
-       if (tp->tg3_flags & TG3_FLAG_NVRAM)
-               tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
+       if (tp->tg3_flags & TG3_FLAG_NVRAM) {
+               if (tp->nvram_lock_cnt > 0)
+                       tp->nvram_lock_cnt--;
+               if (tp->nvram_lock_cnt == 0)
+                       tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
+       }
 }
 
 /* tp->lock is held. */
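
With the reference count, tg3_nvram_lock() can now fail (returning -ENODEV), and every caller touched by this patch follows the same pattern: remember whether the lock was actually taken and only unlock in that case. A minimal sketch of that caller pattern (tg3_do_nvram_op() is a made-up name; tp->lock is assumed to be held, as both helpers require):

        static int tg3_do_nvram_op(struct tg3 *tp)
        {
                int err;

                err = tg3_nvram_lock(tp);       /* may return -ENODEV */

                /* ... NVRAM access or CPU halt goes here ... */

                if (!err)
                        tg3_nvram_unlock(tp);   /* keep lock/unlock balanced */
                return err;
        }
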
@@ -4320,8 +4331,13 @@ static int tg3_chip_reset(struct tg3 *tp)
        void (*write_op)(struct tg3 *, u32, u32);
        int i;
 
-       if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X))
+       if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X)) {
                tg3_nvram_lock(tp);
+               /* No matching tg3_nvram_unlock() after this because
+                * chip reset below will undo the nvram lock.
+                */
+               tp->nvram_lock_cnt = 0;
+       }
 
        /*
         * We must avoid the readl() that normally takes place.
@@ -4717,6 +4733,10 @@ static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
                       (offset == RX_CPU_BASE ? "RX" : "TX"));
                return -ENODEV;
        }
+
+       /* Clear firmware's nvram arbitration. */
+       if (tp->tg3_flags & TG3_FLAG_NVRAM)
+               tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
        return 0;
 }
 
@@ -4736,7 +4756,7 @@ struct fw_info {
 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
                                 int cpu_scratch_size, struct fw_info *info)
 {
-       int err, i;
+       int err, lock_err, i;
        void (*write_op)(struct tg3 *, u32, u32);
 
        if (cpu_base == TX_CPU_BASE &&
@@ -4755,9 +4775,10 @@ static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_b
        /* It is possible that bootcode is still loading at this point.
         * Get the nvram lock first before halting the cpu.
         */
-       tg3_nvram_lock(tp);
+       lock_err = tg3_nvram_lock(tp);
        err = tg3_halt_cpu(tp, cpu_base);
-       tg3_nvram_unlock(tp);
+       if (!lock_err)
+               tg3_nvram_unlock(tp);
        if (err)
                goto out;
 
@@ -8182,7 +8203,7 @@ static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
                data[1] = 1;
        }
        if (etest->flags & ETH_TEST_FL_OFFLINE) {
-               int irq_sync = 0;
+               int err, irq_sync = 0;
 
                if (netif_running(dev)) {
                        tg3_netif_stop(tp);
@@ -8192,11 +8213,12 @@ static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
                tg3_full_lock(tp, irq_sync);
 
                tg3_halt(tp, RESET_KIND_SUSPEND, 1);
-               tg3_nvram_lock(tp);
+               err = tg3_nvram_lock(tp);
                tg3_halt_cpu(tp, RX_CPU_BASE);
                if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
                        tg3_halt_cpu(tp, TX_CPU_BASE);
-               tg3_nvram_unlock(tp);
+               if (!err)
+                       tg3_nvram_unlock(tp);
 
                if (tg3_test_registers(tp) != 0) {
                        etest->flags |= ETH_TEST_FL_FAILED;
@@ -8588,7 +8610,11 @@ static void __devinit tg3_nvram_init(struct tg3 *tp)
            GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
                tp->tg3_flags |= TG3_FLAG_NVRAM;
 
-               tg3_nvram_lock(tp);
+               if (tg3_nvram_lock(tp)) {
+                       printk(KERN_WARNING PFX "%s: Cannot get nvram lock, "
+                              "tg3_nvram_init failed.\n", tp->dev->name);
+                       return;
+               }
                tg3_enable_nvram_access(tp);
 
                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
@@ -8686,7 +8712,9 @@ static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
        if (offset > NVRAM_ADDR_MSK)
                return -EINVAL;
 
-       tg3_nvram_lock(tp);
+       ret = tg3_nvram_lock(tp);
+       if (ret)
+               return ret;
 
        tg3_enable_nvram_access(tp);
 
@@ -8785,10 +8813,6 @@ static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
 
                offset = offset + (pagesize - page_off);
 
-               /* Nvram lock released by tg3_nvram_read() above,
-                * so need to get it again.
-                */
-               tg3_nvram_lock(tp);
                tg3_enable_nvram_access(tp);
 
                /*
@@ -8925,7 +8949,9 @@ static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
        else {
                u32 grc_mode;
 
-               tg3_nvram_lock(tp);
+               ret = tg3_nvram_lock(tp);
+               if (ret)
+                       return ret;
 
                tg3_enable_nvram_access(tp);
                if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
index 890e1635996b56294fdaca69a9bb37e78ea7ce94..e8243305f0e807a9433ac9e0762dc5266ff116aa 100644 (file)
@@ -2275,6 +2275,7 @@ struct tg3 {
        dma_addr_t                      stats_mapping;
        struct work_struct              reset_task;
 
+       int                             nvram_lock_cnt;
        u32                             nvram_size;
        u32                             nvram_pagesize;
        u32                             nvram_jedecnum;
index ee866fd6957de2cf9ed0f3b75a9ca37971c4534b..a4c7ae94614d0b0dd9a3ab0a1de9332b1ba36405 100644 (file)
@@ -5668,13 +5668,13 @@ static int airo_set_freq(struct net_device *dev,
                int channel = fwrq->m;
                /* We should do a better check than that,
                 * based on the card capability !!! */
-               if((channel < 1) || (channel > 16)) {
+               if((channel < 1) || (channel > 14)) {
                        printk(KERN_DEBUG "%s: New channel value of %d is invalid!\n", dev->name, fwrq->m);
                        rc = -EINVAL;
                } else {
                        readConfigRid(local, 1);
                        /* Yes ! We can set it !!! */
-                       local->config.channelSet = (u16)(channel - 1);
+                       local->config.channelSet = (u16) channel;
                        set_bit (FLAG_COMMIT, &local->flags);
                }
        }
@@ -5692,6 +5692,7 @@ static int airo_get_freq(struct net_device *dev,
 {
        struct airo_info *local = dev->priv;
        StatusRid status_rid;           /* Card status info */
+       int ch;
 
        readConfigRid(local, 1);
        if ((local->config.opmode & 0xFF) == MODE_STA_ESS)
@@ -5699,16 +5700,14 @@ static int airo_get_freq(struct net_device *dev,
        else
                readStatusRid(local, &status_rid, 1);
 
-#ifdef WEXT_USECHANNELS
-       fwrq->m = ((int)status_rid.channel) + 1;
-       fwrq->e = 0;
-#else
-       {
-               int f = (int)status_rid.channel;
-               fwrq->m = frequency_list[f] * 100000;
+       ch = (int)status_rid.channel;
+       if((ch > 0) && (ch < 15)) {
+               fwrq->m = frequency_list[ch - 1] * 100000;
                fwrq->e = 1;
+       } else {
+               fwrq->m = ch;
+               fwrq->e = 0;
        }
-#endif
 
        return 0;
 }
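
In the Wireless Extensions encoding used here a frequency is reported as m * 10^e. Assuming frequency_list[] holds the channel frequencies in MHz (2412 for channel 1, as in the analogous hostap freq_list added elsewhere in this merge), channel 1 is reported as m = 2412 * 100000 with e = 1, i.e. 2412 * 100000 * 10 = 2.412e9 Hz = 2.412 GHz; out-of-range values fall back to reporting the raw channel number with e = 0.
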
@@ -5783,7 +5782,7 @@ static int airo_get_essid(struct net_device *dev,
        /* If none, we may want to get the one that was set */
 
        /* Push it out ! */
-       dwrq->length = status_rid.SSIDlen + 1;
+       dwrq->length = status_rid.SSIDlen;
        dwrq->flags = 1; /* active */
 
        return 0;
index f0ccfef664459fced62da435aec49d65da8dd11a..98a76f10a0f71f29f09ef031b000659dbbccb09c 100644 (file)
@@ -1718,11 +1718,11 @@ static int atmel_get_essid(struct net_device *dev,
        if (priv->new_SSID_size != 0) {
                memcpy(extra, priv->new_SSID, priv->new_SSID_size);
                extra[priv->new_SSID_size] = '\0';
-               dwrq->length = priv->new_SSID_size + 1;
+               dwrq->length = priv->new_SSID_size;
        } else {
                memcpy(extra, priv->SSID, priv->SSID_size);
                extra[priv->SSID_size] = '\0';
-               dwrq->length = priv->SSID_size + 1;
+               dwrq->length = priv->SSID_size;
        }
 
        dwrq->flags = !priv->connect_to_any_BSS; /* active */
index 56f41c714d3808e71ba53661f7c883131c46301b..308f773ad566b17b2f394273fefb5f6d1507f652 100644 (file)
@@ -26,11 +26,25 @@ config HOSTAP_FIRMWARE
        depends on HOSTAP
        ---help---
        Configure Host AP driver to include support for firmware image
-       download. Current version supports only downloading to volatile, i.e.,
-       RAM memory. Flash upgrade is not yet supported.
+       download. This option by itself only enables downloading to the
+       volatile memory, i.e. the card RAM. This option is required to
+       support cards that don't have firmware in flash, such as D-Link
+       DWL-520 rev E and D-Link DWL-650 rev P.
 
-       Firmware image downloading needs user space tool, prism2_srec. It is
-       available from http://hostap.epitest.fi/.
+       Firmware image downloading needs a user space tool, prism2_srec.
+       It is available from http://hostap.epitest.fi/.
+
+config HOSTAP_FIRMWARE_NVRAM
+       bool "Support for non-volatile firmware download"
+       depends on HOSTAP_FIRMWARE
+       ---help---
+       Allow Host AP driver to write firmware images to the non-volatile
+       card memory, i.e. flash memory that survives power cycling.
+       Enable this option if you want to be able to change card firmware
+       permanently.
+
+       Firmware image downloading needs a user space tool, prism2_srec.
+       It is available from http://hostap.epitest.fi/.
 
 config HOSTAP_PLX
        tristate "Host AP driver for Prism2/2.5/3 in PLX9052 PCI adaptors"
@@ -61,7 +75,7 @@ config HOSTAP_PCI
 
 config HOSTAP_CS
        tristate "Host AP driver for Prism2/2.5/3 PC Cards"
-       depends on PCMCIA!=n && HOSTAP
+       depends on PCMCIA && HOSTAP
        ---help---
        Host AP driver's version for Prism2/2.5/3 PC Cards.
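
An illustrative .config fragment (an example, not a recommendation) for a PC Card setup that enables both volatile and the new non-volatile firmware download; CONFIG_HOSTAP_FIRMWARE_NVRAM replaces the hand-edited PRISM2_NON_VOLATILE_DOWNLOAD define removed from hostap_config.h further down:

        CONFIG_HOSTAP=m
        CONFIG_HOSTAP_FIRMWARE=y
        CONFIG_HOSTAP_FIRMWARE_NVRAM=y
        CONFIG_HOSTAP_CS=m
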
 
index 353ccb93134b79e70e38bb4a349cd681018c9576..b8e41a702c00ba5619edd864dbe35a9f5651ceb0 100644 (file)
@@ -1,4 +1,5 @@
-hostap-y := hostap_main.o
+hostap-y := hostap_80211_rx.o hostap_80211_tx.o hostap_ap.o hostap_info.o \
+            hostap_ioctl.o hostap_main.o hostap_proc.o 
 obj-$(CONFIG_HOSTAP) += hostap.o
 
 obj-$(CONFIG_HOSTAP_CS) += hostap_cs.o
index 5fac89b8ce3a036459b1a75d2e721c54d855e9a4..5e63765219fe91fa0bf08204d4a28f8096339f28 100644 (file)
@@ -1,6 +1,15 @@
 #ifndef HOSTAP_H
 #define HOSTAP_H
 
+#include <linux/ethtool.h>
+
+#include "hostap_wlan.h"
+#include "hostap_ap.h"
+
+static const long freq_list[] = { 2412, 2417, 2422, 2427, 2432, 2437, 2442,
+                                 2447, 2452, 2457, 2462, 2467, 2472, 2484 };
+#define FREQ_COUNT (sizeof(freq_list) / sizeof(freq_list[0]))
+
 /* hostap.c */
 
 extern struct proc_dir_entry *hostap_proc;
@@ -40,6 +49,26 @@ int prism2_update_comms_qual(struct net_device *dev);
 int prism2_sta_send_mgmt(local_info_t *local, u8 *dst, u16 stype,
                         u8 *body, size_t bodylen);
 int prism2_sta_deauth(local_info_t *local, u16 reason);
+int prism2_wds_add(local_info_t *local, u8 *remote_addr,
+                  int rtnl_locked);
+int prism2_wds_del(local_info_t *local, u8 *remote_addr,
+                  int rtnl_locked, int do_not_remove);
+
+
+/* hostap_ap.c */
+
+int ap_control_add_mac(struct mac_restrictions *mac_restrictions, u8 *mac);
+int ap_control_del_mac(struct mac_restrictions *mac_restrictions, u8 *mac);
+void ap_control_flush_macs(struct mac_restrictions *mac_restrictions);
+int ap_control_kick_mac(struct ap_data *ap, struct net_device *dev, u8 *mac);
+void ap_control_kickall(struct ap_data *ap);
+void * ap_crypt_get_ptrs(struct ap_data *ap, u8 *addr, int permanent,
+                        struct ieee80211_crypt_data ***crypt);
+int prism2_ap_get_sta_qual(local_info_t *local, struct sockaddr addr[],
+                          struct iw_quality qual[], int buf_size,
+                          int aplist);
+int prism2_ap_translate_scan(struct net_device *dev, char *buffer);
+int prism2_hostapd(struct ap_data *ap, struct prism2_hostapd_param *param);
 
 
 /* hostap_proc.c */
@@ -54,4 +83,12 @@ void hostap_info_init(local_info_t *local);
 void hostap_info_process(local_info_t *local, struct sk_buff *skb);
 
 
+/* hostap_ioctl.c */
+
+extern const struct iw_handler_def hostap_iw_handler_def;
+extern struct ethtool_ops prism2_ethtool_ops;
+
+int hostap_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
+
+
 #endif /* HOSTAP_H */
index bf506f50d72295a900402d51d3bb4747c64abaee..1fc72fe511e9ddfb00a1c53a528f1d93edc49d5c 100644 (file)
@@ -1,6 +1,9 @@
 #ifndef HOSTAP_80211_H
 #define HOSTAP_80211_H
 
+#include <linux/types.h>
+#include <net/ieee80211_crypt.h>
+
 struct hostap_ieee80211_mgmt {
        u16 frame_control;
        u16 duration;
index 4b13b76425c1c0642c47c9ce77d3bd567fd425e2..7e04dc94b3bc4e9d8eaeb674b6925b8c9fa05abf 100644 (file)
@@ -1,7 +1,18 @@
 #include <linux/etherdevice.h>
+#include <net/ieee80211_crypt.h>
 
 #include "hostap_80211.h"
 #include "hostap.h"
+#include "hostap_ap.h"
+
+/* See IEEE 802.1H for LLC/SNAP encapsulation/decapsulation */
+/* Ethernet-II snap header (RFC1042 for most EtherTypes) */
+static unsigned char rfc1042_header[] =
+{ 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00 };
+/* Bridge-Tunnel header (for EtherTypes ETH_P_AARP and ETH_P_IPX) */
+static unsigned char bridge_tunnel_header[] =
+{ 0xaa, 0xaa, 0x03, 0x00, 0x00, 0xf8 };
+/* No encapsulation header if EtherType < 0x600 (=length) */
 
 void hostap_dump_rx_80211(const char *name, struct sk_buff *skb,
                          struct hostap_80211_rx_status *rx_stats)
index 9d24f8a38ac525843772b41143db514a9f686ecc..4a85e63906f1554b9302c452faea74cb86f4a557 100644 (file)
@@ -1,3 +1,18 @@
+#include "hostap_80211.h"
+#include "hostap_common.h"
+#include "hostap_wlan.h"
+#include "hostap.h"
+#include "hostap_ap.h"
+
+/* See IEEE 802.1H for LLC/SNAP encapsulation/decapsulation */
+/* Ethernet-II snap header (RFC1042 for most EtherTypes) */
+static unsigned char rfc1042_header[] =
+{ 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00 };
+/* Bridge-Tunnel header (for EtherTypes ETH_P_AARP and ETH_P_IPX) */
+static unsigned char bridge_tunnel_header[] =
+{ 0xaa, 0xaa, 0x03, 0x00, 0x00, 0xf8 };
+/* No encapsulation header if EtherType < 0x600 (=length) */
+
 void hostap_dump_tx_80211(const char *name, struct sk_buff *skb)
 {
        struct ieee80211_hdr_4addr *hdr;
index 9da94ab7f05f87e24eec2816ca5094e894906d01..753a1de6664bba6f8a90972a3ba13be751f20ec6 100644 (file)
  *   (8802.11: 5.5)
  */
 
+#include <linux/proc_fs.h>
+#include <linux/delay.h>
+#include <linux/random.h>
+
+#include "hostap_wlan.h"
+#include "hostap.h"
+#include "hostap_ap.h"
+
 static int other_ap_policy[MAX_PARM_DEVICES] = { AP_OTHER_AP_SKIP_ALL,
                                                 DEF_INTS };
 module_param_array(other_ap_policy, int, NULL, 0444);
@@ -360,8 +368,7 @@ static int ap_control_proc_read(char *page, char **start, off_t off,
 }
 
 
-static int ap_control_add_mac(struct mac_restrictions *mac_restrictions,
-                             u8 *mac)
+int ap_control_add_mac(struct mac_restrictions *mac_restrictions, u8 *mac)
 {
        struct mac_entry *entry;
 
@@ -380,8 +387,7 @@ static int ap_control_add_mac(struct mac_restrictions *mac_restrictions,
 }
 
 
-static int ap_control_del_mac(struct mac_restrictions *mac_restrictions,
-                             u8 *mac)
+int ap_control_del_mac(struct mac_restrictions *mac_restrictions, u8 *mac)
 {
        struct list_head *ptr;
        struct mac_entry *entry;
@@ -433,7 +439,7 @@ static int ap_control_mac_deny(struct mac_restrictions *mac_restrictions,
 }
 
 
-static void ap_control_flush_macs(struct mac_restrictions *mac_restrictions)
+void ap_control_flush_macs(struct mac_restrictions *mac_restrictions)
 {
        struct list_head *ptr, *n;
        struct mac_entry *entry;
@@ -454,8 +460,7 @@ static void ap_control_flush_macs(struct mac_restrictions *mac_restrictions)
 }
 
 
-static int ap_control_kick_mac(struct ap_data *ap, struct net_device *dev,
-                              u8 *mac)
+int ap_control_kick_mac(struct ap_data *ap, struct net_device *dev, u8 *mac)
 {
        struct sta_info *sta;
        u16 resp;
@@ -486,7 +491,7 @@ static int ap_control_kick_mac(struct ap_data *ap, struct net_device *dev,
 #endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */
 
 
-static void ap_control_kickall(struct ap_data *ap)
+void ap_control_kickall(struct ap_data *ap)
 {
        struct list_head *ptr, *n;
        struct sta_info *sta;
@@ -2321,9 +2326,9 @@ static void schedule_packet_send(local_info_t *local, struct sta_info *sta)
 }
 
 
-static int prism2_ap_get_sta_qual(local_info_t *local, struct sockaddr addr[],
-                                 struct iw_quality qual[], int buf_size,
-                                 int aplist)
+int prism2_ap_get_sta_qual(local_info_t *local, struct sockaddr addr[],
+                          struct iw_quality qual[], int buf_size,
+                          int aplist)
 {
        struct ap_data *ap = local->ap;
        struct list_head *ptr;
@@ -2363,7 +2368,7 @@ static int prism2_ap_get_sta_qual(local_info_t *local, struct sockaddr addr[],
 
 /* Translate our list of Access Points & Stations to a card-independent
  * format that the Wireless Tools will understand - Jean II */
-static int prism2_ap_translate_scan(struct net_device *dev, char *buffer)
+int prism2_ap_translate_scan(struct net_device *dev, char *buffer)
 {
        struct hostap_interface *iface;
        local_info_t *local;
@@ -2608,8 +2613,7 @@ static int prism2_hostapd_sta_clear_stats(struct ap_data *ap,
 }
 
 
-static int prism2_hostapd(struct ap_data *ap,
-                         struct prism2_hostapd_param *param)
+int prism2_hostapd(struct ap_data *ap, struct prism2_hostapd_param *param)
 {
        switch (param->cmd) {
        case PRISM2_HOSTAPD_FLUSH:
@@ -3207,8 +3211,8 @@ void hostap_update_rates(local_info_t *local)
 }
 
 
-static void * ap_crypt_get_ptrs(struct ap_data *ap, u8 *addr, int permanent,
-                               struct ieee80211_crypt_data ***crypt)
+void * ap_crypt_get_ptrs(struct ap_data *ap, u8 *addr, int permanent,
+                        struct ieee80211_crypt_data ***crypt)
 {
        struct sta_info *sta;
 
index 6d00df69c2e3e9f57f28af33a8af725aebb8bae9..2fa2452b6b07de6d6c06f76b2e4b0742c90e7dc2 100644 (file)
@@ -1,6 +1,8 @@
 #ifndef HOSTAP_AP_H
 #define HOSTAP_AP_H
 
+#include "hostap_80211.h"
+
 /* AP data structures for STAs */
 
 /* maximum number of frames to buffer per STA */
index 6f4fa9dc308f7e55b6c85006833b7a5e13416cb9..01624005d808f541ae30f762ab5e4dad1785ff8e 100644 (file)
@@ -1,6 +1,9 @@
 #ifndef HOSTAP_COMMON_H
 #define HOSTAP_COMMON_H
 
+#include <linux/types.h>
+#include <linux/if_ether.h>
+
 #define BIT(x) (1 << (x))
 
 #define MAC2STR(a) (a)[0], (a)[1], (a)[2], (a)[3], (a)[4], (a)[5]
index 7ed3425d08c14a15f4a0c2802d90ec24ceef41a0..c090a5aebb58d55f108da5fbe83a92ea9e64e5b6 100644 (file)
 #define PRISM2_DOWNLOAD_SUPPORT
 #endif
 
-#ifdef PRISM2_DOWNLOAD_SUPPORT
-/* Allow writing firmware images into flash, i.e., to non-volatile storage.
- * Before you enable this option, you should make absolutely sure that you are
- * using prism2_srec utility that comes with THIS version of the driver!
- * In addition, please note that it is possible to kill your card with
- * non-volatile download if you are using incorrect image. This feature has not
- * been fully tested, so please be careful with it. */
-/* #define PRISM2_NON_VOLATILE_DOWNLOAD */
-#endif /* PRISM2_DOWNLOAD_SUPPORT */
+/* Allow kernel configuration to enable non-volatile download support. */
+#ifdef CONFIG_HOSTAP_FIRMWARE_NVRAM
+#define PRISM2_NON_VOLATILE_DOWNLOAD
+#endif
 
 /* Save low-level I/O for debugging. This should not be enabled in normal use.
  */
index 5aa998fdf1c48d276a72cd2b8ea2fc3297218b28..50f72d831cf40a8e89ffc37bbf96066825aa3484 100644 (file)
@@ -1,5 +1,8 @@
 /* Host AP driver Info Frame processing (part of hostap.o module) */
 
+#include "hostap_wlan.h"
+#include "hostap.h"
+#include "hostap_ap.h"
 
 /* Called only as a tasklet (software IRQ) */
 static void prism2_info_commtallies16(local_info_t *local, unsigned char *buf,
index 2617d70bcda945814a96fbdf7fde59f79930cdb1..f3e0ce1ee037937a0c950e3b0c59b58b00b4d4bc 100644 (file)
@@ -1,11 +1,13 @@
 /* ioctl() (mostly Linux Wireless Extensions) routines for Host AP driver */
 
-#ifdef in_atomic
-/* Get kernel_locked() for in_atomic() */
+#include <linux/types.h>
 #include <linux/smp_lock.h>
-#endif
 #include <linux/ethtool.h>
+#include <net/ieee80211_crypt.h>
 
+#include "hostap_wlan.h"
+#include "hostap.h"
+#include "hostap_ap.h"
 
 static struct iw_statistics *hostap_get_wireless_stats(struct net_device *dev)
 {
@@ -3910,7 +3912,7 @@ static void prism2_get_drvinfo(struct net_device *dev,
                 local->sta_fw_ver & 0xff);
 }
 
-static struct ethtool_ops prism2_ethtool_ops = {
+struct ethtool_ops prism2_ethtool_ops = {
        .get_drvinfo = prism2_get_drvinfo
 };
 
@@ -3985,7 +3987,7 @@ static const iw_handler prism2_private_handler[] =
        (iw_handler) prism2_ioctl_priv_readmif,         /* 3 */
 };
 
-static const struct iw_handler_def hostap_iw_handler_def =
+const struct iw_handler_def hostap_iw_handler_def =
 {
        .num_standard   = sizeof(prism2_handler) / sizeof(iw_handler),
        .num_private    = sizeof(prism2_private_handler) / sizeof(iw_handler),
index 3d2ea61033be52fd5d0b047ba34de1d2abf20d7d..8dd4c4446a640bf6b3b14f89e054f62ef47a3ec9 100644 (file)
@@ -24,6 +24,7 @@
 #include <linux/kmod.h>
 #include <linux/rtnetlink.h>
 #include <linux/wireless.h>
+#include <linux/etherdevice.h>
 #include <net/iw_handler.h>
 #include <net/ieee80211.h>
 #include <net/ieee80211_crypt.h>
@@ -47,57 +48,6 @@ MODULE_VERSION(PRISM2_VERSION);
 #define PRISM2_MAX_MTU (PRISM2_MAX_FRAME_SIZE - (6 /* LLC */ + 8 /* WEP */))
 
 
-/* hostap.c */
-static int prism2_wds_add(local_info_t *local, u8 *remote_addr,
-                         int rtnl_locked);
-static int prism2_wds_del(local_info_t *local, u8 *remote_addr,
-                         int rtnl_locked, int do_not_remove);
-
-/* hostap_ap.c */
-static int prism2_ap_get_sta_qual(local_info_t *local, struct sockaddr addr[],
-                                 struct iw_quality qual[], int buf_size,
-                                 int aplist);
-static int prism2_ap_translate_scan(struct net_device *dev, char *buffer);
-static int prism2_hostapd(struct ap_data *ap,
-                         struct prism2_hostapd_param *param);
-static void * ap_crypt_get_ptrs(struct ap_data *ap, u8 *addr, int permanent,
-                               struct ieee80211_crypt_data ***crypt);
-static void ap_control_kickall(struct ap_data *ap);
-#ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT
-static int ap_control_add_mac(struct mac_restrictions *mac_restrictions,
-                             u8 *mac);
-static int ap_control_del_mac(struct mac_restrictions *mac_restrictions,
-                             u8 *mac);
-static void ap_control_flush_macs(struct mac_restrictions *mac_restrictions);
-static int ap_control_kick_mac(struct ap_data *ap, struct net_device *dev,
-                              u8 *mac);
-#endif /* !PRISM2_NO_KERNEL_IEEE80211_MGMT */
-
-
-static const long freq_list[] = { 2412, 2417, 2422, 2427, 2432, 2437, 2442,
-                                 2447, 2452, 2457, 2462, 2467, 2472, 2484 };
-#define FREQ_COUNT (sizeof(freq_list) / sizeof(freq_list[0]))
-
-
-/* See IEEE 802.1H for LLC/SNAP encapsulation/decapsulation */
-/* Ethernet-II snap header (RFC1042 for most EtherTypes) */
-static unsigned char rfc1042_header[] =
-{ 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00 };
-/* Bridge-Tunnel header (for EtherTypes ETH_P_AARP and ETH_P_IPX) */
-static unsigned char bridge_tunnel_header[] =
-{ 0xaa, 0xaa, 0x03, 0x00, 0x00, 0xf8 };
-/* No encapsulation header if EtherType < 0x600 (=length) */
-
-
-/* FIX: these could be compiled separately and linked together to hostap.o */
-#include "hostap_ap.c"
-#include "hostap_info.c"
-#include "hostap_ioctl.c"
-#include "hostap_proc.c"
-#include "hostap_80211_rx.c"
-#include "hostap_80211_tx.c"
-
-
 struct net_device * hostap_add_interface(struct local_info *local,
                                         int type, int rtnl_locked,
                                         const char *prefix,
@@ -196,8 +146,8 @@ static inline int prism2_wds_special_addr(u8 *addr)
 }
 
 
-static int prism2_wds_add(local_info_t *local, u8 *remote_addr,
-                         int rtnl_locked)
+int prism2_wds_add(local_info_t *local, u8 *remote_addr,
+                  int rtnl_locked)
 {
        struct net_device *dev;
        struct list_head *ptr;
@@ -258,8 +208,8 @@ static int prism2_wds_add(local_info_t *local, u8 *remote_addr,
 }
 
 
-static int prism2_wds_del(local_info_t *local, u8 *remote_addr,
-                         int rtnl_locked, int do_not_remove)
+int prism2_wds_del(local_info_t *local, u8 *remote_addr,
+                  int rtnl_locked, int do_not_remove)
 {
        unsigned long flags;
        struct list_head *ptr;
index a0a4cbd4937a182676939c336a8f57b60ade7c93..d1d8ce022e63af6d3060589e504adc3765df085d 100644 (file)
@@ -1,5 +1,12 @@
 /* /proc routines for Host AP driver */
 
+#include <linux/types.h>
+#include <linux/proc_fs.h>
+#include <net/ieee80211_crypt.h>
+
+#include "hostap_wlan.h"
+#include "hostap.h"
+
 #define PROC_LIMIT (PAGE_SIZE - 80)
 
 
index cfd8015594921a849dd00215d4190ca381b8cd91..87a54aa6f4dd2a1e2c1bd6cdbf393cb9f7aa4411 100644 (file)
@@ -1,6 +1,10 @@
 #ifndef HOSTAP_WLAN_H
 #define HOSTAP_WLAN_H
 
+#include <linux/wireless.h>
+#include <linux/netdevice.h>
+#include <net/iw_handler.h>
+
 #include "hostap_config.h"
 #include "hostap_common.h"
 
index 7518384f34d964350796f41af2c0577272f4eac5..6290c9f7e939c2b4b21c04d97879c231063e2b24 100644 (file)
@@ -2201,6 +2201,17 @@ static int ipw2100_alloc_skb(struct ipw2100_priv *priv,
 #define SEARCH_SNAPSHOT 1
 
 #define SNAPSHOT_ADDR(ofs) (priv->snapshot[((ofs) >> 12) & 0xff] + ((ofs) & 0xfff))
+static void ipw2100_snapshot_free(struct ipw2100_priv *priv)
+{
+       int i;
+       if (!priv->snapshot[0])
+               return;
+       for (i = 0; i < 0x30; i++)
+               kfree(priv->snapshot[i]);
+       priv->snapshot[0] = NULL;
+}
+
+#ifdef CONFIG_IPW2100_DEBUG_C3
 static int ipw2100_snapshot_alloc(struct ipw2100_priv *priv)
 {
        int i;
@@ -2221,16 +2232,6 @@ static int ipw2100_snapshot_alloc(struct ipw2100_priv *priv)
        return 1;
 }
 
-static void ipw2100_snapshot_free(struct ipw2100_priv *priv)
-{
-       int i;
-       if (!priv->snapshot[0])
-               return;
-       for (i = 0; i < 0x30; i++)
-               kfree(priv->snapshot[i]);
-       priv->snapshot[0] = NULL;
-}
-
 static u32 ipw2100_match_buf(struct ipw2100_priv *priv, u8 * in_buf,
                                    size_t len, int mode)
 {
@@ -2269,6 +2270,7 @@ static u32 ipw2100_match_buf(struct ipw2100_priv *priv, u8 * in_buf,
 
        return ret;
 }
+#endif
 
 /*
  *
@@ -5735,70 +5737,6 @@ static struct net_device_stats *ipw2100_stats(struct net_device *dev)
        return &priv->ieee->stats;
 }
 
-#if WIRELESS_EXT < 18
-/* Support for wpa_supplicant before WE-18, deprecated. */
-
-/* following definitions must match definitions in driver_ipw.c */
-
-#define IPW2100_IOCTL_WPA_SUPPLICANT           SIOCIWFIRSTPRIV+30
-
-#define IPW2100_CMD_SET_WPA_PARAM              1
-#define        IPW2100_CMD_SET_WPA_IE                  2
-#define IPW2100_CMD_SET_ENCRYPTION             3
-#define IPW2100_CMD_MLME                       4
-
-#define IPW2100_PARAM_WPA_ENABLED              1
-#define IPW2100_PARAM_TKIP_COUNTERMEASURES     2
-#define IPW2100_PARAM_DROP_UNENCRYPTED         3
-#define IPW2100_PARAM_PRIVACY_INVOKED          4
-#define IPW2100_PARAM_AUTH_ALGS                        5
-#define IPW2100_PARAM_IEEE_802_1X              6
-
-#define IPW2100_MLME_STA_DEAUTH                        1
-#define IPW2100_MLME_STA_DISASSOC              2
-
-#define IPW2100_CRYPT_ERR_UNKNOWN_ALG          2
-#define IPW2100_CRYPT_ERR_UNKNOWN_ADDR         3
-#define IPW2100_CRYPT_ERR_CRYPT_INIT_FAILED    4
-#define IPW2100_CRYPT_ERR_KEY_SET_FAILED       5
-#define IPW2100_CRYPT_ERR_TX_KEY_SET_FAILED    6
-#define IPW2100_CRYPT_ERR_CARD_CONF_FAILED     7
-
-#define        IPW2100_CRYPT_ALG_NAME_LEN              16
-
-struct ipw2100_param {
-       u32 cmd;
-       u8 sta_addr[ETH_ALEN];
-       union {
-               struct {
-                       u8 name;
-                       u32 value;
-               } wpa_param;
-               struct {
-                       u32 len;
-                       u8 reserved[32];
-                       u8 data[0];
-               } wpa_ie;
-               struct {
-                       u32 command;
-                       u32 reason_code;
-               } mlme;
-               struct {
-                       u8 alg[IPW2100_CRYPT_ALG_NAME_LEN];
-                       u8 set_tx;
-                       u32 err;
-                       u8 idx;
-                       u8 seq[8];      /* sequence counter (set: RX, get: TX) */
-                       u16 key_len;
-                       u8 key[0];
-               } crypt;
-
-       } u;
-};
-
-/* end of driver_ipw.c code */
-#endif                         /* WIRELESS_EXT < 18 */
-
 static int ipw2100_wpa_enable(struct ipw2100_priv *priv, int value)
 {
        /* This is called when wpa_supplicant loads and closes the driver
@@ -5807,11 +5745,6 @@ static int ipw2100_wpa_enable(struct ipw2100_priv *priv, int value)
        return 0;
 }
 
-#if WIRELESS_EXT < 18
-#define IW_AUTH_ALG_OPEN_SYSTEM                        0x1
-#define IW_AUTH_ALG_SHARED_KEY                 0x2
-#endif
-
 static int ipw2100_wpa_set_auth_algs(struct ipw2100_priv *priv, int value)
 {
 
@@ -5855,360 +5788,6 @@ void ipw2100_wpa_assoc_frame(struct ipw2100_priv *priv,
        ipw2100_set_wpa_ie(priv, &frame, 0);
 }
 
-#if WIRELESS_EXT < 18
-static int ipw2100_wpa_set_param(struct net_device *dev, u8 name, u32 value)
-{
-       struct ipw2100_priv *priv = ieee80211_priv(dev);
-       struct ieee80211_crypt_data *crypt;
-       unsigned long flags;
-       int ret = 0;
-
-       switch (name) {
-       case IPW2100_PARAM_WPA_ENABLED:
-               ret = ipw2100_wpa_enable(priv, value);
-               break;
-
-       case IPW2100_PARAM_TKIP_COUNTERMEASURES:
-               crypt = priv->ieee->crypt[priv->ieee->tx_keyidx];
-               if (!crypt || !crypt->ops->set_flags || !crypt->ops->get_flags)
-                       break;
-
-               flags = crypt->ops->get_flags(crypt->priv);
-
-               if (value)
-                       flags |= IEEE80211_CRYPTO_TKIP_COUNTERMEASURES;
-               else
-                       flags &= ~IEEE80211_CRYPTO_TKIP_COUNTERMEASURES;
-
-               crypt->ops->set_flags(flags, crypt->priv);
-
-               break;
-
-       case IPW2100_PARAM_DROP_UNENCRYPTED:{
-                       /* See IW_AUTH_DROP_UNENCRYPTED handling for details */
-                       struct ieee80211_security sec = {
-                               .flags = SEC_ENABLED,
-                               .enabled = value,
-                       };
-                       priv->ieee->drop_unencrypted = value;
-                       /* We only change SEC_LEVEL for open mode. Others
-                        * are set by ipw_wpa_set_encryption.
-                        */
-                       if (!value) {
-                               sec.flags |= SEC_LEVEL;
-                               sec.level = SEC_LEVEL_0;
-                       } else {
-                               sec.flags |= SEC_LEVEL;
-                               sec.level = SEC_LEVEL_1;
-                       }
-                       if (priv->ieee->set_security)
-                               priv->ieee->set_security(priv->ieee->dev, &sec);
-                       break;
-               }
-
-       case IPW2100_PARAM_PRIVACY_INVOKED:
-               priv->ieee->privacy_invoked = value;
-               break;
-
-       case IPW2100_PARAM_AUTH_ALGS:
-               ret = ipw2100_wpa_set_auth_algs(priv, value);
-               break;
-
-       case IPW2100_PARAM_IEEE_802_1X:
-               priv->ieee->ieee802_1x = value;
-               break;
-
-       default:
-               printk(KERN_ERR DRV_NAME ": %s: Unknown WPA param: %d\n",
-                      dev->name, name);
-               ret = -EOPNOTSUPP;
-       }
-
-       return ret;
-}
-
-static int ipw2100_wpa_mlme(struct net_device *dev, int command, int reason)
-{
-
-       struct ipw2100_priv *priv = ieee80211_priv(dev);
-       int ret = 0;
-
-       switch (command) {
-       case IPW2100_MLME_STA_DEAUTH:
-               // silently ignore
-               break;
-
-       case IPW2100_MLME_STA_DISASSOC:
-               ipw2100_disassociate_bssid(priv);
-               break;
-
-       default:
-               printk(KERN_ERR DRV_NAME ": %s: Unknown MLME request: %d\n",
-                      dev->name, command);
-               ret = -EOPNOTSUPP;
-       }
-
-       return ret;
-}
-
-static int ipw2100_wpa_set_wpa_ie(struct net_device *dev,
-                                 struct ipw2100_param *param, int plen)
-{
-
-       struct ipw2100_priv *priv = ieee80211_priv(dev);
-       struct ieee80211_device *ieee = priv->ieee;
-       u8 *buf;
-
-       if (!ieee->wpa_enabled)
-               return -EOPNOTSUPP;
-
-       if (param->u.wpa_ie.len > MAX_WPA_IE_LEN ||
-           (param->u.wpa_ie.len && param->u.wpa_ie.data == NULL))
-               return -EINVAL;
-
-       if (param->u.wpa_ie.len) {
-               buf = kmalloc(param->u.wpa_ie.len, GFP_KERNEL);
-               if (buf == NULL)
-                       return -ENOMEM;
-
-               memcpy(buf, param->u.wpa_ie.data, param->u.wpa_ie.len);
-
-               kfree(ieee->wpa_ie);
-               ieee->wpa_ie = buf;
-               ieee->wpa_ie_len = param->u.wpa_ie.len;
-
-       } else {
-               kfree(ieee->wpa_ie);
-               ieee->wpa_ie = NULL;
-               ieee->wpa_ie_len = 0;
-       }
-
-       ipw2100_wpa_assoc_frame(priv, ieee->wpa_ie, ieee->wpa_ie_len);
-
-       return 0;
-}
-
-/* implementation borrowed from hostap driver */
-
-static int ipw2100_wpa_set_encryption(struct net_device *dev,
-                                     struct ipw2100_param *param,
-                                     int param_len)
-{
-       int ret = 0;
-       struct ipw2100_priv *priv = ieee80211_priv(dev);
-       struct ieee80211_device *ieee = priv->ieee;
-       struct ieee80211_crypto_ops *ops;
-       struct ieee80211_crypt_data **crypt;
-
-       struct ieee80211_security sec = {
-               .flags = 0,
-       };
-
-       param->u.crypt.err = 0;
-       param->u.crypt.alg[IPW2100_CRYPT_ALG_NAME_LEN - 1] = '\0';
-
-       if (param_len !=
-           (int)((char *)param->u.crypt.key - (char *)param) +
-           param->u.crypt.key_len) {
-               IPW_DEBUG_INFO("Len mismatch %d, %d\n", param_len,
-                              param->u.crypt.key_len);
-               return -EINVAL;
-       }
-       if (param->sta_addr[0] == 0xff && param->sta_addr[1] == 0xff &&
-           param->sta_addr[2] == 0xff && param->sta_addr[3] == 0xff &&
-           param->sta_addr[4] == 0xff && param->sta_addr[5] == 0xff) {
-               if (param->u.crypt.idx >= WEP_KEYS)
-                       return -EINVAL;
-               crypt = &ieee->crypt[param->u.crypt.idx];
-       } else {
-               return -EINVAL;
-       }
-
-       sec.flags |= SEC_ENABLED | SEC_ENCRYPT;
-       if (strcmp(param->u.crypt.alg, "none") == 0) {
-               if (crypt) {
-                       sec.enabled = 0;
-                       sec.encrypt = 0;
-                       sec.level = SEC_LEVEL_0;
-                       sec.flags |= SEC_LEVEL;
-                       ieee80211_crypt_delayed_deinit(ieee, crypt);
-               }
-               goto done;
-       }
-       sec.enabled = 1;
-       sec.encrypt = 1;
-
-       ops = ieee80211_get_crypto_ops(param->u.crypt.alg);
-       if (ops == NULL && strcmp(param->u.crypt.alg, "WEP") == 0) {
-               request_module("ieee80211_crypt_wep");
-               ops = ieee80211_get_crypto_ops(param->u.crypt.alg);
-       } else if (ops == NULL && strcmp(param->u.crypt.alg, "TKIP") == 0) {
-               request_module("ieee80211_crypt_tkip");
-               ops = ieee80211_get_crypto_ops(param->u.crypt.alg);
-       } else if (ops == NULL && strcmp(param->u.crypt.alg, "CCMP") == 0) {
-               request_module("ieee80211_crypt_ccmp");
-               ops = ieee80211_get_crypto_ops(param->u.crypt.alg);
-       }
-       if (ops == NULL) {
-               IPW_DEBUG_INFO("%s: unknown crypto alg '%s'\n",
-                              dev->name, param->u.crypt.alg);
-               param->u.crypt.err = IPW2100_CRYPT_ERR_UNKNOWN_ALG;
-               ret = -EINVAL;
-               goto done;
-       }
-
-       if (*crypt == NULL || (*crypt)->ops != ops) {
-               struct ieee80211_crypt_data *new_crypt;
-
-               ieee80211_crypt_delayed_deinit(ieee, crypt);
-
-               new_crypt = kzalloc(sizeof(struct ieee80211_crypt_data), GFP_KERNEL);
-               if (new_crypt == NULL) {
-                       ret = -ENOMEM;
-                       goto done;
-               }
-               new_crypt->ops = ops;
-               if (new_crypt->ops && try_module_get(new_crypt->ops->owner))
-                       new_crypt->priv =
-                           new_crypt->ops->init(param->u.crypt.idx);
-
-               if (new_crypt->priv == NULL) {
-                       kfree(new_crypt);
-                       param->u.crypt.err =
-                           IPW2100_CRYPT_ERR_CRYPT_INIT_FAILED;
-                       ret = -EINVAL;
-                       goto done;
-               }
-
-               *crypt = new_crypt;
-       }
-
-       if (param->u.crypt.key_len > 0 && (*crypt)->ops->set_key &&
-           (*crypt)->ops->set_key(param->u.crypt.key,
-                                  param->u.crypt.key_len, param->u.crypt.seq,
-                                  (*crypt)->priv) < 0) {
-               IPW_DEBUG_INFO("%s: key setting failed\n", dev->name);
-               param->u.crypt.err = IPW2100_CRYPT_ERR_KEY_SET_FAILED;
-               ret = -EINVAL;
-               goto done;
-       }
-
-       if (param->u.crypt.set_tx) {
-               ieee->tx_keyidx = param->u.crypt.idx;
-               sec.active_key = param->u.crypt.idx;
-               sec.flags |= SEC_ACTIVE_KEY;
-       }
-
-       if (ops->name != NULL) {
-
-               if (strcmp(ops->name, "WEP") == 0) {
-                       memcpy(sec.keys[param->u.crypt.idx],
-                              param->u.crypt.key, param->u.crypt.key_len);
-                       sec.key_sizes[param->u.crypt.idx] =
-                           param->u.crypt.key_len;
-                       sec.flags |= (1 << param->u.crypt.idx);
-                       sec.flags |= SEC_LEVEL;
-                       sec.level = SEC_LEVEL_1;
-               } else if (strcmp(ops->name, "TKIP") == 0) {
-                       sec.flags |= SEC_LEVEL;
-                       sec.level = SEC_LEVEL_2;
-               } else if (strcmp(ops->name, "CCMP") == 0) {
-                       sec.flags |= SEC_LEVEL;
-                       sec.level = SEC_LEVEL_3;
-               }
-       }
-      done:
-       if (ieee->set_security)
-               ieee->set_security(ieee->dev, &sec);
-
-       /* Do not reset port if card is in Managed mode since resetting will
-        * generate new IEEE 802.11 authentication which may end up in looping
-        * with IEEE 802.1X.  If your hardware requires a reset after WEP
-        * configuration (for example... Prism2), implement the reset_port in
-        * the callbacks structures used to initialize the 802.11 stack. */
-       if (ieee->reset_on_keychange &&
-           ieee->iw_mode != IW_MODE_INFRA &&
-           ieee->reset_port && ieee->reset_port(dev)) {
-               IPW_DEBUG_INFO("%s: reset_port failed\n", dev->name);
-               param->u.crypt.err = IPW2100_CRYPT_ERR_CARD_CONF_FAILED;
-               return -EINVAL;
-       }
-
-       return ret;
-}
-
-static int ipw2100_wpa_supplicant(struct net_device *dev, struct iw_point *p)
-{
-
-       struct ipw2100_param *param;
-       int ret = 0;
-
-       IPW_DEBUG_IOCTL("wpa_supplicant: len=%d\n", p->length);
-
-       if (p->length < sizeof(struct ipw2100_param) || !p->pointer)
-               return -EINVAL;
-
-       param = (struct ipw2100_param *)kmalloc(p->length, GFP_KERNEL);
-       if (param == NULL)
-               return -ENOMEM;
-
-       if (copy_from_user(param, p->pointer, p->length)) {
-               kfree(param);
-               return -EFAULT;
-       }
-
-       switch (param->cmd) {
-
-       case IPW2100_CMD_SET_WPA_PARAM:
-               ret = ipw2100_wpa_set_param(dev, param->u.wpa_param.name,
-                                           param->u.wpa_param.value);
-               break;
-
-       case IPW2100_CMD_SET_WPA_IE:
-               ret = ipw2100_wpa_set_wpa_ie(dev, param, p->length);
-               break;
-
-       case IPW2100_CMD_SET_ENCRYPTION:
-               ret = ipw2100_wpa_set_encryption(dev, param, p->length);
-               break;
-
-       case IPW2100_CMD_MLME:
-               ret = ipw2100_wpa_mlme(dev, param->u.mlme.command,
-                                      param->u.mlme.reason_code);
-               break;
-
-       default:
-               printk(KERN_ERR DRV_NAME
-                      ": %s: Unknown WPA supplicant request: %d\n", dev->name,
-                      param->cmd);
-               ret = -EOPNOTSUPP;
-
-       }
-
-       if (ret == 0 && copy_to_user(p->pointer, param, p->length))
-               ret = -EFAULT;
-
-       kfree(param);
-       return ret;
-}
-
-static int ipw2100_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
-{
-       struct iwreq *wrq = (struct iwreq *)rq;
-       int ret = -1;
-       switch (cmd) {
-       case IPW2100_IOCTL_WPA_SUPPLICANT:
-               ret = ipw2100_wpa_supplicant(dev, &wrq->u.data);
-               return ret;
-
-       default:
-               return -EOPNOTSUPP;
-       }
-
-       return -EOPNOTSUPP;
-}
-#endif                         /* WIRELESS_EXT < 18 */
-
 static void ipw_ethtool_get_drvinfo(struct net_device *dev,
                                    struct ethtool_drvinfo *info)
 {
@@ -6337,9 +5916,6 @@ static struct net_device *ipw2100_alloc_device(struct pci_dev *pci_dev,
        dev->open = ipw2100_open;
        dev->stop = ipw2100_close;
        dev->init = ipw2100_net_init;
-#if WIRELESS_EXT < 18
-       dev->do_ioctl = ipw2100_ioctl;
-#endif
        dev->get_stats = ipw2100_stats;
        dev->ethtool_ops = &ipw2100_ethtool_ops;
        dev->tx_timeout = ipw2100_tx_timeout;
@@ -7538,11 +7114,17 @@ static int ipw2100_wx_set_txpow(struct net_device *dev,
 {
        struct ipw2100_priv *priv = ieee80211_priv(dev);
        int err = 0, value;
+
+       if (ipw_radio_kill_sw(priv, wrqu->txpower.disabled))
+               return -EINPROGRESS;
 
        if (priv->ieee->iw_mode != IW_MODE_ADHOC)
+               return 0;
+
+       if ((wrqu->txpower.flags & IW_TXPOW_TYPE) != IW_TXPOW_DBM)
                return -EINVAL;
 
-       if (wrqu->txpower.disabled == 1 || wrqu->txpower.fixed == 0)
+       if (wrqu->txpower.fixed == 0)
                value = IPW_TX_POWER_DEFAULT;
        else {
                if (wrqu->txpower.value < IPW_TX_POWER_MIN_DBM ||
@@ -7577,24 +7159,19 @@ static int ipw2100_wx_get_txpow(struct net_device *dev,
 
        struct ipw2100_priv *priv = ieee80211_priv(dev);
 
-       if (priv->ieee->iw_mode != IW_MODE_ADHOC) {
-               wrqu->power.disabled = 1;
-               return 0;
-       }
+       wrqu->txpower.disabled = (priv->status & STATUS_RF_KILL_MASK) ? 1 : 0;
 
        if (priv->tx_power == IPW_TX_POWER_DEFAULT) {
-               wrqu->power.fixed = 0;
-               wrqu->power.value = IPW_TX_POWER_MAX_DBM;
-               wrqu->power.disabled = 1;
+               wrqu->txpower.fixed = 0;
+               wrqu->txpower.value = IPW_TX_POWER_MAX_DBM;
        } else {
-               wrqu->power.disabled = 0;
-               wrqu->power.fixed = 1;
-               wrqu->power.value = priv->tx_power;
+               wrqu->txpower.fixed = 1;
+               wrqu->txpower.value = priv->tx_power;
        }
 
-       wrqu->power.flags = IW_TXPOW_DBM;
+       wrqu->txpower.flags = IW_TXPOW_DBM;
 
-       IPW_DEBUG_WX("GET TX Power -> %d \n", wrqu->power.value);
+       IPW_DEBUG_WX("GET TX Power -> %d \n", wrqu->txpower.value);
 
        return 0;
 }
@@ -7855,7 +7432,6 @@ static int ipw2100_wx_get_power(struct net_device *dev,
        return 0;
 }
 
-#if WIRELESS_EXT > 17
 /*
  * WE-18 WPA support
  */
@@ -8117,7 +7693,6 @@ static int ipw2100_wx_set_mlme(struct net_device *dev,
        }
        return 0;
 }
-#endif                         /* WIRELESS_EXT > 17 */
 
 /*
  *
@@ -8350,11 +7925,7 @@ static iw_handler ipw2100_wx_handlers[] = {
        NULL,                   /* SIOCWIWTHRSPY */
        ipw2100_wx_set_wap,     /* SIOCSIWAP */
        ipw2100_wx_get_wap,     /* SIOCGIWAP */
-#if WIRELESS_EXT > 17
        ipw2100_wx_set_mlme,    /* SIOCSIWMLME */
-#else
-       NULL,                   /* -- hole -- */
-#endif
        NULL,                   /* SIOCGIWAPLIST -- deprecated */
        ipw2100_wx_set_scan,    /* SIOCSIWSCAN */
        ipw2100_wx_get_scan,    /* SIOCGIWSCAN */
@@ -8378,7 +7949,6 @@ static iw_handler ipw2100_wx_handlers[] = {
        ipw2100_wx_get_encode,  /* SIOCGIWENCODE */
        ipw2100_wx_set_power,   /* SIOCSIWPOWER */
        ipw2100_wx_get_power,   /* SIOCGIWPOWER */
-#if WIRELESS_EXT > 17
        NULL,                   /* -- hole -- */
        NULL,                   /* -- hole -- */
        ipw2100_wx_set_genie,   /* SIOCSIWGENIE */
@@ -8388,7 +7958,6 @@ static iw_handler ipw2100_wx_handlers[] = {
        ipw2100_wx_set_encodeext,       /* SIOCSIWENCODEEXT */
        ipw2100_wx_get_encodeext,       /* SIOCGIWENCODEEXT */
        NULL,                   /* SIOCSIWPMKSA */
-#endif
 };
 
 #define IPW2100_PRIV_SET_MONITOR       SIOCIWFIRSTPRIV
index 819be2b6b7df03e87036217cf26d573cef4f3b43..916b24c544e2ec662521ca45bbb8386408f9b4d6 100644 (file)
@@ -8012,6 +8012,10 @@ static int ipw_sw_reset(struct ipw_priv *priv, int init)
        else
                IPW_DEBUG_INFO("Auto adhoc creation disabled.\n");
 
+       priv->config &= ~CFG_STATIC_ESSID;
+       priv->essid_len = 0;
+       memset(priv->essid, 0, IW_ESSID_MAX_SIZE);
+
        if (disable) {
                priv->status |= STATUS_RF_KILL_SW;
                IPW_DEBUG_INFO("Radio disabled.\n");
@@ -8936,14 +8940,12 @@ static int ipw_request_direct_scan(struct ipw_priv *priv, char *essid,
        IPW_DEBUG_HC("starting request direct scan!\n");
 
        if (priv->status & (STATUS_SCANNING | STATUS_SCAN_ABORTING)) {
-               err = wait_event_interruptible(priv->wait_state,
-                                              !(priv->
-                                                status & (STATUS_SCANNING |
-                                                          STATUS_SCAN_ABORTING)));
-               if (err) {
-                       IPW_DEBUG_HC("aborting direct scan");
-                       goto done;
-               }
+               /* We should not sleep here; otherwise we will block most
+                * of the system (for instance, we hold rtnl_lock when we
+                * get here).
+                */
+               err = -EAGAIN;
+               goto done;
        }
        memset(&scan, 0, sizeof(scan));
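The new comment above is the reason the wait_event_interruptible() call was dropped: the caller can hold rtnl_lock, so sleeping here would stall much of the networking stack. A minimal sketch of that back-off pattern, using a hypothetical helper that reuses the status bits above, is:

	/* Hypothetical sketch only: in a context that must not sleep (e.g.
	 * under rtnl_lock), report -EAGAIN instead of waiting for the busy
	 * condition to clear; the caller retries later. */
	static int request_scan_nonblocking(struct ipw_priv *priv)
	{
		if (priv->status & (STATUS_SCANNING | STATUS_SCAN_ABORTING))
			return -EAGAIN;
		/* ... build and queue the scan command here ... */
		return 0;
	}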
 
@@ -11037,7 +11039,6 @@ static int ipw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        net_dev->set_multicast_list = ipw_net_set_multicast_list;
        net_dev->set_mac_address = ipw_net_set_mac_address;
        priv->wireless_data.spy_data = &priv->ieee->spy_data;
-       priv->wireless_data.ieee80211 = priv->ieee;
        net_dev->wireless_data = &priv->wireless_data;
        net_dev->wireless_handlers = &ipw_wx_handler_def;
        net_dev->ethtool_ops = &ipw_ethtool_ops;
@@ -11123,8 +11124,8 @@ static void ipw_pci_remove(struct pci_dev *pdev)
        /* Free MAC hash list for ADHOC */
        for (i = 0; i < IPW_IBSS_MAC_HASH_SIZE; i++) {
                list_for_each_safe(p, q, &priv->ibss_mac_hash[i]) {
-                       kfree(list_entry(p, struct ipw_ibss_seq, list));
                        list_del(p);
+                       kfree(list_entry(p, struct ipw_ibss_seq, list));
                }
        }
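The reordered hunk above unlinks each hash entry before freeing it, so list_del() never touches memory that has already been returned to the allocator. The same pattern in isolation, with a hypothetical node type, looks like:

	/* Hypothetical node type; the driver's real entries are struct
	 * ipw_ibss_seq hanging off the ibss_mac_hash buckets. */
	struct example_node {
		struct list_head list;
		/* payload ... */
	};

	static void example_free_list(struct list_head *head)
	{
		struct list_head *p, *q;

		list_for_each_safe(p, q, head) {
			list_del(p);		/* unlink first ...      */
			kfree(list_entry(p, struct example_node, list));
						/* ... then free the node */
		}
	}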
 
index b664708481cc8ace4cbfb0bd74f3884e64900830..3c128b692bce23135343064458b501749f7d0724 100644 (file)
@@ -261,13 +261,13 @@ orinoco_cs_config(dev_link_t *link)
                /* Note that the CIS values need to be rescaled */
                if (cfg->vcc.present & (1 << CISTPL_POWER_VNOM)) {
                        if (conf.Vcc != cfg->vcc.param[CISTPL_POWER_VNOM] / 10000) {
-                               DEBUG(2, "orinoco_cs_config: Vcc mismatch (conf.Vcc = %d, CIS = %d)\n",  conf.Vcc, cfg->vcc.param[CISTPL_POWER_VNOM] / 10000);
+                               DEBUG(2, "orinoco_cs_config: Vcc mismatch (conf.Vcc = %d, cfg CIS = %d)\n",  conf.Vcc, cfg->vcc.param[CISTPL_POWER_VNOM] / 10000);
                                if (!ignore_cis_vcc)
                                        goto next_entry;
                        }
                } else if (dflt.vcc.present & (1 << CISTPL_POWER_VNOM)) {
                        if (conf.Vcc != dflt.vcc.param[CISTPL_POWER_VNOM] / 10000) {
-                               DEBUG(2, "orinoco_cs_config: Vcc mismatch (conf.Vcc = %d, CIS = %d)\n",  conf.Vcc, dflt.vcc.param[CISTPL_POWER_VNOM] / 10000);
+                               DEBUG(2, "orinoco_cs_config: Vcc mismatch (conf.Vcc = %d, dflt CIS = %d)\n",  conf.Vcc, dflt.vcc.param[CISTPL_POWER_VNOM] / 10000);
                                if(!ignore_cis_vcc)
                                        goto next_entry;
                        }
index 135a156db25d9c07607d7ce0cae5cb9019636417..c5cd61c7f92774043c3184b2b5b23983748adda0 100644 (file)
@@ -748,7 +748,7 @@ prism54_get_essid(struct net_device *ndev, struct iw_request_info *info,
        if (essid->length) {
                dwrq->flags = 1;        /* set ESSID to ON for Wireless Extensions */
                /* if it is too big, truncate it */
-               dwrq->length = min(IW_ESSID_MAX_SIZE, essid->length + 1);
+               dwrq->length = min(IW_ESSID_MAX_SIZE, essid->length);
        } else {
                dwrq->flags = 0;
                dwrq->length = 0;
index 33d64d2ee53f7397fb3b9db833a6e6a89611f1ee..a8261d8454dd54edff637a164439fc213a7e822a 100644 (file)
@@ -177,7 +177,7 @@ islpci_eth_transmit(struct sk_buff *skb, struct net_device *ndev)
 #endif
 
                        newskb->dev = skb->dev;
-                       dev_kfree_skb(skb);
+                       dev_kfree_skb_irq(skb);
                        skb = newskb;
                }
        }
index 319180ca7e71325cc30350c85cd6bae57391e8da..7880d8c31aadc20abb96e554af606d4a337663f1 100644 (file)
@@ -1256,7 +1256,7 @@ static int ray_get_essid(struct net_device *dev,
        extra[IW_ESSID_MAX_SIZE] = '\0';
 
        /* Push it out ! */
-       dwrq->length = strlen(extra) + 1;
+       dwrq->length = strlen(extra);
        dwrq->flags = 1; /* active */
 
        return 0;
index 7e2039f52c49fdcef509b7529626d38282522fb9..cf373625fc7074e3bdfb2f2abf214856c4142c81 100644 (file)
@@ -2280,7 +2280,7 @@ static int wavelan_get_essid(struct net_device *dev,
        extra[IW_ESSID_MAX_SIZE] = '\0';
 
        /* Set the length */
-       wrqu->data.length = strlen(extra) + 1;
+       wrqu->data.length = strlen(extra);
 
        return 0;
 }
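The prism54, ray_cs and wavelan hunks above all drop a trailing "+ 1" so the reported length counts only the ESSID bytes, with no NUL terminator. A hypothetical Wireless Extensions get_essid handler following that convention would look roughly like:

	/* Illustration only: report the ESSID byte count, never including a
	 * trailing '\0'; flags = 1 marks the ESSID as set/active. */
	static int example_get_essid(struct net_device *dev,
				     struct iw_request_info *info,
				     union iwreq_data *wrqu, char *extra)
	{
		static const char essid[] = "example";	/* assumed ESSID */

		memcpy(extra, essid, sizeof(essid) - 1);
		wrqu->essid.length = sizeof(essid) - 1;	/* bytes only, no NUL */
		wrqu->essid.flags = 1;
		return 0;
	}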
index 202b7507a357092906680bd9e937e7882e428324..8e1ba0b7a8e41770a2ab4e18c835b682c2a2a0a1 100644 (file)
@@ -416,7 +416,9 @@ static void attach_msi_entry(struct msi_desc *entry, int vector)
 
 static void irq_handler_init(int cap_id, int pos, int mask)
 {
-       spin_lock(&irq_desc[pos].lock);
+       unsigned long flags;
+
+       spin_lock_irqsave(&irq_desc[pos].lock, flags);
        if (cap_id == PCI_CAP_ID_MSIX)
                irq_desc[pos].handler = &msix_irq_type;
        else {
@@ -425,7 +427,7 @@ static void irq_handler_init(int cap_id, int pos, int mask)
                else
                        irq_desc[pos].handler = &msi_irq_w_maskbit_type;
        }
-       spin_unlock(&irq_desc[pos].lock);
+       spin_unlock_irqrestore(&irq_desc[pos].lock, flags);
 }
 
 static void enable_msi_mode(struct pci_dev *dev, int pos, int type)
index 605f0df0bfba941f3090e8cd3025235a857fc972..dda6099903c18adc8636d89683ddc02a9a17b32a 100644 (file)
@@ -1142,6 +1142,9 @@ static void __devinit quirk_intel_ide_combined(struct pci_dev *pdev)
        case 0x27c4:
                ich = 7;
                break;
+       case 0x2828:    /* ICH8M */
+               ich = 8;
+               break;
        default:
                /* we do not handle this PCI device */
                return;
@@ -1161,7 +1164,7 @@ static void __devinit quirk_intel_ide_combined(struct pci_dev *pdev)
                else
                        return;                 /* not in combined mode */
        } else {
-               WARN_ON((ich != 6) && (ich != 7));
+               WARN_ON((ich != 6) && (ich != 7) && (ich != 8));
                tmp &= 0x3;  /* interesting bits 1:0 */
                if (tmp & (1 << 0))
                        comb = (1 << 2);        /* PATA port 0, SATA port 1 */
index d113290b5fc01ccf977902881eb60b2f699c4bc1..19bd346951dd0e2073d33e8b5d964b1fc032c7a1 100644 (file)
@@ -276,6 +276,16 @@ static const struct pci_device_id ahci_pci_tbl[] = {
          board_ahci }, /* ESB2 */
        { PCI_VENDOR_ID_INTEL, 0x27c6, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
          board_ahci }, /* ICH7-M DH */
+       { PCI_VENDOR_ID_INTEL, 0x2821, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+         board_ahci }, /* ICH8 */
+       { PCI_VENDOR_ID_INTEL, 0x2822, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+         board_ahci }, /* ICH8 */
+       { PCI_VENDOR_ID_INTEL, 0x2824, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+         board_ahci }, /* ICH8 */
+       { PCI_VENDOR_ID_INTEL, 0x2829, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+         board_ahci }, /* ICH8M */
+       { PCI_VENDOR_ID_INTEL, 0x282a, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+         board_ahci }, /* ICH8M */
        { }     /* terminate list */
 };
 
index 69ed77fcb71f9076213761b2a18025a16408d406..7955ebe8e1e8d6d8be83c5f659efb878c8cedb85 100644 (file)
@@ -37,13 +37,13 @@ config AIC79XX_CMDS_PER_DEVICE
 config AIC79XX_RESET_DELAY_MS
        int "Initial bus reset delay in milli-seconds"
        depends on SCSI_AIC79XX
-       default "15000"
+       default "5000"
        ---help---
        The number of milliseconds to delay after an initial bus reset.
        The bus settle delay following all error recovery actions is
        dictated by the SCSI layer and is not affected by this value.
 
-       Default: 15000 (15 seconds)
+       Default: 5000 (5 seconds)
 
 config AIC79XX_BUILD_FIRMWARE
        bool "Build Adapter Firmware with Kernel Build"
index 2cfdbef447db6ba7c4f7b774fcce47fe5f53e409..1d11f7e77564e9d83379532bc892d06da4b7a4e4 100644 (file)
@@ -37,7 +37,7 @@
  * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  * POSSIBILITY OF SUCH DAMAGES.
  *
- * $Id: //depot/aic7xxx/aic7xxx/aic79xx.h#108 $
+ * $Id: //depot/aic7xxx/aic7xxx/aic79xx.h#109 $
  *
  * $FreeBSD$
  */
@@ -222,6 +222,7 @@ typedef enum {
 typedef enum {
        AHD_FENONE              = 0x00000,
        AHD_WIDE                = 0x00001,/* Wide Channel */
+       AHD_AIC79XXB_SLOWCRC    = 0x00002,/* SLOWCRC bit should be set */
        AHD_MULTI_FUNC          = 0x00100,/* Multi-Function/Channel Device */
        AHD_TARGETMODE          = 0x01000,/* Has tested target mode support */
        AHD_MULTIROLE           = 0x02000,/* Space for two roles at a time */
index 3a3204703b155f374a7cf99cb86dcb2b028f8c05..be14e2ecb8f796b6bdaebe3c4926481160a238bd 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * Aic79xx register and scratch ram definitions.
  *
- * Copyright (c) 1994-2001 Justin T. Gibbs.
+ * Copyright (c) 1994-2001, 2004 Justin T. Gibbs.
  * Copyright (c) 2000-2002 Adaptec Inc.
  * All rights reserved.
  *
@@ -39,7 +39,7 @@
  *
  * $FreeBSD$
  */
-VERSION = "$Id: //depot/aic7xxx/aic7xxx/aic79xx.reg#76 $"
+VERSION = "$Id: //depot/aic7xxx/aic7xxx/aic79xx.reg#77 $"
 
 /*
  * This file is processed by the aic7xxx_asm utility for use in assembling
@@ -3715,8 +3715,9 @@ scratch_ram {
 
        SEQ_FLAGS2 {
                size            1
-               field   TARGET_MSG_PENDING        0x02
-               field   SELECTOUT_QFROZEN         0x04
+               field   PENDING_MK_MESSAGE      0x01
+               field   TARGET_MSG_PENDING      0x02
+               field   SELECTOUT_QFROZEN       0x04
        }
 
        ALLOCFIFO_SCBPTR {
@@ -3777,6 +3778,26 @@ scratch_ram {
        CMDSIZE_TABLE {
                size            8
        }
+       /*
+        * When an SCB with the MK_MESSAGE flag is
+        * queued to the controller, it cannot enter
+        * the waiting for selection list until the
+        * selections for any previously queued
+        * commands to that target complete.  During
+        * the wait, the MK_MESSAGE SCB is queued
+        * here.
+        */
+       MK_MESSAGE_SCB {
+               size            2
+       }
+       /*
+        * Saved SCSIID of MK_MESSAGE_SCB to avoid
+        * an extra SCBPTR operation when deciding
+        * if the MK_MESSAGE_SCB can be run.
+        */
+       MK_MESSAGE_SCSIID {
+               size            1
+       }
 }
 
 /************************* Hardware SCB Definition ****************************/
index bef1f9d369b6b90779d13f3ee54a53c7a105bb3a..58bc17591b54ce56082d67aa937f9a8f7e51a0fe 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * Adaptec U320 device driver firmware for Linux and FreeBSD.
  *
- * Copyright (c) 1994-2001 Justin T. Gibbs.
+ * Copyright (c) 1994-2001, 2004 Justin T. Gibbs.
  * Copyright (c) 2000-2002 Adaptec Inc.
  * All rights reserved.
  *
@@ -40,7 +40,7 @@
  * $FreeBSD$
  */
 
-VERSION = "$Id: //depot/aic7xxx/aic7xxx/aic79xx.seq#119 $"
+VERSION = "$Id: //depot/aic7xxx/aic7xxx/aic79xx.seq#120 $"
 PATCH_ARG_LIST = "struct ahd_softc *ahd"
 PREFIX = "ahd_"
 
@@ -110,10 +110,8 @@ check_waiting_list:
         * one last time.
         */
        test    SSTAT0, SELDO jnz select_out;
-END_CRITICAL;
        call    start_selection;
 idle_loop_checkbus:
-BEGIN_CRITICAL;
        test    SSTAT0, SELDO jnz select_out;
 END_CRITICAL;
        test    SSTAT0, SELDI jnz select_in;
@@ -294,7 +292,6 @@ fetch_new_scb_inprog:
        test    CCSCBCTL, ARRDONE jz return;
 fetch_new_scb_done:
        and     CCSCBCTL, ~(CCARREN|CCSCBEN);
-       bmov    REG0, SCBPTR, 2;
        clr     A;
        add     CMDS_PENDING, 1;
        adc     CMDS_PENDING[1], A;
@@ -316,43 +313,117 @@ fetch_new_scb_done:
        clr     SCB_FIFO_USE_COUNT;
        /* Update the next SCB address to download. */
        bmov    NEXT_QUEUED_SCB_ADDR, SCB_NEXT_SCB_BUSADDR, 4;
+       /*
+        * NULL out the SCB links since these fields
+        * occupy the same location as SCB_NEXT_SCB_BUSADDR.
+        */
        mvi     SCB_NEXT[1], SCB_LIST_NULL;
        mvi     SCB_NEXT2[1], SCB_LIST_NULL;
        /* Increment our position in the QINFIFO. */
        mov     NONE, SNSCB_QOFF;
+
        /*
-        * SCBs that want to send messages are always
-        * queued independently.  This ensures that they
-        * are at the head of the SCB list to select out
-        * to a target and we will see the MK_MESSAGE flag.
+        * Save SCBID of this SCB in REG0 since
+        * SCBPTR will be clobbered during target
+        * list updates.  We also record the SCB's
+        * flags so that we can refer to them even
+        * after SCBPTR has been changed.
+        */
+       bmov    REG0, SCBPTR, 2;
+       mov     A, SCB_CONTROL;
+
+       /*
+        * Find the tail SCB of the execution queue
+        * for this target.
         */
-       test    SCB_CONTROL, MK_MESSAGE jnz first_new_target_scb;
        shr     SINDEX, 3, SCB_SCSIID;
        and     SINDEX, ~0x1;
        mvi     SINDEX[1], (WAITING_SCB_TAILS >> 8);
        bmov    DINDEX, SINDEX, 2;
        bmov    SCBPTR, SINDIR, 2;
+
+       /*
+        * Update the tail to point to the new SCB.
+        */
        bmov    DINDIR, REG0, 2;
+
+       /*
+        * If the queue was empty, queue this SCB as
+        * the first for this target.
+        */
        cmp     SCBPTR[1], SCB_LIST_NULL je first_new_target_scb;
+
+       /*
+        * SCBs that want to send messages must always be
+        * at the head of their per-target queue so that
+        * ATN can be asserted even if the current
+        * negotiation agreement is packetized.  If the
+        * target queue is empty, the SCB can be queued
+        * immediately.  If the queue is not empty, we must
+        * wait for it to empty before entering this SCB
+        * into the waiting for selection queue.  Otherwise
+        * our batching and round-robin selection scheme 
+        * could allow commands to be queued out of order.
+        * To simplify the implementation, we stop pulling
+        * new commands from the host until the MK_MESSAGE
+        * SCB can be queued to the waiting for selection
+        * list.
+        */
+       test    A, MK_MESSAGE jz batch_scb; 
+
+       /*
+        * If the last SCB is also a MK_MESSAGE SCB, then
+        * order is preserved even if we batch.
+        */
+       test    SCB_CONTROL, MK_MESSAGE jz batch_scb; 
+
+       /*
+        * Defer this SCB and stop fetching new SCBs until
+        * it can be queued.  Since the SCB_SCSIID of the
+        * tail SCB must be the same as that of the newly
+        * queued SCB, there is no need to restore the SCBID
+        * here.
+        */
+       or      SEQ_FLAGS2, PENDING_MK_MESSAGE;
+       bmov    MK_MESSAGE_SCB, REG0, 2;
+       mov     MK_MESSAGE_SCSIID, SCB_SCSIID ret;
+
+batch_scb:
+       /*
+        * Otherwise just update the previous tail SCB to
+        * point to the new tail.
+        */
        bmov    SCB_NEXT, REG0, 2 ret;
+
 first_new_target_scb:
+       /*
+        * Append SCB to the tail of the waiting for
+        * selection list.
+        */
        cmp     WAITING_TID_HEAD[1], SCB_LIST_NULL je first_new_scb;
        bmov    SCBPTR, WAITING_TID_TAIL, 2;
        bmov    SCB_NEXT2, REG0, 2;
        bmov    WAITING_TID_TAIL, REG0, 2 ret;
 first_new_scb:
+       /*
+        * Whole list is empty, so the head of
+        * the list must be initialized too.
+        */
        bmov    WAITING_TID_HEAD, REG0, 2;
        bmov    WAITING_TID_TAIL, REG0, 2 ret;
 END_CRITICAL;
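The comments in the hunk above describe when a MK_MESSAGE SCB has to be parked instead of batched onto its target's selection queue. A deliberately simplified C rendering of that rule follows (the real logic is sequencer firmware; every identifier below is a hypothetical stand-in, and the special case of a MK_MESSAGE SCB already sitting at the queue tail is omitted):

	/* Simplified, illustrative-only rendering of the deferral rule above. */
	static void queue_new_scb(struct scb_state *new_scb, int tid)
	{
		if ((new_scb->control & MK_MESSAGE) && !per_target_queue_empty(tid)) {
			/* Park the SCB and stop fetching new ones until this
			 * target's queue drains, so the MK_MESSAGE SCB ends up
			 * at the head of its selection queue. */
			seq_flags2 |= PENDING_MK_MESSAGE;
			mk_message_scb = new_scb;
			mk_message_scsiid = new_scb->scsiid;
		} else {
			/* Ordering is preserved: append to the per-target queue. */
			append_to_waiting_queue(tid, new_scb);
		}
	}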
 
 scbdma_idle:
        /*
-        * Give precedence to downloading new SCBs to execute
-        * unless select-outs are currently frozen.
+        * Don't bother downloading new SCBs to execute
+        * if select-outs are currently frozen or we have
+        * a MK_MESSAGE SCB waiting to enter the queue.
         */
-       test    SEQ_FLAGS2, SELECTOUT_QFROZEN jnz . + 2;
+       test    SEQ_FLAGS2, SELECTOUT_QFROZEN|PENDING_MK_MESSAGE
+               jnz scbdma_no_new_scbs;
 BEGIN_CRITICAL;
        test    QOFF_CTLSTA, NEW_SCB_AVAIL jnz fetch_new_scb;
+scbdma_no_new_scbs:
        cmp     COMPLETE_DMA_SCB_HEAD[1], SCB_LIST_NULL jne dma_complete_scb;
        cmp     COMPLETE_SCB_HEAD[1], SCB_LIST_NULL je return;
        /* FALLTHROUGH */
@@ -671,27 +742,41 @@ curscb_ww_done:
        }
 
        /*
-        * Requeue any SCBs not sent, to the tail of the waiting Q.
+        * The whole list made it.  Clear our tail pointer to indicate
+        * that the per-target selection queue is now empty.
         */
-       cmp     SCB_NEXT[1], SCB_LIST_NULL je select_out_list_done;
+       cmp     SCB_NEXT[1], SCB_LIST_NULL je select_out_clear_tail;
 
        /*
+        * Requeue any SCBs not sent, to the tail of the waiting Q.
         * We know that neither the per-TID list nor the list of
-        * TIDs is empty.  Use this knowledge to our advantage.
+        * TIDs is empty.  Use this knowledge to our advantage and
+        * queue the remainder to the tail of the global execution
+        * queue.
         */
        bmov    REG0, SCB_NEXT, 2;
+select_out_queue_remainder:
        bmov    SCBPTR, WAITING_TID_TAIL, 2;
        bmov    SCB_NEXT2, REG0, 2;
        bmov    WAITING_TID_TAIL, REG0, 2;
        jmp     select_out_inc_tid_q;
 
-select_out_list_done:
+select_out_clear_tail:
+       /*
+        * Queue any pending MK_MESSAGE SCB for this target now
+        * that the queue is empty.
+        */
+       test    SEQ_FLAGS2, PENDING_MK_MESSAGE jz select_out_no_mk_message_scb;
+       mov     A, MK_MESSAGE_SCSIID;
+       cmp     SCB_SCSIID, A jne select_out_no_mk_message_scb;
+       and     SEQ_FLAGS2, ~PENDING_MK_MESSAGE;
+       bmov    REG0, MK_MESSAGE_SCB, 2;
+       jmp select_out_queue_remainder;
+
+select_out_no_mk_message_scb:
        /*
-        * The whole list made it.  Just clear our TID's tail pointer
-        * unless we were queued independently due to our need to
-        * send a message.
+        * Clear this target's execution tail and increment the queue.
         */
-       test    SCB_CONTROL, MK_MESSAGE jnz select_out_inc_tid_q;
        shr     DINDEX, 3, SCB_SCSIID;
        or      DINDEX, 1;      /* Want only the second byte */
        mvi     DINDEX[1], ((WAITING_SCB_TAILS) >> 8);
@@ -703,8 +788,8 @@ select_out_inc_tid_q:
        mvi     WAITING_TID_TAIL[1], SCB_LIST_NULL;
        bmov    SCBPTR, CURRSCB, 2;
        mvi     CLRSINT0, CLRSELDO;
-       test    LQOSTAT2, LQOPHACHGOUTPKT jnz unexpected_nonpkt_phase;
-       test    LQOSTAT1, LQOPHACHGINPKT jnz unexpected_nonpkt_phase;
+       test    LQOSTAT2, LQOPHACHGOUTPKT jnz unexpected_nonpkt_mode_cleared;
+       test    LQOSTAT1, LQOPHACHGINPKT jnz unexpected_nonpkt_mode_cleared;
 
        /*
         * If this is a packetized connection, return to our
@@ -2127,6 +2212,18 @@ SET_DST_MODE     M_DFF0;
        mvi     DFFSXFRCTL, CLRCHN;
 unexpected_nonpkt_mode_cleared:
        mvi     CLRSINT2, CLRNONPACKREQ;
+       if ((ahd->bugs & AHD_BUSFREEREV_BUG) != 0) {
+               /*
+                * Test to ensure that the bus has not
+                * already gone free prior to clearing
+                * any stale busfree status.  This avoids
+                * a window whereby a busfree just after
+                * a selection could be missed.
+                */
+               test    SCSISIGI, BSYI jz . + 2;
+               mvi     CLRSINT1,CLRBUSFREE;
+               or      SIMODE1, ENBUSFREE;
+       }
        test    SCSIPHASE, ~(MSG_IN_PHASE|MSG_OUT_PHASE) jnz illegal_phase;
        SET_SEQINTCODE(ENTERING_NONPACK)
        jmp     ITloop;
index db8f5ce99ee3f815a018a1264bb3c50299a7ce99..342f77966a5ba6d066f2ba3eb96c3ba9801845ef 100644 (file)
@@ -37,7 +37,7 @@
  * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  * POSSIBILITY OF SUCH DAMAGES.
  *
- * $Id: //depot/aic7xxx/aic7xxx/aic79xx.c#247 $
+ * $Id: //depot/aic7xxx/aic7xxx/aic79xx.c#250 $
  */
 
 #ifdef __linux__
@@ -197,7 +197,8 @@ static int          ahd_search_scb_list(struct ahd_softc *ahd, int target,
                                            char channel, int lun, u_int tag,
                                            role_t role, uint32_t status,
                                            ahd_search_action action,
-                                           u_int *list_head, u_int tid);
+                                           u_int *list_head, u_int *list_tail,
+                                           u_int tid);
 static void            ahd_stitch_tid_list(struct ahd_softc *ahd,
                                            u_int tid_prev, u_int tid_cur,
                                            u_int tid_next);
@@ -1660,7 +1661,8 @@ ahd_handle_scsiint(struct ahd_softc *ahd, u_int intstat)
                 * so just clear the error.
                 */
                ahd_outb(ahd, CLRLQIINT1, CLRLQICRCI_NLQ);
-       } else if ((status & BUSFREE) != 0) {
+       } else if ((status & BUSFREE) != 0
+               || (lqistat1 & LQOBUSFREE) != 0) {
                u_int lqostat1;
                int   restart;
                int   clear_fifo;
@@ -2025,10 +2027,6 @@ ahd_handle_pkt_busfree(struct ahd_softc *ahd, u_int busfreetime)
                u_int waiting_t;
                u_int next;
 
-               if ((busfreetime & BUSFREE_LQO) == 0)
-                       printf("%s: Warning, BUSFREE time is 0x%x.  "
-                              "Expected BUSFREE_LQO.\n",
-                              ahd_name(ahd), busfreetime);
                /*
                 * The LQO manager detected an unexpected busfree
                 * either:
@@ -2251,8 +2249,14 @@ ahd_handle_nonpkt_busfree(struct ahd_softc *ahd)
                        struct ahd_tmode_tstate *tstate;
 
                        /*
-                        * PPR Rejected.  Try non-ppr negotiation
-                        * and retry command.
+                        * PPR Rejected.
+                        *
+                        * If the previous negotiation was packetized,
+                        * this could be because the device has been
+                        * reset without our knowledge.  Force our
+                        * current negotiation to async and retry the
+                        * negotiation.  Otherwise retry the command
+                        * with non-ppr negotiation.
                         */
 #ifdef AHD_DEBUG
                        if ((ahd_debug & AHD_SHOW_MESSAGES) != 0)
@@ -2261,11 +2265,34 @@ ahd_handle_nonpkt_busfree(struct ahd_softc *ahd)
                        tinfo = ahd_fetch_transinfo(ahd, devinfo.channel,
                                                    devinfo.our_scsiid,
                                                    devinfo.target, &tstate);
-                       tinfo->curr.transport_version = 2;
-                       tinfo->goal.transport_version = 2;
-                       tinfo->goal.ppr_options = 0;
-                       ahd_qinfifo_requeue_tail(ahd, scb);
-                       printerror = 0;
+                       if ((tinfo->curr.ppr_options & MSG_EXT_PPR_IU_REQ)!=0) {
+                               ahd_set_width(ahd, &devinfo,
+                                             MSG_EXT_WDTR_BUS_8_BIT,
+                                             AHD_TRANS_CUR,
+                                             /*paused*/TRUE);
+                               ahd_set_syncrate(ahd, &devinfo,
+                                               /*period*/0, /*offset*/0,
+                                               /*ppr_options*/0,
+                                               AHD_TRANS_CUR,
+                                               /*paused*/TRUE);
+                               /*
+                                * The expected PPR busfree handler below
+                                * will effect the retry and necessary
+                                * abort.
+                                */
+                       } else {
+                               tinfo->curr.transport_version = 2;
+                               tinfo->goal.transport_version = 2;
+                               tinfo->goal.ppr_options = 0;
+                               /*
+                                * Remove any SCBs in the waiting for selection
+                                * queue that may also be for this target so
+                                * that command ordering is preserved.
+                                */
+                               ahd_freeze_devq(ahd, scb);
+                               ahd_qinfifo_requeue_tail(ahd, scb);
+                               printerror = 0;
+                       }
                } else if (ahd_sent_msg(ahd, AHDMSG_EXT, MSG_EXT_WDTR, FALSE)
                        && ppr_busfree == 0) {
                        /*
@@ -2280,6 +2307,12 @@ ahd_handle_nonpkt_busfree(struct ahd_softc *ahd)
                                      MSG_EXT_WDTR_BUS_8_BIT,
                                      AHD_TRANS_CUR|AHD_TRANS_GOAL,
                                      /*paused*/TRUE);
+                       /*
+                        * Remove any SCBs in the waiting for selection
+                        * queue that may also be for this target so that
+                        * command ordering is preserved.
+                        */
+                       ahd_freeze_devq(ahd, scb);
                        ahd_qinfifo_requeue_tail(ahd, scb);
                        printerror = 0;
                } else if (ahd_sent_msg(ahd, AHDMSG_EXT, MSG_EXT_SDTR, FALSE)
@@ -2297,6 +2330,12 @@ ahd_handle_nonpkt_busfree(struct ahd_softc *ahd)
                                        /*ppr_options*/0,
                                        AHD_TRANS_CUR|AHD_TRANS_GOAL,
                                        /*paused*/TRUE);
+                       /*
+                        * Remove any SCBs in the waiting for selection
+                        * queue that may also be for this target so that
+                        * command ordering is preserved.
+                        */
+                       ahd_freeze_devq(ahd, scb);
                        ahd_qinfifo_requeue_tail(ahd, scb);
                        printerror = 0;
                } else if ((ahd->msg_flags & MSG_FLAG_EXPECT_IDE_BUSFREE) != 0
@@ -2369,14 +2408,14 @@ ahd_handle_nonpkt_busfree(struct ahd_softc *ahd)
                         */
                        printf("%s: ", ahd_name(ahd));
                }
-               if (lastphase != P_BUSFREE)
-                       ahd_force_renegotiation(ahd, &devinfo);
                printf("Unexpected busfree %s, %d SCBs aborted, "
                       "PRGMCNT == 0x%x\n",
                       ahd_lookup_phase_entry(lastphase)->phasemsg,
                       aborted,
                       ahd_inw(ahd, PRGMCNT));
                ahd_dump_card_state(ahd);
+               if (lastphase != P_BUSFREE)
+                       ahd_force_renegotiation(ahd, &devinfo);
        }
        /* Always restart the sequencer. */
        return (1);
@@ -3292,6 +3331,15 @@ ahd_update_neg_table(struct ahd_softc *ahd, struct ahd_devinfo *devinfo,
        if (tinfo->width == MSG_EXT_WDTR_BUS_16_BIT)
                con_opts |= WIDEXFER;
 
+       /*
+        * Slow down our CRC interval to be
+        * compatible with packetized U320 devices
+        * that can't handle a CRC at full speed
+        */
+       if (ahd->features & AHD_AIC79XXB_SLOWCRC) {
+               con_opts |= ENSLOWCRC;
+       }
+
        /*
         * During packetized transfers, the target will
         * give us the opportunity to send command packets
@@ -3315,7 +3363,6 @@ ahd_update_pending_scbs(struct ahd_softc *ahd)
 {
        struct          scb *pending_scb;
        int             pending_scb_count;
-       u_int           scb_tag;
        int             paused;
        u_int           saved_scbptr;
        ahd_mode_state  saved_modes;
@@ -3333,7 +3380,6 @@ ahd_update_pending_scbs(struct ahd_softc *ahd)
        pending_scb_count = 0;
        LIST_FOREACH(pending_scb, &ahd->pending_scbs, pending_links) {
                struct ahd_devinfo devinfo;
-               struct hardware_scb *pending_hscb;
                struct ahd_initiator_tinfo *tinfo;
                struct ahd_tmode_tstate *tstate;
 
@@ -3341,11 +3387,10 @@ ahd_update_pending_scbs(struct ahd_softc *ahd)
                tinfo = ahd_fetch_transinfo(ahd, devinfo.channel,
                                            devinfo.our_scsiid,
                                            devinfo.target, &tstate);
-               pending_hscb = pending_scb->hscb;
                if ((tstate->auto_negotiate & devinfo.target_mask) == 0
                 && (pending_scb->flags & SCB_AUTO_NEGOTIATE) != 0) {
                        pending_scb->flags &= ~SCB_AUTO_NEGOTIATE;
-                       pending_hscb->control &= ~MK_MESSAGE;
+                       pending_scb->hscb->control &= ~MK_MESSAGE;
                }
                ahd_sync_scb(ahd, pending_scb,
                             BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
@@ -3377,18 +3422,15 @@ ahd_update_pending_scbs(struct ahd_softc *ahd)
                ahd_outb(ahd, SCSISEQ0, ahd_inb(ahd, SCSISEQ0) & ~ENSELO);
        saved_scbptr = ahd_get_scbptr(ahd);
        /* Ensure that the hscbs down on the card match the new information */
-       for (scb_tag = 0; scb_tag < ahd->scb_data.maxhscbs; scb_tag++) {
-               struct  hardware_scb *pending_hscb;
+       LIST_FOREACH(pending_scb, &ahd->pending_scbs, pending_links) {
+               u_int   scb_tag;
                u_int   control;
 
-               pending_scb = ahd_lookup_scb(ahd, scb_tag);
-               if (pending_scb == NULL)
-                       continue;
+               scb_tag = SCB_GET_TAG(pending_scb);
                ahd_set_scbptr(ahd, scb_tag);
-               pending_hscb = pending_scb->hscb;
                control = ahd_inb_scbram(ahd, SCB_CONTROL);
                control &= ~MK_MESSAGE;
-               control |= pending_hscb->control & MK_MESSAGE;
+               control |= pending_scb->hscb->control & MK_MESSAGE;
                ahd_outb(ahd, SCB_CONTROL, control);
        }
        ahd_set_scbptr(ahd, saved_scbptr);
@@ -6500,13 +6542,14 @@ ahd_chip_init(struct ahd_softc *ahd)
                              | ENLQIOVERI_LQ|ENLQIOVERI_NLQ);
        ahd_outb(ahd, LQOMODE0, ENLQOATNLQ|ENLQOATNPKT|ENLQOTCRC);
        /*
-        * An interrupt from LQOBUSFREE is made redundant by the
-        * BUSFREE interrupt.  We choose to have the sequencer catch
-        * LQOPHCHGINPKT errors manually for the command phase at the
-        * start of a packetized selection case.
-       ahd_outb(ahd, LQOMODE1, ENLQOBUSFREE|ENLQOPHACHGINPKT);
+        * We choose to have the sequencer catch LQOPHCHGINPKT errors
+        * manually for the command phase at the start of a packetized
+        * selection case.  ENLQOBUSFREE should be made redundant by
+        * the BUSFREE interrupt, but it seems that some LQOBUSFREE
+        * events fail to assert the BUSFREE interrupt so we must
+        * also enable LQOBUSFREE interrupts.
         */
-       ahd_outb(ahd, LQOMODE1, 0);
+       ahd_outb(ahd, LQOMODE1, ENLQOBUSFREE);
 
        /*
         * Setup sequencer interrupt handlers.
@@ -6617,6 +6660,8 @@ ahd_chip_init(struct ahd_softc *ahd)
        /* We don't have any waiting selections */
        ahd_outw(ahd, WAITING_TID_HEAD, SCB_LIST_NULL);
        ahd_outw(ahd, WAITING_TID_TAIL, SCB_LIST_NULL);
+       ahd_outw(ahd, MK_MESSAGE_SCB, SCB_LIST_NULL);
+       ahd_outw(ahd, MK_MESSAGE_SCSIID, 0xFF);
        for (i = 0; i < AHD_NUM_TARGETS; i++)
                ahd_outw(ahd, WAITING_SCB_TAILS + (2 * i), SCB_LIST_NULL);
 
@@ -6704,6 +6749,18 @@ ahd_chip_init(struct ahd_softc *ahd)
 
        ahd_loadseq(ahd);
        ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
+
+       if (ahd->features & AHD_AIC79XXB_SLOWCRC) {
+               u_int negodat3 = ahd_inb(ahd, NEGCONOPTS);
+
+               negodat3 |= ENSLOWCRC;
+               ahd_outb(ahd, NEGCONOPTS, negodat3);
+               negodat3 = ahd_inb(ahd, NEGCONOPTS);
+               if (!(negodat3 & ENSLOWCRC))
+                       printf("aic79xx: failed to set the SLOWCRC bit\n");
+               else
+                       printf("aic79xx: SLOWCRC bit set\n");
+       }
 }
 
 /*
@@ -7260,12 +7317,28 @@ ahd_reset_cmds_pending(struct ahd_softc *ahd)
        ahd->flags &= ~AHD_UPDATE_PEND_CMDS;
 }
 
+void
+ahd_done_with_status(struct ahd_softc *ahd, struct scb *scb, uint32_t status)
+{
+       cam_status ostat;
+       cam_status cstat;
+
+       ostat = ahd_get_transaction_status(scb);
+       if (ostat == CAM_REQ_INPROG)
+               ahd_set_transaction_status(scb, status);
+       cstat = ahd_get_transaction_status(scb);
+       if (cstat != CAM_REQ_CMP)
+               ahd_freeze_scb(scb);
+       ahd_done(ahd, scb);
+}
+
 int
 ahd_search_qinfifo(struct ahd_softc *ahd, int target, char channel,
                   int lun, u_int tag, role_t role, uint32_t status,
                   ahd_search_action action)
 {
        struct scb      *scb;
+       struct scb      *mk_msg_scb;
        struct scb      *prev_scb;
        ahd_mode_state   saved_modes;
        u_int            qinstart;
@@ -7274,6 +7347,7 @@ ahd_search_qinfifo(struct ahd_softc *ahd, int target, char channel,
        u_int            tid_next;
        u_int            tid_prev;
        u_int            scbid;
+       u_int            seq_flags2;
        u_int            savedscbptr;
        uint32_t         busaddr;
        int              found;
@@ -7329,23 +7403,10 @@ ahd_search_qinfifo(struct ahd_softc *ahd, int target, char channel,
                        found++;
                        switch (action) {
                        case SEARCH_COMPLETE:
-                       {
-                               cam_status ostat;
-                               cam_status cstat;
-
-                               ostat = ahd_get_transaction_status(scb);
-                               if (ostat == CAM_REQ_INPROG)
-                                       ahd_set_transaction_status(scb,
-                                                                  status);
-                               cstat = ahd_get_transaction_status(scb);
-                               if (cstat != CAM_REQ_CMP)
-                                       ahd_freeze_scb(scb);
                                if ((scb->flags & SCB_ACTIVE) == 0)
                                        printf("Inactive SCB in qinfifo\n");
-                               ahd_done(ahd, scb);
-
+                               ahd_done_with_status(ahd, scb, status);
                                /* FALLTHROUGH */
-                       }
                        case SEARCH_REMOVE:
                                break;
                        case SEARCH_PRINT:
@@ -7375,21 +7436,24 @@ ahd_search_qinfifo(struct ahd_softc *ahd, int target, char channel,
         * looking for matches.
         */
        ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
+       seq_flags2 = ahd_inb(ahd, SEQ_FLAGS2);
+       if ((seq_flags2 & PENDING_MK_MESSAGE) != 0) {
+               scbid = ahd_inw(ahd, MK_MESSAGE_SCB);
+               mk_msg_scb = ahd_lookup_scb(ahd, scbid);
+       } else
+               mk_msg_scb = NULL;
        savedscbptr = ahd_get_scbptr(ahd);
        tid_next = ahd_inw(ahd, WAITING_TID_HEAD);
        tid_prev = SCB_LIST_NULL;
        targets = 0;
        for (scbid = tid_next; !SCBID_IS_NULL(scbid); scbid = tid_next) {
                u_int tid_head;
+               u_int tid_tail;
 
-               /*
-                * We limit based on the number of SCBs since
-                * MK_MESSAGE SCBs are not in the per-tid lists.
-                */
                targets++;
-               if (targets > AHD_SCB_MAX) {
+               if (targets > AHD_NUM_TARGETS)
                        panic("TID LIST LOOP");
-               }
+
                if (scbid >= ahd->scb_data.numscbs) {
                        printf("%s: Waiting TID List inconsistency. "
                               "SCB index == 0x%x, yet numscbs == 0x%x.",
@@ -7419,8 +7483,71 @@ ahd_search_qinfifo(struct ahd_softc *ahd, int target, char channel,
                tid_head = scbid;
                found += ahd_search_scb_list(ahd, target, channel,
                                             lun, tag, role, status,
-                                            action, &tid_head,
+                                            action, &tid_head, &tid_tail,
                                             SCB_GET_TARGET(ahd, scb));
+               /*
+                * Check any MK_MESSAGE SCB that is still waiting to
+                * enter this target's waiting for selection queue.
+                */
+               if (mk_msg_scb != NULL
+                && ahd_match_scb(ahd, mk_msg_scb, target, channel,
+                                 lun, tag, role)) {
+
+                       /*
+                        * We found an scb that needs to be acted on.
+                        */
+                       found++;
+                       switch (action) {
+                       case SEARCH_COMPLETE:
+                               if ((mk_msg_scb->flags & SCB_ACTIVE) == 0)
+                                       printf("Inactive SCB pending MK_MSG\n");
+                               ahd_done_with_status(ahd, mk_msg_scb, status);
+                               /* FALLTHROUGH */
+                       case SEARCH_REMOVE:
+                       {
+                               u_int tail_offset;
+
+                               printf("Removing MK_MSG scb\n");
+
+                               /*
+                                * Reset our tail to the tail of the
+                                * main per-target list.
+                                */
+                               tail_offset = WAITING_SCB_TAILS
+                                   + (2 * SCB_GET_TARGET(ahd, mk_msg_scb));
+                               ahd_outw(ahd, tail_offset, tid_tail);
+
+                               seq_flags2 &= ~PENDING_MK_MESSAGE;
+                               ahd_outb(ahd, SEQ_FLAGS2, seq_flags2);
+                               ahd_outw(ahd, CMDS_PENDING,
+                                        ahd_inw(ahd, CMDS_PENDING)-1);
+                               mk_msg_scb = NULL;
+                               break;
+                       }
+                       case SEARCH_PRINT:
+                               printf(" 0x%x", SCB_GET_TAG(scb));
+                               /* FALLTHROUGH */
+                       case SEARCH_COUNT:
+                               break;
+                       }
+               }
+
+               if (mk_msg_scb != NULL
+                && SCBID_IS_NULL(tid_head)
+                && ahd_match_scb(ahd, scb, target, channel, CAM_LUN_WILDCARD,
+                                 SCB_LIST_NULL, ROLE_UNKNOWN)) {
+
+                       /*
+                        * When removing the last SCB for a target
+                        * queue with a pending MK_MESSAGE scb, we
+                        * must queue the MK_MESSAGE scb.
+                        */
+                       printf("Queueing mk_msg_scb\n");
+                       tid_head = ahd_inw(ahd, MK_MESSAGE_SCB);
+                       seq_flags2 &= ~PENDING_MK_MESSAGE;
+                       ahd_outb(ahd, SEQ_FLAGS2, seq_flags2);
+                       mk_msg_scb = NULL;
+               }
                if (tid_head != scbid)
                        ahd_stitch_tid_list(ahd, tid_prev, tid_head, tid_next);
                if (!SCBID_IS_NULL(tid_head))
@@ -7428,6 +7555,8 @@ ahd_search_qinfifo(struct ahd_softc *ahd, int target, char channel,
                if (action == SEARCH_PRINT)
                        printf(")\n");
        }
+
+       /* Restore saved state. */
        ahd_set_scbptr(ahd, savedscbptr);
        ahd_restore_modes(ahd, saved_modes);
        return (found);
@@ -7436,7 +7565,8 @@ ahd_search_qinfifo(struct ahd_softc *ahd, int target, char channel,
 static int
 ahd_search_scb_list(struct ahd_softc *ahd, int target, char channel,
                    int lun, u_int tag, role_t role, uint32_t status,
-                   ahd_search_action action, u_int *list_head, u_int tid)
+                   ahd_search_action action, u_int *list_head, 
+                   u_int *list_tail, u_int tid)
 {
        struct  scb *scb;
        u_int   scbid;
@@ -7448,6 +7578,7 @@ ahd_search_scb_list(struct ahd_softc *ahd, int target, char channel,
        found = 0;
        prev = SCB_LIST_NULL;
        next = *list_head;
+       *list_tail = SCB_LIST_NULL;
        for (scbid = next; !SCBID_IS_NULL(scbid); scbid = next) {
                if (scbid >= ahd->scb_data.numscbs) {
                        printf("%s:SCB List inconsistency. "
@@ -7463,6 +7594,7 @@ ahd_search_scb_list(struct ahd_softc *ahd, int target, char channel,
                        panic("Waiting List traversal\n");
                }
                ahd_set_scbptr(ahd, scbid);
+               *list_tail = scbid;
                next = ahd_inw_scbram(ahd, SCB_NEXT);
                if (ahd_match_scb(ahd, scb, target, channel,
                                  lun, SCB_LIST_NULL, role) == 0) {
@@ -7472,24 +7604,14 @@ ahd_search_scb_list(struct ahd_softc *ahd, int target, char channel,
                found++;
                switch (action) {
                case SEARCH_COMPLETE:
-               {
-                       cam_status ostat;
-                       cam_status cstat;
-
-                       ostat = ahd_get_transaction_status(scb);
-                       if (ostat == CAM_REQ_INPROG)
-                               ahd_set_transaction_status(scb, status);
-                       cstat = ahd_get_transaction_status(scb);
-                       if (cstat != CAM_REQ_CMP)
-                               ahd_freeze_scb(scb);
                        if ((scb->flags & SCB_ACTIVE) == 0)
                                printf("Inactive SCB in Waiting List\n");
-                       ahd_done(ahd, scb);
+                       ahd_done_with_status(ahd, scb, status);
                        /* FALLTHROUGH */
-               }
                case SEARCH_REMOVE:
                        ahd_rem_wscb(ahd, scbid, prev, next, tid);
-                       if (prev == SCB_LIST_NULL)
+                       *list_tail = prev;
+                       if (SCBID_IS_NULL(prev))
                                *list_head = next;
                        break;
                case SEARCH_PRINT:
@@ -7558,14 +7680,17 @@ ahd_rem_wscb(struct ahd_softc *ahd, u_int scbid,
        }
 
        /*
-        * SCBs that had MK_MESSAGE set in them will not
-        * be queued to the per-target lists, so don't
-        * blindly clear the tail pointer.
+        * SCBs that have MK_MESSAGE set in them may
+        * cause the tail pointer to be updated without
+        * setting the next pointer of the previous tail.
+        * Only clear the tail if the removed SCB was
+        * the tail.
         */
        tail_offset = WAITING_SCB_TAILS + (2 * tid);
        if (SCBID_IS_NULL(next)
         && ahd_inw(ahd, tail_offset) == scbid)
                ahd_outw(ahd, tail_offset, prev);
+
        ahd_add_scb_to_free_list(ahd, scbid);
        return (next);
 }
@@ -8148,11 +8273,6 @@ ahd_handle_scsi_status(struct ahd_softc *ahd, struct scb *scb)
                ahd_setup_data_scb(ahd, scb);
                scb->flags |= SCB_SENSE;
                ahd_queue_scb(ahd, scb);
-               /*
-                * Ensure we have enough time to actually
-                * retrieve the sense.
-                */
-               ahd_scb_timer_reset(scb, 5 * 1000000);
                break;
        }
        case SCSI_STATUS_OK:
@@ -8793,6 +8913,9 @@ ahd_dump_card_state(struct ahd_softc *ahd)
         * Mode independent registers.
         */
        cur_col = 0;
+       ahd_intstat_print(ahd_inb(ahd, INTSTAT), &cur_col, 50);
+       ahd_seloid_print(ahd_inb(ahd, SELOID), &cur_col, 50);
+       ahd_selid_print(ahd_inb(ahd, SELID), &cur_col, 50);
        ahd_hs_mailbox_print(ahd_inb(ahd, LOCAL_HS_MAILBOX), &cur_col, 50);
        ahd_intctl_print(ahd_inb(ahd, INTCTL), &cur_col, 50);
        ahd_seqintstat_print(ahd_inb(ahd, SEQINTSTAT), &cur_col, 50);
@@ -8808,6 +8931,12 @@ ahd_dump_card_state(struct ahd_softc *ahd)
        ahd_seqintctl_print(ahd_inb(ahd, SEQINTCTL), &cur_col, 50);
        ahd_seq_flags_print(ahd_inb(ahd, SEQ_FLAGS), &cur_col, 50);
        ahd_seq_flags2_print(ahd_inb(ahd, SEQ_FLAGS2), &cur_col, 50);
+       ahd_qfreeze_count_print(ahd_inw(ahd, QFREEZE_COUNT), &cur_col, 50);
+       ahd_kernel_qfreeze_count_print(ahd_inw(ahd, KERNEL_QFREEZE_COUNT),
+                                      &cur_col, 50);
+       ahd_mk_message_scb_print(ahd_inw(ahd, MK_MESSAGE_SCB), &cur_col, 50);
+       ahd_mk_message_scsiid_print(ahd_inb(ahd, MK_MESSAGE_SCSIID),
+                                   &cur_col, 50);
        ahd_sstat0_print(ahd_inb(ahd, SSTAT0), &cur_col, 50);
        ahd_sstat1_print(ahd_inb(ahd, SSTAT1), &cur_col, 50);
        ahd_sstat2_print(ahd_inb(ahd, SSTAT2), &cur_col, 50);
@@ -8915,7 +9044,7 @@ ahd_dump_card_state(struct ahd_softc *ahd)
 
                ahd_set_modes(ahd, AHD_MODE_DFF0 + i, AHD_MODE_DFF0 + i);
                fifo_scbptr = ahd_get_scbptr(ahd);
-               printf("\n%s: FIFO%d %s, LONGJMP == 0x%x, SCB 0x%x\n",
+               printf("\n\n%s: FIFO%d %s, LONGJMP == 0x%x, SCB 0x%x\n",
                       ahd_name(ahd), i,
                       (dffstat & (FIFO0FREE << i)) ? "Free" : "Active",
                       ahd_inw(ahd, LONGJMP_ADDR), fifo_scbptr);
@@ -8970,6 +9099,9 @@ ahd_dump_card_state(struct ahd_softc *ahd)
        printf("%s: OS_SPACE_CNT = 0x%x MAXCMDCNT = 0x%x\n",
               ahd_name(ahd), ahd_inb(ahd, OS_SPACE_CNT),
               ahd_inb(ahd, MAXCMDCNT));
+       printf("%s: SAVED_SCSIID = 0x%x SAVED_LUN = 0x%x\n",
+              ahd_name(ahd), ahd_inb(ahd, SAVED_SCSIID),
+              ahd_inb(ahd, SAVED_LUN));
        ahd_simode0_print(ahd_inb(ahd, SIMODE0), &cur_col, 50);
        printf("\n");
        ahd_set_modes(ahd, AHD_MODE_CCHAN, AHD_MODE_CCHAN);
index 91c4f7f484b1271d37cf7b7334998aa30cff8344..8ad3ce945b9e5f36b20953af23ff1717d64403a4 100644 (file)
@@ -37,7 +37,7 @@
  * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  * POSSIBILITY OF SUCH DAMAGES.
  *
- * $Id: //depot/aic7xxx/aic7xxx/aic79xx_inline.h#58 $
+ * $Id: //depot/aic7xxx/aic7xxx/aic79xx_inline.h#59 $
  *
  * $FreeBSD$
  */
@@ -804,9 +804,10 @@ ahd_queue_scb(struct ahd_softc *ahd, struct scb *scb)
                uint64_t host_dataptr;
 
                host_dataptr = ahd_le64toh(scb->hscb->dataptr);
-               printf("%s: Queueing SCB 0x%x bus addr 0x%x - 0x%x%x/0x%x\n",
+               printf("%s: Queueing SCB %d:0x%x bus addr 0x%x - 0x%x%x/0x%x\n",
                       ahd_name(ahd),
-                      SCB_GET_TAG(scb), ahd_le32toh(scb->hscb->hscb_busaddr),
+                      SCB_GET_TAG(scb), scb->hscb->scsiid,
+                      ahd_le32toh(scb->hscb->hscb_busaddr),
                       (u_int)((host_dataptr >> 32) & 0xFFFFFFFF),
                       (u_int)(host_dataptr & 0xFFFFFFFF),
                       ahd_le32toh(scb->hscb->datacnt));
index 2567e29960bd1697565490d2840a3312f6414ab1..7254ea535a160c81b9b016b4a254a8468ea96028 100644 (file)
@@ -314,6 +314,21 @@ static uint32_t aic79xx_seltime;
  */
 uint32_t aic79xx_periodic_otag;
 
+/* Some storage boxes use an LSI chip which has a bug that makes it
+ * impossible to use the aic79xx Rev B chip at 320 speeds.  The following
+ * storage boxes have been reported to be buggy:
+ * EonStor 3U 16-Bay: U16U-G3A3
+ * EonStor 2U 12-Bay: U12U-G3A3
+ * SentinelRAID: 2500F R5 / R6
+ * SentinelRAID: 2500F R1
+ * SentinelRAID: 2500F/1500F
+ * SentinelRAID: 150F
+ * 
+ * To get around this LSI bug, you can set your board to 160 mode
+ * or you can enable the SLOWCRC bit.
+ */
+uint32_t aic79xx_slowcrc;
+
 /*
  * Module information and settable options.
  */
@@ -343,6 +358,7 @@ MODULE_PARM_DESC(aic79xx,
 "      amplitude:<int>         Set the signal amplitude (0-7).\n"
 "      seltime:<int>           Selection Timeout:\n"
 "                              (0/256ms,1/128ms,2/64ms,3/32ms)\n"
+"      slowcrc                 Turn on the SLOWCRC bit (Rev B only)\n"          
 "\n"
 "      Sample /etc/modprobe.conf line:\n"
 "              Enable verbose logging\n"
@@ -1003,6 +1019,7 @@ aic79xx_setup(char *s)
                { "slewrate", NULL },
                { "precomp", NULL },
                { "amplitude", NULL },
+               { "slowcrc", &aic79xx_slowcrc },
        };
 
        end = strchr(s, '\0');
@@ -1072,7 +1089,6 @@ ahd_linux_register_host(struct ahd_softc *ahd, struct scsi_host_template *templa
                return (ENOMEM);
 
        *((struct ahd_softc **)host->hostdata) = ahd;
-       ahd_lock(ahd, &s);
        ahd->platform_data->host = host;
        host->can_queue = AHD_MAX_QUEUE;
        host->cmd_per_lun = 2;
@@ -1083,7 +1099,9 @@ ahd_linux_register_host(struct ahd_softc *ahd, struct scsi_host_template *templa
        host->max_lun = AHD_NUM_LUNS;
        host->max_channel = 0;
        host->sg_tablesize = AHD_NSEG;
+       ahd_lock(ahd, &s);
        ahd_set_unit(ahd, ahd_linux_unit++);
+       ahd_unlock(ahd, &s);
        sprintf(buf, "scsi%d", host->host_no);
        new_name = malloc(strlen(buf) + 1, M_DEVBUF, M_NOWAIT);
        if (new_name != NULL) {
@@ -1093,7 +1111,6 @@ ahd_linux_register_host(struct ahd_softc *ahd, struct scsi_host_template *templa
        host->unique_id = ahd->unit;
        ahd_linux_initialize_scsi_bus(ahd);
        ahd_intr_enable(ahd, TRUE);
-       ahd_unlock(ahd, &s);
 
        host->transportt = ahd_linux_transport_template;
 
@@ -1127,6 +1144,7 @@ ahd_linux_initialize_scsi_bus(struct ahd_softc *ahd)
 {
        u_int target_id;
        u_int numtarg;
+       unsigned long s;
 
        target_id = 0;
        numtarg = 0;
@@ -1139,6 +1157,8 @@ ahd_linux_initialize_scsi_bus(struct ahd_softc *ahd)
        else
                numtarg = (ahd->features & AHD_WIDE) ? 16 : 8;
 
+       ahd_lock(ahd, &s);
+
        /*
         * Force negotiation to async for all targets that
         * will not see an initial bus reset.
@@ -1155,16 +1175,12 @@ ahd_linux_initialize_scsi_bus(struct ahd_softc *ahd)
                ahd_update_neg_request(ahd, &devinfo, tstate,
                                       tinfo, AHD_NEG_ALWAYS);
        }
+       ahd_unlock(ahd, &s);
        /* Give the bus some time to recover */
        if ((ahd->flags & AHD_RESET_BUS_A) != 0) {
                ahd_freeze_simq(ahd);
-               init_timer(&ahd->platform_data->reset_timer);
-               ahd->platform_data->reset_timer.data = (u_long)ahd;
-               ahd->platform_data->reset_timer.expires =
-                   jiffies + (AIC79XX_RESET_DELAY * HZ)/1000;
-               ahd->platform_data->reset_timer.function =
-                   (ahd_linux_callback_t *)ahd_release_simq;
-               add_timer(&ahd->platform_data->reset_timer);
+               msleep(AIC79XX_RESET_DELAY);
+               ahd_release_simq(ahd);
        }
 }
 
@@ -2033,6 +2049,9 @@ ahd_linux_sem_timeout(u_long arg)
 void
 ahd_freeze_simq(struct ahd_softc *ahd)
 {
+       unsigned long s;
+
+       ahd_lock(ahd, &s);
        ahd->platform_data->qfrozen++;
        if (ahd->platform_data->qfrozen == 1) {
                scsi_block_requests(ahd->platform_data->host);
@@ -2040,6 +2059,7 @@ ahd_freeze_simq(struct ahd_softc *ahd)
                                        CAM_LUN_WILDCARD, SCB_LIST_NULL,
                                        ROLE_INITIATOR, CAM_REQUEUE_REQ);
        }
+       ahd_unlock(ahd, &s);
 }
 
 void
@@ -2344,8 +2364,9 @@ done:
                               ahd_name(ahd), dev->active);
                        retval = FAILED;
                }
-       }
-       ahd_unlock(ahd, &flags);
+       } else
+               ahd_unlock(ahd, &flags);
+
        return (retval);
 }
 
index cb74fccc81007130f602eb75dda433a5d890f113..9cb10134510739933510327cdfaeda8db636c199 100644 (file)
@@ -36,7 +36,7 @@
  * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  * POSSIBILITY OF SUCH DAMAGES.
  *
- * $Id: //depot/aic7xxx/linux/drivers/scsi/aic7xxx/aic79xx_osm.h#137 $
+ * $Id: //depot/aic7xxx/linux/drivers/scsi/aic7xxx/aic79xx_osm.h#166 $
  *
  */
 #ifndef _AIC79XX_LINUX_H_
@@ -228,7 +228,6 @@ typedef struct timer_list ahd_timer_t;
 typedef void ahd_linux_callback_t (u_long);  
 static __inline void ahd_timer_reset(ahd_timer_t *timer, int usec,
                                     ahd_callback_t *func, void *arg);
-static __inline void ahd_scb_timer_reset(struct scb *scb, u_int usec);
 
 static __inline void
 ahd_timer_reset(ahd_timer_t *timer, int usec, ahd_callback_t *func, void *arg)
@@ -243,12 +242,6 @@ ahd_timer_reset(ahd_timer_t *timer, int usec, ahd_callback_t *func, void *arg)
        add_timer(timer);
 }
 
-static __inline void
-ahd_scb_timer_reset(struct scb *scb, u_int usec)
-{
-       mod_timer(&scb->io_ctx->eh_timeout, jiffies + (usec * HZ)/1000000);
-}
-
 /***************************** SMP support ************************************/
 #include <linux/spinlock.h>
 
@@ -389,7 +382,6 @@ struct ahd_platform_data {
 
        spinlock_t               spin_lock;
        u_int                    qfrozen;
-       struct timer_list        reset_timer;
        struct semaphore         eh_sem;
        struct Scsi_Host        *host;          /* pointer to scsi host */
 #define AHD_LINUX_NOIRQ        ((uint32_t)~0)
index bf360ae021abb0582482d4702e903a9eda4ac7f8..ebbf7e4ff4cc653e6639caa0ccc8ae5c5867f267 100644 (file)
@@ -220,10 +220,10 @@ ahd_linux_pci_reserve_io_regions(struct ahd_softc *ahd, u_long *base,
        *base2 = pci_resource_start(ahd->dev_softc, 3);
        if (*base == 0 || *base2 == 0)
                return (ENOMEM);
-       if (request_region(*base, 256, "aic79xx") == 0)
+       if (!request_region(*base, 256, "aic79xx"))
                return (ENOMEM);
-       if (request_region(*base2, 256, "aic79xx") == 0) {
-               release_region(*base2, 256);
+       if (!request_region(*base2, 256, "aic79xx")) {
+               release_region(*base, 256);
                return (ENOMEM);
        }
        return (0);
@@ -237,7 +237,7 @@ ahd_linux_pci_reserve_mem_region(struct ahd_softc *ahd,
        u_long  start;
        u_long  base_page;
        u_long  base_offset;
-       int     error;
+       int     error = 0;
 
        if (aic79xx_allow_memio == 0)
                return (ENOMEM);
@@ -245,16 +245,15 @@ ahd_linux_pci_reserve_mem_region(struct ahd_softc *ahd,
        if ((ahd->bugs & AHD_PCIX_MMAPIO_BUG) != 0)
                return (ENOMEM);
 
-       error = 0;
        start = pci_resource_start(ahd->dev_softc, 1);
        base_page = start & PAGE_MASK;
        base_offset = start - base_page;
        if (start != 0) {
                *bus_addr = start;
-               if (request_mem_region(start, 0x1000, "aic79xx") == 0)
+               if (!request_mem_region(start, 0x1000, "aic79xx"))
                        error = ENOMEM;
-               if (error == 0) {
-                       *maddr = ioremap_nocache(base_page, base_offset + 256);
+               if (!error) {
+                       *maddr = ioremap_nocache(base_page, base_offset + 512);
                        if (*maddr == NULL) {
                                error = ENOMEM;
                                release_mem_region(start, 0x1000);
@@ -344,7 +343,7 @@ ahd_pci_map_int(struct ahd_softc *ahd)
 
        error = request_irq(ahd->dev_softc->irq, ahd_linux_isr,
                            SA_SHIRQ, "aic79xx", ahd);
-       if (error == 0)
+       if (!error)
                ahd->platform_data->irq = ahd->dev_softc->irq;
        
        return (-error);
index 196a6344b03703ec9b96182d747f20d5b50eafbb..757242e522c2cab761b8bddc1eeb821f8d5bd4d6 100644 (file)
@@ -38,7 +38,7 @@
  * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  * POSSIBILITY OF SUCH DAMAGES.
  *
- * $Id: //depot/aic7xxx/aic7xxx/aic79xx_pci.c#89 $
+ * $Id: //depot/aic7xxx/aic7xxx/aic79xx_pci.c#92 $
  */
 
 #ifdef __linux__
@@ -950,12 +950,19 @@ ahd_aic790X_setup(struct ahd_softc *ahd)
                if ((ahd->flags & AHD_HP_BOARD) == 0)
                        AHD_SET_SLEWRATE(ahd, AHD_SLEWRATE_DEF_REVA);
        } else {
+               /* This is revision B and newer. */
+               extern uint32_t aic79xx_slowcrc;
                u_int devconfig1;
 
                ahd->features |= AHD_RTI|AHD_NEW_IOCELL_OPTS
-                             |  AHD_NEW_DFCNTRL_OPTS|AHD_FAST_CDB_DELIVERY;
+                             |  AHD_NEW_DFCNTRL_OPTS|AHD_FAST_CDB_DELIVERY
+                             |  AHD_BUSFREEREV_BUG;
                ahd->bugs |= AHD_LQOOVERRUN_BUG|AHD_EARLY_REQ_BUG;
 
+               /* If the user requested the SLOWCRC bit to be set. */
+               if (aic79xx_slowcrc)
+                       ahd->features |= AHD_AIC79XXB_SLOWCRC;
+
                /*
                 * Some issues have been resolved in the 7901B.
                 */
index 8763b158856b56aef0dd856c55cafe1a89c11c43..2068e00d2c750a09c8b733aff1fa01af385a395e 100644 (file)
@@ -2,8 +2,8 @@
  * DO NOT EDIT - This file is automatically generated
  *              from the following source files:
  *
- * $Id: //depot/aic7xxx/aic7xxx/aic79xx.seq#119 $
- * $Id: //depot/aic7xxx/aic7xxx/aic79xx.reg#76 $
+ * $Id: //depot/aic7xxx/aic7xxx/aic79xx.seq#120 $
+ * $Id: //depot/aic7xxx/aic7xxx/aic79xx.reg#77 $
  */
 typedef int (ahd_reg_print_t)(u_int, u_int *, u_int);
 typedef struct ahd_reg_parse_entry {
@@ -2203,6 +2203,20 @@ ahd_reg_print_t ahd_cmdsize_table_print;
     ahd_print_register(NULL, 0, "CMDSIZE_TABLE", 0x158, regvalue, cur_col, wrap)
 #endif
 
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_mk_message_scb_print;
+#else
+#define ahd_mk_message_scb_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "MK_MESSAGE_SCB", 0x160, regvalue, cur_col, wrap)
+#endif
+
+#if AIC_DEBUG_REGISTERS
+ahd_reg_print_t ahd_mk_message_scsiid_print;
+#else
+#define ahd_mk_message_scsiid_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "MK_MESSAGE_SCSIID", 0x162, regvalue, cur_col, wrap)
+#endif
+
 #if AIC_DEBUG_REGISTERS
 ahd_reg_print_t ahd_scb_base_print;
 #else
@@ -3638,6 +3652,7 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
 #define        SEQ_FLAGS2                      0x14d
 #define                SELECTOUT_QFROZEN       0x04
 #define                TARGET_MSG_PENDING      0x02
+#define                PENDING_MK_MESSAGE      0x01
 
 #define        ALLOCFIFO_SCBPTR                0x14e
 
@@ -3655,6 +3670,10 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
 
 #define        CMDSIZE_TABLE                   0x158
 
+#define        MK_MESSAGE_SCB                  0x160
+
+#define        MK_MESSAGE_SCSIID               0x162
+
 #define        SCB_BASE                        0x180
 
 #define        SCB_RESIDUAL_DATACNT            0x180
@@ -3800,5 +3819,5 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
 
 
 /* Exported Labels */
-#define        LABEL_seq_isr   0x285
-#define        LABEL_timer_isr 0x281
+#define        LABEL_seq_isr   0x28f
+#define        LABEL_timer_isr 0x28b
index a4137c985376b6eabff30ed6a366356eafc4e87a..db38a61a8cb4f2cf15160f4406a4749fcb716ad9 100644 (file)
@@ -2,8 +2,8 @@
  * DO NOT EDIT - This file is automatically generated
  *              from the following source files:
  *
- * $Id: //depot/aic7xxx/aic7xxx/aic79xx.seq#118 $
- * $Id: //depot/aic7xxx/aic7xxx/aic79xx.reg#75 $
+ * $Id: //depot/aic7xxx/aic7xxx/aic79xx.seq#120 $
+ * $Id: //depot/aic7xxx/aic7xxx/aic79xx.reg#77 $
  */
 
 #include "aic79xx_osm.h"
@@ -3382,6 +3382,7 @@ ahd_initiator_tag_print(u_int regvalue, u_int *cur_col, u_int wrap)
 }
 
 static ahd_reg_parse_entry_t SEQ_FLAGS2_parse_table[] = {
+       { "PENDING_MK_MESSAGE", 0x01, 0x01 },
        { "TARGET_MSG_PENDING", 0x02, 0x02 },
        { "SELECTOUT_QFROZEN",  0x04, 0x04 }
 };
@@ -3389,7 +3390,7 @@ static ahd_reg_parse_entry_t SEQ_FLAGS2_parse_table[] = {
 int
 ahd_seq_flags2_print(u_int regvalue, u_int *cur_col, u_int wrap)
 {
-       return (ahd_print_register(SEQ_FLAGS2_parse_table, 2, "SEQ_FLAGS2",
+       return (ahd_print_register(SEQ_FLAGS2_parse_table, 3, "SEQ_FLAGS2",
            0x14d, regvalue, cur_col, wrap));
 }
 
@@ -3449,6 +3450,20 @@ ahd_cmdsize_table_print(u_int regvalue, u_int *cur_col, u_int wrap)
            0x158, regvalue, cur_col, wrap));
 }
 
+int
+ahd_mk_message_scb_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+       return (ahd_print_register(NULL, 0, "MK_MESSAGE_SCB",
+           0x160, regvalue, cur_col, wrap));
+}
+
+int
+ahd_mk_message_scsiid_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+       return (ahd_print_register(NULL, 0, "MK_MESSAGE_SCSIID",
+           0x162, regvalue, cur_col, wrap));
+}
+
 int
 ahd_scb_base_print(u_int regvalue, u_int *cur_col, u_int wrap)
 {
index b1e5365be23005f11957da625356bd7c0f147c20..11bed07e90b7ef8573ae0d5fc5afcb7f3211ab07 100644 (file)
@@ -2,17 +2,17 @@
  * DO NOT EDIT - This file is automatically generated
  *              from the following source files:
  *
- * $Id: //depot/aic7xxx/aic7xxx/aic79xx.seq#119 $
- * $Id: //depot/aic7xxx/aic7xxx/aic79xx.reg#76 $
+ * $Id: //depot/aic7xxx/aic7xxx/aic79xx.seq#120 $
+ * $Id: //depot/aic7xxx/aic7xxx/aic79xx.reg#77 $
  */
 static uint8_t seqprog[] = {
        0xff, 0x02, 0x06, 0x78,
-       0x00, 0xea, 0x64, 0x59,
+       0x00, 0xea, 0x6e, 0x59,
        0x01, 0xea, 0x04, 0x30,
        0xff, 0x04, 0x0c, 0x78,
-       0x19, 0xea, 0x64, 0x59,
+       0x19, 0xea, 0x6e, 0x59,
        0x19, 0xea, 0x04, 0x00,
-       0x33, 0xea, 0x5e, 0x59,
+       0x33, 0xea, 0x68, 0x59,
        0x33, 0xea, 0x00, 0x00,
        0x60, 0x3a, 0x3a, 0x68,
        0x04, 0x4d, 0x35, 0x78,
@@ -33,15 +33,15 @@ static uint8_t seqprog[] = {
        0xff, 0xea, 0x62, 0x02,
        0x00, 0xe2, 0x3a, 0x40,
        0xff, 0x21, 0x3b, 0x70,
-       0x40, 0x4b, 0xaa, 0x69,
-       0x00, 0xe2, 0x68, 0x59,
-       0x40, 0x4b, 0xaa, 0x69,
-       0x20, 0x4b, 0x96, 0x69,
+       0x40, 0x4b, 0xb4, 0x69,
+       0x00, 0xe2, 0x72, 0x59,
+       0x40, 0x4b, 0xb4, 0x69,
+       0x20, 0x4b, 0xa0, 0x69,
        0xfc, 0x42, 0x44, 0x78,
        0x10, 0x40, 0x44, 0x78,
-       0x00, 0xe2, 0xfc, 0x5d,
+       0x00, 0xe2, 0x10, 0x5e,
        0x20, 0x4d, 0x48, 0x78,
-       0x00, 0xe2, 0xfc, 0x5d,
+       0x00, 0xe2, 0x10, 0x5e,
        0x30, 0x3f, 0xc0, 0x09,
        0x30, 0xe0, 0x50, 0x60,
        0x7f, 0x4a, 0x94, 0x08,
@@ -51,7 +51,7 @@ static uint8_t seqprog[] = {
        0x00, 0xe2, 0x76, 0x58,
        0x00, 0xe2, 0x86, 0x58,
        0x00, 0xe2, 0x06, 0x40,
-       0x33, 0xea, 0x5e, 0x59,
+       0x33, 0xea, 0x68, 0x59,
        0x33, 0xea, 0x00, 0x00,
        0x01, 0x52, 0x84, 0x78,
        0x02, 0x58, 0x50, 0x31,
@@ -59,26 +59,26 @@ static uint8_t seqprog[] = {
        0xff, 0x97, 0x6f, 0x78,
        0x50, 0x4b, 0x6a, 0x68,
        0xbf, 0x3a, 0x74, 0x08,
-       0x14, 0xea, 0x64, 0x59,
+       0x14, 0xea, 0x6e, 0x59,
        0x14, 0xea, 0x04, 0x00,
        0x08, 0x92, 0x25, 0x03,
        0xff, 0x90, 0x5f, 0x68,
-       0x00, 0xe2, 0x76, 0x5b,
+       0x00, 0xe2, 0x8a, 0x5b,
        0x00, 0xe2, 0x5e, 0x40,
-       0x00, 0xea, 0x5e, 0x59,
+       0x00, 0xea, 0x68, 0x59,
        0x01, 0xea, 0x00, 0x30,
        0x80, 0xf9, 0x7e, 0x68,
-       0x00, 0xe2, 0x5c, 0x59,
-       0x11, 0xea, 0x5e, 0x59,
+       0x00, 0xe2, 0x66, 0x59,
+       0x11, 0xea, 0x68, 0x59,
        0x11, 0xea, 0x00, 0x00,
-       0x80, 0xf9, 0x5c, 0x79,
+       0x80, 0xf9, 0x66, 0x79,
        0xff, 0xea, 0xd4, 0x0d,
-       0x22, 0xea, 0x5e, 0x59,
+       0x22, 0xea, 0x68, 0x59,
        0x22, 0xea, 0x00, 0x00,
        0x10, 0x16, 0x90, 0x78,
        0x10, 0x16, 0x2c, 0x00,
        0x01, 0x0b, 0xae, 0x32,
-       0x18, 0xad, 0x12, 0x79,
+       0x18, 0xad, 0x1c, 0x79,
        0x04, 0xad, 0xdc, 0x68,
        0x80, 0xad, 0x84, 0x78,
        0x10, 0xad, 0xaa, 0x78,
@@ -118,7 +118,6 @@ static uint8_t seqprog[] = {
        0x80, 0x18, 0x30, 0x04,
        0x40, 0xad, 0x84, 0x78,
        0xe7, 0xad, 0x5a, 0x09,
-       0x02, 0xa8, 0x40, 0x31,
        0xff, 0xea, 0xc0, 0x09,
        0x01, 0x54, 0xa9, 0x1a,
        0x00, 0x55, 0xab, 0x22,
@@ -128,24 +127,30 @@ static uint8_t seqprog[] = {
        0xff, 0xea, 0x5a, 0x03,
        0xff, 0xea, 0x5e, 0x03,
        0x01, 0x10, 0xd4, 0x31,
-       0x10, 0x92, 0x07, 0x69,
+       0x02, 0xa8, 0x40, 0x31,
+       0x01, 0x92, 0xc1, 0x31,
        0x3d, 0x93, 0xc5, 0x29,
        0xfe, 0xe2, 0xc4, 0x09,
        0x01, 0xea, 0xc6, 0x01,
        0x02, 0xe2, 0xc8, 0x31,
        0x02, 0xec, 0x50, 0x31,
        0x02, 0xa0, 0xda, 0x31,
-       0xff, 0xa9, 0x06, 0x71,
+       0xff, 0xa9, 0x10, 0x71,
+       0x10, 0xe0, 0x0e, 0x79,
+       0x10, 0x92, 0x0f, 0x79,
+       0x01, 0x4d, 0x9b, 0x02,
+       0x02, 0xa0, 0xc0, 0x32,
+       0x01, 0x93, 0xc5, 0x36,
        0x02, 0xa0, 0x58, 0x37,
-       0xff, 0x21, 0x0f, 0x71,
+       0xff, 0x21, 0x19, 0x71,
        0x02, 0x22, 0x51, 0x31,
        0x02, 0xa0, 0x5c, 0x33,
        0x02, 0xa0, 0x44, 0x36,
        0x02, 0xa0, 0x40, 0x32,
        0x02, 0xa0, 0x44, 0x36,
-       0x04, 0x4d, 0x17, 0x69,
-       0x40, 0x16, 0x48, 0x69,
-       0xff, 0x2d, 0x4d, 0x61,
+       0x05, 0x4d, 0x21, 0x69,
+       0x40, 0x16, 0x52, 0x69,
+       0xff, 0x2d, 0x57, 0x61,
        0xff, 0x29, 0x85, 0x70,
        0x02, 0x28, 0x55, 0x32,
        0x01, 0xea, 0x5a, 0x01,
@@ -159,22 +164,22 @@ static uint8_t seqprog[] = {
        0x01, 0x56, 0xad, 0x1a,
        0xff, 0x54, 0xa9, 0x1a,
        0xff, 0x55, 0xab, 0x22,
-       0xff, 0x8d, 0x41, 0x71,
-       0x80, 0xac, 0x40, 0x71,
-       0x20, 0x16, 0x40, 0x69,
+       0xff, 0x8d, 0x4b, 0x71,
+       0x80, 0xac, 0x4a, 0x71,
+       0x20, 0x16, 0x4a, 0x69,
        0x00, 0xac, 0xc4, 0x19,
-       0x07, 0xe2, 0x40, 0xf9,
+       0x07, 0xe2, 0x4a, 0xf9,
        0x02, 0x8c, 0x51, 0x31,
-       0x00, 0xe2, 0x24, 0x41,
+       0x00, 0xe2, 0x2e, 0x41,
        0x01, 0xac, 0x08, 0x31,
        0x09, 0xea, 0x5a, 0x01,
        0x02, 0x8c, 0x51, 0x32,
        0xff, 0xea, 0x1a, 0x07,
        0x04, 0x24, 0xf9, 0x30,
-       0x1d, 0xea, 0x52, 0x41,
+       0x1d, 0xea, 0x5c, 0x41,
        0x02, 0x2c, 0x51, 0x31,
        0x04, 0xa8, 0xf9, 0x30,
-       0x19, 0xea, 0x52, 0x41,
+       0x19, 0xea, 0x5c, 0x41,
        0x06, 0xea, 0x08, 0x81,
        0x01, 0xe2, 0x5a, 0x35,
        0x02, 0xf2, 0xf0, 0x31,
@@ -190,27 +195,27 @@ static uint8_t seqprog[] = {
        0x02, 0x20, 0xb9, 0x30,
        0x02, 0x20, 0x51, 0x31,
        0x4c, 0x93, 0xd7, 0x28,
-       0x10, 0x92, 0x77, 0x79,
+       0x10, 0x92, 0x81, 0x79,
        0x01, 0x6b, 0xc0, 0x30,
        0x02, 0x64, 0xc8, 0x00,
        0x40, 0x3a, 0x74, 0x04,
        0x00, 0xe2, 0x76, 0x58,
-       0x33, 0xea, 0x5e, 0x59,
+       0x33, 0xea, 0x68, 0x59,
        0x33, 0xea, 0x00, 0x00,
        0x30, 0x3f, 0xc0, 0x09,
-       0x30, 0xe0, 0x78, 0x61,
-       0x20, 0x3f, 0x8e, 0x69,
-       0x10, 0x3f, 0x78, 0x79,
+       0x30, 0xe0, 0x82, 0x61,
+       0x20, 0x3f, 0x98, 0x69,
+       0x10, 0x3f, 0x82, 0x79,
        0x02, 0xea, 0x7e, 0x00,
-       0x00, 0xea, 0x5e, 0x59,
+       0x00, 0xea, 0x68, 0x59,
        0x01, 0xea, 0x00, 0x30,
        0x02, 0x4e, 0x51, 0x35,
        0x01, 0xea, 0x7e, 0x00,
-       0x11, 0xea, 0x5e, 0x59,
+       0x11, 0xea, 0x68, 0x59,
        0x11, 0xea, 0x00, 0x00,
        0x02, 0x4e, 0x51, 0x35,
        0xc0, 0x4a, 0x94, 0x00,
-       0x04, 0x41, 0x9c, 0x79,
+       0x04, 0x41, 0xa6, 0x79,
        0x08, 0xea, 0x98, 0x00,
        0x08, 0x57, 0xae, 0x00,
        0x08, 0x3c, 0x78, 0x00,
@@ -218,12 +223,12 @@ static uint8_t seqprog[] = {
        0x0f, 0x67, 0xc0, 0x09,
        0x00, 0x3a, 0x75, 0x02,
        0x20, 0xea, 0x96, 0x00,
-       0x00, 0xe2, 0x14, 0x42,
+       0x00, 0xe2, 0x28, 0x42,
        0xc0, 0x4a, 0x94, 0x00,
-       0x40, 0x3a, 0xc8, 0x69,
+       0x40, 0x3a, 0xd2, 0x69,
        0x02, 0x55, 0x06, 0x68,
-       0x02, 0x56, 0xc8, 0x69,
-       0xff, 0x5b, 0xc8, 0x61,
+       0x02, 0x56, 0xd2, 0x69,
+       0xff, 0x5b, 0xd2, 0x61,
        0x02, 0x20, 0x51, 0x31,
        0x80, 0xea, 0xb2, 0x01,
        0x44, 0xea, 0x00, 0x00,
@@ -231,40 +236,45 @@ static uint8_t seqprog[] = {
        0x33, 0xea, 0x00, 0x00,
        0xff, 0xea, 0xb2, 0x09,
        0xff, 0xe0, 0xc0, 0x19,
-       0xff, 0xe0, 0xca, 0x79,
+       0xff, 0xe0, 0xd4, 0x79,
        0x02, 0xac, 0x51, 0x31,
-       0x00, 0xe2, 0xc0, 0x41,
+       0x00, 0xe2, 0xca, 0x41,
        0x02, 0x5e, 0x50, 0x31,
        0x02, 0xa8, 0xb8, 0x30,
        0x02, 0x5c, 0x50, 0x31,
-       0xff, 0xad, 0xdb, 0x71,
+       0xff, 0xad, 0xe5, 0x71,
        0x02, 0xac, 0x41, 0x31,
        0x02, 0x22, 0x51, 0x31,
        0x02, 0xa0, 0x5c, 0x33,
        0x02, 0xa0, 0x44, 0x32,
-       0x00, 0xe2, 0xe4, 0x41,
-       0x10, 0x92, 0xe5, 0x69,
+       0x00, 0xe2, 0xf8, 0x41,
+       0x01, 0x4d, 0xf1, 0x79,
+       0x01, 0x62, 0xc1, 0x31,
+       0x00, 0x93, 0xf1, 0x61,
+       0xfe, 0x4d, 0x9b, 0x0a,
+       0x02, 0x60, 0x41, 0x31,
+       0x00, 0xe2, 0xdc, 0x41,
        0x3d, 0x93, 0xc9, 0x29,
        0x01, 0xe4, 0xc8, 0x01,
        0x01, 0xea, 0xca, 0x01,
        0xff, 0xea, 0xda, 0x01,
        0x02, 0x20, 0x51, 0x31,
        0x02, 0xae, 0x41, 0x32,
-       0xff, 0x21, 0xed, 0x61,
+       0xff, 0x21, 0x01, 0x62,
        0xff, 0xea, 0x46, 0x02,
        0x02, 0x5c, 0x50, 0x31,
        0x40, 0xea, 0x96, 0x00,
-       0x02, 0x56, 0x04, 0x6e,
-       0x01, 0x55, 0x04, 0x6e,
-       0x10, 0x92, 0xf9, 0x79,
-       0x10, 0x40, 0x02, 0x6a,
-       0x01, 0x56, 0x02, 0x7a,
+       0x02, 0x56, 0x20, 0x6e,
+       0x01, 0x55, 0x20, 0x6e,
+       0x10, 0x92, 0x0d, 0x7a,
+       0x10, 0x40, 0x16, 0x6a,
+       0x01, 0x56, 0x16, 0x7a,
        0xff, 0x97, 0x07, 0x78,
-       0x13, 0xea, 0x64, 0x59,
+       0x13, 0xea, 0x6e, 0x59,
        0x13, 0xea, 0x04, 0x00,
        0x00, 0xe2, 0x06, 0x40,
        0xbf, 0x3a, 0x74, 0x08,
-       0x04, 0x41, 0x08, 0x7a,
+       0x04, 0x41, 0x1c, 0x7a,
        0x08, 0xea, 0x98, 0x00,
        0x08, 0x57, 0xae, 0x00,
        0x01, 0x93, 0x75, 0x32,
@@ -272,108 +282,108 @@ static uint8_t seqprog[] = {
        0x40, 0xea, 0x72, 0x02,
        0x08, 0x3c, 0x78, 0x00,
        0x80, 0xea, 0x6e, 0x02,
-       0x00, 0xe2, 0xe2, 0x5b,
+       0x00, 0xe2, 0xf6, 0x5b,
        0x01, 0x3c, 0xc1, 0x31,
-       0x9f, 0xe0, 0x84, 0x7c,
-       0x80, 0xe0, 0x28, 0x72,
-       0xa0, 0xe0, 0x64, 0x72,
-       0xc0, 0xe0, 0x5a, 0x72,
-       0xe0, 0xe0, 0x94, 0x72,
-       0x01, 0xea, 0x64, 0x59,
+       0x9f, 0xe0, 0x98, 0x7c,
+       0x80, 0xe0, 0x3c, 0x72,
+       0xa0, 0xe0, 0x78, 0x72,
+       0xc0, 0xe0, 0x6e, 0x72,
+       0xe0, 0xe0, 0xa8, 0x72,
+       0x01, 0xea, 0x6e, 0x59,
        0x01, 0xea, 0x04, 0x00,
-       0x00, 0xe2, 0x14, 0x42,
-       0x80, 0x39, 0x2f, 0x7a,
-       0x03, 0xea, 0x64, 0x59,
+       0x00, 0xe2, 0x28, 0x42,
+       0x80, 0x39, 0x43, 0x7a,
+       0x03, 0xea, 0x6e, 0x59,
        0x03, 0xea, 0x04, 0x00,
-       0xee, 0x00, 0x36, 0x6a,
+       0xee, 0x00, 0x4a, 0x6a,
        0x05, 0xea, 0xb4, 0x00,
-       0x33, 0xea, 0x5e, 0x59,
+       0x33, 0xea, 0x68, 0x59,
        0x33, 0xea, 0x00, 0x00,
        0x02, 0xa8, 0x9c, 0x32,
-       0x00, 0xe2, 0x7e, 0x59,
+       0x00, 0xe2, 0x88, 0x59,
        0xef, 0x96, 0xd5, 0x19,
-       0x00, 0xe2, 0x46, 0x52,
+       0x00, 0xe2, 0x5a, 0x52,
        0x09, 0x80, 0xe1, 0x30,
        0x02, 0xea, 0x36, 0x00,
        0xa8, 0xea, 0x32, 0x00,
-       0x00, 0xe2, 0x4c, 0x42,
+       0x00, 0xe2, 0x60, 0x42,
        0x01, 0x96, 0xd1, 0x30,
        0x10, 0x80, 0x89, 0x31,
        0x20, 0xea, 0x32, 0x00,
        0xbf, 0x39, 0x73, 0x0a,
-       0x10, 0x4c, 0x56, 0x6a,
-       0x20, 0x19, 0x4e, 0x6a,
-       0x20, 0x19, 0x52, 0x6a,
-       0x02, 0x4d, 0x14, 0x6a,
+       0x10, 0x4c, 0x6a, 0x6a,
+       0x20, 0x19, 0x62, 0x6a,
+       0x20, 0x19, 0x66, 0x6a,
+       0x02, 0x4d, 0x28, 0x6a,
        0x40, 0x39, 0x73, 0x02,
-       0x00, 0xe2, 0x14, 0x42,
-       0x80, 0x39, 0xd5, 0x6a,
+       0x00, 0xe2, 0x28, 0x42,
+       0x80, 0x39, 0xe9, 0x6a,
        0x01, 0x44, 0x10, 0x33,
        0x08, 0x92, 0x25, 0x03,
-       0x00, 0xe2, 0x14, 0x42,
+       0x00, 0xe2, 0x28, 0x42,
        0x10, 0xea, 0x80, 0x00,
        0x01, 0x37, 0xc5, 0x31,
-       0x80, 0xe2, 0x80, 0x62,
-       0x10, 0x92, 0xa5, 0x6a,
+       0x80, 0xe2, 0x94, 0x62,
+       0x10, 0x92, 0xb9, 0x6a,
        0xc0, 0x94, 0xc5, 0x01,
-       0x40, 0x92, 0x71, 0x6a,
+       0x40, 0x92, 0x85, 0x6a,
        0xbf, 0xe2, 0xc4, 0x09,
-       0x20, 0x92, 0x85, 0x7a,
+       0x20, 0x92, 0x99, 0x7a,
        0x01, 0xe2, 0x88, 0x30,
-       0x00, 0xe2, 0xe2, 0x5b,
-       0xa0, 0x3c, 0x8d, 0x62,
+       0x00, 0xe2, 0xf6, 0x5b,
+       0xa0, 0x3c, 0xa1, 0x62,
        0x23, 0x92, 0x89, 0x08,
-       0x00, 0xe2, 0xe2, 0x5b,
-       0xa0, 0x3c, 0x8d, 0x62,
-       0x00, 0xa8, 0x84, 0x42,
-       0xff, 0xe2, 0x84, 0x62,
-       0x00, 0xe2, 0xa4, 0x42,
+       0x00, 0xe2, 0xf6, 0x5b,
+       0xa0, 0x3c, 0xa1, 0x62,
+       0x00, 0xa8, 0x98, 0x42,
+       0xff, 0xe2, 0x98, 0x62,
+       0x00, 0xe2, 0xb8, 0x42,
        0x40, 0xea, 0x98, 0x00,
        0x01, 0xe2, 0x88, 0x30,
-       0x00, 0xe2, 0xe2, 0x5b,
-       0xa0, 0x3c, 0x63, 0x72,
+       0x00, 0xe2, 0xf6, 0x5b,
+       0xa0, 0x3c, 0x77, 0x72,
        0x40, 0xea, 0x98, 0x00,
        0x01, 0x37, 0x95, 0x32,
        0x08, 0xea, 0x6e, 0x02,
-       0x00, 0xe2, 0x14, 0x42,
-       0xe0, 0xea, 0xfe, 0x5b,
-       0x80, 0xe0, 0xe0, 0x6a,
-       0x04, 0xe0, 0x92, 0x73,
-       0x02, 0xe0, 0xc4, 0x73,
-       0x00, 0xea, 0x3e, 0x73,
-       0x03, 0xe0, 0xd4, 0x73,
-       0x23, 0xe0, 0xb6, 0x72,
-       0x08, 0xe0, 0xdc, 0x72,
-       0x00, 0xe2, 0xe2, 0x5b,
-       0x07, 0xea, 0x64, 0x59,
+       0x00, 0xe2, 0x28, 0x42,
+       0xe0, 0xea, 0x12, 0x5c,
+       0x80, 0xe0, 0xf4, 0x6a,
+       0x04, 0xe0, 0xa6, 0x73,
+       0x02, 0xe0, 0xd8, 0x73,
+       0x00, 0xea, 0x52, 0x73,
+       0x03, 0xe0, 0xe8, 0x73,
+       0x23, 0xe0, 0xca, 0x72,
+       0x08, 0xe0, 0xf0, 0x72,
+       0x00, 0xe2, 0xf6, 0x5b,
+       0x07, 0xea, 0x6e, 0x59,
        0x07, 0xea, 0x04, 0x00,
-       0x08, 0x48, 0x15, 0x72,
-       0x04, 0x48, 0xb3, 0x62,
+       0x08, 0x48, 0x29, 0x72,
+       0x04, 0x48, 0xc7, 0x62,
        0x01, 0x49, 0x89, 0x30,
-       0x00, 0xe2, 0xa4, 0x42,
+       0x00, 0xe2, 0xb8, 0x42,
        0x01, 0x44, 0xd4, 0x31,
-       0x00, 0xe2, 0xa4, 0x42,
+       0x00, 0xe2, 0xb8, 0x42,
        0x01, 0x00, 0x6c, 0x32,
-       0x33, 0xea, 0x5e, 0x59,
+       0x33, 0xea, 0x68, 0x59,
        0x33, 0xea, 0x00, 0x00,
        0x4c, 0x3a, 0xc1, 0x28,
        0x01, 0x64, 0xc0, 0x31,
-       0x00, 0x36, 0x5f, 0x59,
+       0x00, 0x36, 0x69, 0x59,
        0x01, 0x36, 0x01, 0x30,
-       0x01, 0xe0, 0xda, 0x7a,
-       0xa0, 0xea, 0xf4, 0x5b,
-       0x01, 0xa0, 0xda, 0x62,
-       0x01, 0x84, 0xcf, 0x7a,
-       0x01, 0x95, 0xdd, 0x6a,
-       0x05, 0xea, 0x64, 0x59,
+       0x01, 0xe0, 0xee, 0x7a,
+       0xa0, 0xea, 0x08, 0x5c,
+       0x01, 0xa0, 0xee, 0x62,
+       0x01, 0x84, 0xe3, 0x7a,
+       0x01, 0x95, 0xf1, 0x6a,
+       0x05, 0xea, 0x6e, 0x59,
        0x05, 0xea, 0x04, 0x00,
-       0x00, 0xe2, 0xdc, 0x42,
-       0x03, 0xea, 0x64, 0x59,
+       0x00, 0xe2, 0xf0, 0x42,
+       0x03, 0xea, 0x6e, 0x59,
        0x03, 0xea, 0x04, 0x00,
-       0x00, 0xe2, 0xdc, 0x42,
-       0x07, 0xea, 0x06, 0x5c,
+       0x00, 0xe2, 0xf0, 0x42,
+       0x07, 0xea, 0x1a, 0x5c,
        0x01, 0x44, 0xd4, 0x31,
-       0x00, 0xe2, 0x14, 0x42,
+       0x00, 0xe2, 0x28, 0x42,
        0x3f, 0xe0, 0x76, 0x0a,
        0xc0, 0x3a, 0xc1, 0x09,
        0x00, 0x3b, 0x51, 0x01,
@@ -384,54 +394,54 @@ static uint8_t seqprog[] = {
        0x01, 0xea, 0xc6, 0x01,
        0x02, 0xe2, 0xc8, 0x31,
        0x02, 0xec, 0x40, 0x31,
-       0xff, 0xa1, 0xfc, 0x72,
+       0xff, 0xa1, 0x10, 0x73,
        0x02, 0xe8, 0xda, 0x31,
        0x02, 0xa0, 0x50, 0x31,
-       0x00, 0xe2, 0x1e, 0x43,
+       0x00, 0xe2, 0x32, 0x43,
        0x80, 0x39, 0x73, 0x02,
        0x01, 0x44, 0xd4, 0x31,
-       0x00, 0xe2, 0xe2, 0x5b,
+       0x00, 0xe2, 0xf6, 0x5b,
        0x01, 0x39, 0x73, 0x02,
-       0xe0, 0x3c, 0x39, 0x63,
+       0xe0, 0x3c, 0x4d, 0x63,
        0x02, 0x39, 0x73, 0x02,
-       0x20, 0x46, 0x32, 0x63,
+       0x20, 0x46, 0x46, 0x63,
        0xff, 0xea, 0x52, 0x09,
-       0xa8, 0xea, 0xf4, 0x5b,
-       0x04, 0x92, 0x19, 0x7b,
+       0xa8, 0xea, 0x08, 0x5c,
+       0x04, 0x92, 0x2d, 0x7b,
        0x01, 0x3a, 0xc1, 0x31,
-       0x00, 0x93, 0x19, 0x63,
+       0x00, 0x93, 0x2d, 0x63,
        0x01, 0x3b, 0xc1, 0x31,
-       0x00, 0x94, 0x23, 0x73,
+       0x00, 0x94, 0x37, 0x73,
        0x01, 0xa9, 0x52, 0x11,
-       0xff, 0xa9, 0x0e, 0x6b,
-       0x00, 0xe2, 0x32, 0x43,
+       0xff, 0xa9, 0x22, 0x6b,
+       0x00, 0xe2, 0x46, 0x43,
        0x10, 0x39, 0x73, 0x02,
-       0x04, 0x92, 0x33, 0x7b,
+       0x04, 0x92, 0x47, 0x7b,
        0xfb, 0x92, 0x25, 0x0b,
        0xff, 0xea, 0x72, 0x0a,
-       0x01, 0xa4, 0x2d, 0x6b,
+       0x01, 0xa4, 0x41, 0x6b,
        0x02, 0xa8, 0x9c, 0x32,
-       0x00, 0xe2, 0x7e, 0x59,
-       0x10, 0x92, 0xdd, 0x7a,
-       0xff, 0xea, 0x06, 0x5c,
-       0x00, 0xe2, 0xdc, 0x42,
-       0x04, 0xea, 0x64, 0x59,
+       0x00, 0xe2, 0x88, 0x59,
+       0x10, 0x92, 0xf1, 0x7a,
+       0xff, 0xea, 0x1a, 0x5c,
+       0x00, 0xe2, 0xf0, 0x42,
+       0x04, 0xea, 0x6e, 0x59,
        0x04, 0xea, 0x04, 0x00,
-       0x00, 0xe2, 0xdc, 0x42,
-       0x04, 0xea, 0x64, 0x59,
+       0x00, 0xe2, 0xf0, 0x42,
+       0x04, 0xea, 0x6e, 0x59,
        0x04, 0xea, 0x04, 0x00,
-       0x00, 0xe2, 0x14, 0x42,
-       0x08, 0x92, 0xd5, 0x7a,
-       0xc0, 0x39, 0x49, 0x7b,
-       0x80, 0x39, 0xd5, 0x6a,
-       0xff, 0x88, 0x49, 0x6b,
-       0x40, 0x39, 0xd5, 0x6a,
-       0x10, 0x92, 0x4f, 0x7b,
-       0x0a, 0xea, 0x64, 0x59,
+       0x00, 0xe2, 0x28, 0x42,
+       0x08, 0x92, 0xe9, 0x7a,
+       0xc0, 0x39, 0x5d, 0x7b,
+       0x80, 0x39, 0xe9, 0x6a,
+       0xff, 0x88, 0x5d, 0x6b,
+       0x40, 0x39, 0xe9, 0x6a,
+       0x10, 0x92, 0x63, 0x7b,
+       0x0a, 0xea, 0x6e, 0x59,
        0x0a, 0xea, 0x04, 0x00,
-       0x00, 0xe2, 0x6e, 0x5b,
-       0x00, 0xe2, 0xae, 0x43,
-       0x50, 0x4b, 0x56, 0x6b,
+       0x00, 0xe2, 0x82, 0x5b,
+       0x00, 0xe2, 0xc2, 0x43,
+       0x50, 0x4b, 0x6a, 0x6b,
        0xbf, 0x3a, 0x74, 0x08,
        0x01, 0xe0, 0xf4, 0x31,
        0xff, 0xea, 0xc0, 0x09,
@@ -441,31 +451,31 @@ static uint8_t seqprog[] = {
        0x01, 0xfa, 0xc0, 0x35,
        0x02, 0xa8, 0x90, 0x32,
        0x02, 0xea, 0xb4, 0x00,
-       0x33, 0xea, 0x5e, 0x59,
+       0x33, 0xea, 0x68, 0x59,
        0x33, 0xea, 0x00, 0x00,
        0x02, 0x48, 0x51, 0x31,
        0xff, 0x90, 0x85, 0x68,
-       0xff, 0x88, 0x7b, 0x6b,
-       0x01, 0xa4, 0x77, 0x6b,
-       0x02, 0xa4, 0x7f, 0x6b,
-       0x01, 0x84, 0x7f, 0x7b,
+       0xff, 0x88, 0x8f, 0x6b,
+       0x01, 0xa4, 0x8b, 0x6b,
+       0x02, 0xa4, 0x93, 0x6b,
+       0x01, 0x84, 0x93, 0x7b,
        0x02, 0x28, 0x19, 0x33,
        0x02, 0xa8, 0x50, 0x36,
-       0xff, 0x88, 0x7f, 0x73,
-       0x00, 0xe2, 0x52, 0x5b,
+       0xff, 0x88, 0x93, 0x73,
+       0x00, 0xe2, 0x66, 0x5b,
        0x02, 0xa8, 0x20, 0x33,
        0x04, 0xa4, 0x49, 0x03,
        0xff, 0xea, 0x1a, 0x03,
-       0xff, 0x2d, 0x8b, 0x63,
+       0xff, 0x2d, 0x9f, 0x63,
        0x02, 0xa8, 0x58, 0x32,
        0x02, 0xa8, 0x5c, 0x36,
        0x02, 0xa8, 0x40, 0x31,
        0x02, 0x2e, 0x51, 0x31,
        0x02, 0xa0, 0x18, 0x33,
        0x02, 0xa0, 0x5c, 0x36,
-       0xc0, 0x39, 0xd5, 0x6a,
+       0xc0, 0x39, 0xe9, 0x6a,
        0x04, 0x92, 0x25, 0x03,
-       0x20, 0x92, 0xaf, 0x6b,
+       0x20, 0x92, 0xc3, 0x6b,
        0x02, 0xa8, 0x40, 0x31,
        0xc0, 0x3a, 0xc1, 0x09,
        0x00, 0x3b, 0x51, 0x01,
@@ -480,60 +490,60 @@ static uint8_t seqprog[] = {
        0xf7, 0x57, 0xae, 0x08,
        0x08, 0xea, 0x98, 0x00,
        0x01, 0x44, 0xd4, 0x31,
-       0xee, 0x00, 0xb8, 0x6b,
+       0xee, 0x00, 0xcc, 0x6b,
        0x02, 0xea, 0xb4, 0x00,
        0xc0, 0xea, 0x72, 0x02,
-       0x09, 0x4c, 0xba, 0x7b,
+       0x09, 0x4c, 0xce, 0x7b,
        0x01, 0xea, 0x78, 0x02,
        0x08, 0x4c, 0x06, 0x68,
-       0x0b, 0xea, 0x64, 0x59,
+       0x0b, 0xea, 0x6e, 0x59,
        0x0b, 0xea, 0x04, 0x00,
        0x01, 0x44, 0xd4, 0x31,
-       0x20, 0x39, 0x15, 0x7a,
-       0x00, 0xe2, 0xcc, 0x5b,
-       0x00, 0xe2, 0x14, 0x42,
-       0x01, 0x84, 0xd1, 0x7b,
+       0x20, 0x39, 0x29, 0x7a,
+       0x00, 0xe2, 0xe0, 0x5b,
+       0x00, 0xe2, 0x28, 0x42,
+       0x01, 0x84, 0xe5, 0x7b,
        0x01, 0xa4, 0x49, 0x07,
        0x08, 0x60, 0x30, 0x33,
        0x08, 0x80, 0x41, 0x37,
        0xdf, 0x39, 0x73, 0x0a,
-       0xee, 0x00, 0xde, 0x6b,
+       0xee, 0x00, 0xf2, 0x6b,
        0x05, 0xea, 0xb4, 0x00,
-       0x33, 0xea, 0x5e, 0x59,
+       0x33, 0xea, 0x68, 0x59,
        0x33, 0xea, 0x00, 0x00,
-       0x00, 0xe2, 0x7e, 0x59,
-       0x00, 0xe2, 0xdc, 0x42,
-       0xff, 0x42, 0xee, 0x6b,
-       0x01, 0x41, 0xe2, 0x6b,
-       0x02, 0x41, 0xe2, 0x7b,
-       0xff, 0x42, 0xee, 0x6b,
-       0x01, 0x41, 0xe2, 0x6b,
-       0x02, 0x41, 0xe2, 0x7b,
-       0xff, 0x42, 0xee, 0x7b,
-       0x04, 0x4c, 0xe2, 0x6b,
+       0x00, 0xe2, 0x88, 0x59,
+       0x00, 0xe2, 0xf0, 0x42,
+       0xff, 0x42, 0x02, 0x6c,
+       0x01, 0x41, 0xf6, 0x6b,
+       0x02, 0x41, 0xf6, 0x7b,
+       0xff, 0x42, 0x02, 0x6c,
+       0x01, 0x41, 0xf6, 0x6b,
+       0x02, 0x41, 0xf6, 0x7b,
+       0xff, 0x42, 0x02, 0x7c,
+       0x04, 0x4c, 0xf6, 0x6b,
        0xe0, 0x41, 0x78, 0x0e,
        0x01, 0x44, 0xd4, 0x31,
-       0xff, 0x42, 0xf6, 0x7b,
-       0x04, 0x4c, 0xf6, 0x6b,
+       0xff, 0x42, 0x0a, 0x7c,
+       0x04, 0x4c, 0x0a, 0x6c,
        0xe0, 0x41, 0x78, 0x0a,
-       0xe0, 0x3c, 0x15, 0x62,
+       0xe0, 0x3c, 0x29, 0x62,
        0xff, 0xea, 0xca, 0x09,
        0x01, 0xe2, 0xc8, 0x31,
        0x01, 0x46, 0xda, 0x35,
        0x01, 0x44, 0xd4, 0x35,
        0x10, 0xea, 0x80, 0x00,
        0x01, 0xe2, 0x6e, 0x36,
-       0x04, 0xa6, 0x0e, 0x7c,
+       0x04, 0xa6, 0x22, 0x7c,
        0xff, 0xea, 0x5a, 0x09,
        0xff, 0xea, 0x4c, 0x0d,
-       0x01, 0xa6, 0x3a, 0x6c,
+       0x01, 0xa6, 0x4e, 0x6c,
        0x10, 0xad, 0x84, 0x78,
-       0x80, 0xad, 0x32, 0x6c,
+       0x80, 0xad, 0x46, 0x6c,
        0x08, 0xad, 0x84, 0x68,
-       0x20, 0x19, 0x26, 0x7c,
+       0x20, 0x19, 0x3a, 0x7c,
        0x80, 0xea, 0xb2, 0x01,
        0x11, 0x00, 0x00, 0x10,
-       0x02, 0xa6, 0x22, 0x7c,
+       0x02, 0xa6, 0x36, 0x7c,
        0xff, 0xea, 0xb2, 0x0d,
        0x11, 0x00, 0x00, 0x10,
        0xff, 0xea, 0xb2, 0x09,
@@ -561,7 +571,7 @@ static uint8_t seqprog[] = {
        0x00, 0x86, 0x0d, 0x23,
        0x00, 0x87, 0x0f, 0x23,
        0x01, 0x84, 0xc5, 0x31,
-       0x80, 0x83, 0x5d, 0x7c,
+       0x80, 0x83, 0x71, 0x7c,
        0x02, 0xe2, 0xc4, 0x01,
        0xff, 0xea, 0x4c, 0x09,
        0x01, 0xe2, 0x36, 0x30,
@@ -572,75 +582,75 @@ static uint8_t seqprog[] = {
        0xfe, 0xa6, 0x4c, 0x0d,
        0x0b, 0x98, 0xe1, 0x30,
        0xfd, 0xa4, 0x49, 0x09,
-       0x80, 0xa3, 0x71, 0x7c,
+       0x80, 0xa3, 0x85, 0x7c,
        0x02, 0xa4, 0x48, 0x01,
        0x01, 0xa4, 0x36, 0x30,
        0xa8, 0xea, 0x32, 0x00,
        0xfd, 0xa4, 0x49, 0x0b,
        0x05, 0xa3, 0x07, 0x33,
-       0x80, 0x83, 0x7d, 0x6c,
+       0x80, 0x83, 0x91, 0x6c,
        0x02, 0xea, 0x4c, 0x05,
        0xff, 0xea, 0x4c, 0x0d,
-       0x00, 0xe2, 0x56, 0x59,
-       0x02, 0xa6, 0x10, 0x6c,
+       0x00, 0xe2, 0x60, 0x59,
+       0x02, 0xa6, 0x24, 0x6c,
        0x80, 0xf9, 0xf2, 0x05,
-       0xc0, 0x39, 0x8b, 0x7c,
-       0x03, 0xea, 0x64, 0x59,
+       0xc0, 0x39, 0x9f, 0x7c,
+       0x03, 0xea, 0x6e, 0x59,
        0x03, 0xea, 0x04, 0x00,
-       0x20, 0x39, 0xaf, 0x7c,
-       0x01, 0x84, 0x95, 0x6c,
-       0x06, 0xea, 0x64, 0x59,
+       0x20, 0x39, 0xc3, 0x7c,
+       0x01, 0x84, 0xa9, 0x6c,
+       0x06, 0xea, 0x6e, 0x59,
        0x06, 0xea, 0x04, 0x00,
-       0x00, 0xe2, 0xb2, 0x44,
+       0x00, 0xe2, 0xc6, 0x44,
        0x01, 0x00, 0x6c, 0x32,
-       0xee, 0x00, 0x9e, 0x6c,
+       0xee, 0x00, 0xb2, 0x6c,
        0x05, 0xea, 0xb4, 0x00,
-       0x33, 0xea, 0x5e, 0x59,
+       0x33, 0xea, 0x68, 0x59,
        0x33, 0xea, 0x00, 0x00,
        0x80, 0x3d, 0x7a, 0x00,
-       0xfc, 0x42, 0xa0, 0x7c,
+       0xfc, 0x42, 0xb4, 0x7c,
        0x7f, 0x3d, 0x7a, 0x08,
-       0x00, 0x36, 0x5f, 0x59,
+       0x00, 0x36, 0x69, 0x59,
        0x01, 0x36, 0x01, 0x30,
-       0x09, 0xea, 0x64, 0x59,
+       0x09, 0xea, 0x6e, 0x59,
        0x09, 0xea, 0x04, 0x00,
-       0x00, 0xe2, 0x14, 0x42,
-       0x01, 0xa4, 0x95, 0x6c,
-       0x00, 0xe2, 0x68, 0x5c,
+       0x00, 0xe2, 0x28, 0x42,
+       0x01, 0xa4, 0xa9, 0x6c,
+       0x00, 0xe2, 0x7c, 0x5c,
        0x20, 0x39, 0x73, 0x02,
        0x01, 0x00, 0x6c, 0x32,
-       0x02, 0xa6, 0xba, 0x7c,
-       0x00, 0xe2, 0x7e, 0x5c,
+       0x02, 0xa6, 0xce, 0x7c,
+       0x00, 0xe2, 0x92, 0x5c,
        0x00, 0xe2, 0x76, 0x58,
        0x00, 0xe2, 0x86, 0x58,
        0x00, 0xe2, 0x5a, 0x58,
-       0x00, 0x36, 0x5f, 0x59,
+       0x00, 0x36, 0x69, 0x59,
        0x01, 0x36, 0x01, 0x30,
-       0x20, 0x19, 0xba, 0x6c,
-       0x00, 0xe2, 0xea, 0x5c,
-       0x04, 0x19, 0xd4, 0x6c,
+       0x20, 0x19, 0xce, 0x6c,
+       0x00, 0xe2, 0xfe, 0x5c,
+       0x04, 0x19, 0xe8, 0x6c,
        0x02, 0x19, 0x32, 0x00,
-       0x01, 0x84, 0xd5, 0x7c,
-       0x01, 0x1b, 0xce, 0x7c,
-       0x01, 0x1a, 0xd4, 0x6c,
-       0x00, 0xe2, 0x84, 0x44,
-       0x80, 0x4b, 0xda, 0x6c,
-       0x01, 0x4c, 0xd6, 0x7c,
-       0x03, 0x42, 0x84, 0x6c,
-       0x00, 0xe2, 0x0a, 0x5c,
+       0x01, 0x84, 0xe9, 0x7c,
+       0x01, 0x1b, 0xe2, 0x7c,
+       0x01, 0x1a, 0xe8, 0x6c,
+       0x00, 0xe2, 0x98, 0x44,
+       0x80, 0x4b, 0xee, 0x6c,
+       0x01, 0x4c, 0xea, 0x7c,
+       0x03, 0x42, 0x98, 0x6c,
+       0x00, 0xe2, 0x1e, 0x5c,
        0x80, 0xf9, 0xf2, 0x01,
-       0x04, 0x39, 0x15, 0x7a,
-       0x00, 0xe2, 0x14, 0x42,
-       0x08, 0x5d, 0xf2, 0x6c,
+       0x04, 0x39, 0x29, 0x7a,
+       0x00, 0xe2, 0x28, 0x42,
+       0x08, 0x5d, 0x06, 0x6d,
        0x00, 0xe2, 0x76, 0x58,
-       0x00, 0x36, 0x5f, 0x59,
+       0x00, 0x36, 0x69, 0x59,
        0x01, 0x36, 0x01, 0x30,
-       0x02, 0x1b, 0xe2, 0x7c,
-       0x08, 0x5d, 0xf0, 0x7c,
+       0x02, 0x1b, 0xf6, 0x7c,
+       0x08, 0x5d, 0x04, 0x7d,
        0x03, 0x68, 0x00, 0x37,
        0x01, 0x84, 0x09, 0x07,
-       0x80, 0x1b, 0xfc, 0x7c,
-       0x80, 0x84, 0xfd, 0x6c,
+       0x80, 0x1b, 0x10, 0x7d,
+       0x80, 0x84, 0x11, 0x6d,
        0xff, 0x85, 0x0b, 0x1b,
        0xff, 0x86, 0x0d, 0x23,
        0xff, 0x87, 0x0f, 0x23,
@@ -652,161 +662,164 @@ static uint8_t seqprog[] = {
        0xf9, 0xd9, 0xb2, 0x0d,
        0x01, 0xd9, 0xb2, 0x05,
        0x01, 0x52, 0x48, 0x31,
-       0x20, 0xa4, 0x26, 0x7d,
-       0x20, 0x5b, 0x26, 0x7d,
-       0x80, 0xf9, 0x34, 0x7d,
+       0x20, 0xa4, 0x3a, 0x7d,
+       0x20, 0x5b, 0x3a, 0x7d,
+       0x80, 0xf9, 0x48, 0x7d,
        0x02, 0xea, 0xb4, 0x00,
        0x11, 0x00, 0x00, 0x10,
-       0x04, 0x19, 0x40, 0x7d,
+       0x04, 0x19, 0x54, 0x7d,
        0xdf, 0x19, 0x32, 0x08,
-       0x60, 0x5b, 0x40, 0x6d,
-       0x01, 0x4c, 0x1a, 0x7d,
+       0x60, 0x5b, 0x54, 0x6d,
+       0x01, 0x4c, 0x2e, 0x7d,
        0x20, 0x19, 0x32, 0x00,
        0x01, 0xd9, 0xb2, 0x05,
        0x02, 0xea, 0xb4, 0x00,
        0x01, 0xd9, 0xb2, 0x05,
-       0x10, 0x5b, 0x38, 0x6d,
-       0x08, 0x5b, 0x42, 0x6d,
-       0x20, 0x5b, 0x32, 0x6d,
-       0x02, 0x5b, 0x62, 0x6d,
-       0x0e, 0xea, 0x64, 0x59,
+       0x10, 0x5b, 0x4c, 0x6d,
+       0x08, 0x5b, 0x56, 0x6d,
+       0x20, 0x5b, 0x46, 0x6d,
+       0x02, 0x5b, 0x76, 0x6d,
+       0x0e, 0xea, 0x6e, 0x59,
        0x0e, 0xea, 0x04, 0x00,
-       0x80, 0xf9, 0x22, 0x6d,
+       0x80, 0xf9, 0x36, 0x6d,
        0xdf, 0x5c, 0xb8, 0x08,
        0x01, 0xd9, 0xb2, 0x05,
-       0x01, 0xa4, 0x1d, 0x6e,
-       0x00, 0xe2, 0x68, 0x5c,
-       0x00, 0xe2, 0x6c, 0x5d,
+       0x01, 0xa4, 0x37, 0x6e,
+       0x00, 0xe2, 0x7c, 0x5c,
+       0x00, 0xe2, 0x80, 0x5d,
        0x01, 0x90, 0x21, 0x1b,
        0x01, 0xd9, 0xb2, 0x05,
-       0x00, 0xe2, 0x52, 0x5b,
+       0x00, 0xe2, 0x66, 0x5b,
        0xf3, 0x96, 0xd5, 0x19,
-       0x00, 0xe2, 0x50, 0x55,
-       0x80, 0x96, 0x51, 0x6d,
-       0x0f, 0xea, 0x64, 0x59,
+       0x00, 0xe2, 0x64, 0x55,
+       0x80, 0x96, 0x65, 0x6d,
+       0x0f, 0xea, 0x6e, 0x59,
        0x0f, 0xea, 0x04, 0x00,
-       0x00, 0xe2, 0x58, 0x45,
+       0x00, 0xe2, 0x6c, 0x45,
        0x04, 0x8c, 0xe1, 0x30,
        0x01, 0xea, 0xf2, 0x00,
        0x02, 0xea, 0x36, 0x00,
        0xa8, 0xea, 0x32, 0x00,
-       0xff, 0x97, 0x5f, 0x7d,
-       0x14, 0xea, 0x64, 0x59,
+       0xff, 0x97, 0x73, 0x7d,
+       0x14, 0xea, 0x6e, 0x59,
        0x14, 0xea, 0x04, 0x00,
-       0x00, 0xe2, 0xce, 0x5d,
+       0x00, 0xe2, 0xe2, 0x5d,
        0x01, 0xd9, 0xb2, 0x05,
        0x09, 0x80, 0xe1, 0x30,
        0x02, 0xea, 0x36, 0x00,
        0xa8, 0xea, 0x32, 0x00,
-       0x00, 0xe2, 0xc6, 0x5d,
+       0x00, 0xe2, 0xda, 0x5d,
        0x01, 0xd9, 0xb2, 0x05,
-       0x02, 0xa6, 0x7c, 0x7d,
-       0x00, 0xe2, 0x56, 0x59,
-       0x20, 0x5b, 0x8a, 0x6d,
-       0xfc, 0x42, 0x76, 0x7d,
-       0x10, 0x40, 0x78, 0x6d,
-       0x20, 0x4d, 0x7a, 0x7d,
-       0x08, 0x5d, 0x8a, 0x6d,
-       0x02, 0xa6, 0x10, 0x6c,
-       0x00, 0xe2, 0x56, 0x59,
-       0x20, 0x5b, 0x8a, 0x6d,
-       0x01, 0x1b, 0xaa, 0x6d,
-       0xfc, 0x42, 0x86, 0x7d,
-       0x10, 0x40, 0x88, 0x6d,
+       0x02, 0xa6, 0x90, 0x7d,
+       0x00, 0xe2, 0x60, 0x59,
+       0x20, 0x5b, 0x9e, 0x6d,
+       0xfc, 0x42, 0x8a, 0x7d,
+       0x10, 0x40, 0x8c, 0x6d,
+       0x20, 0x4d, 0x8e, 0x7d,
+       0x08, 0x5d, 0x9e, 0x6d,
+       0x02, 0xa6, 0x24, 0x6c,
+       0x00, 0xe2, 0x60, 0x59,
+       0x20, 0x5b, 0x9e, 0x6d,
+       0x01, 0x1b, 0xbe, 0x6d,
+       0xfc, 0x42, 0x9a, 0x7d,
+       0x10, 0x40, 0x9c, 0x6d,
        0x20, 0x4d, 0x84, 0x78,
        0x08, 0x5d, 0x84, 0x78,
        0x02, 0x19, 0x32, 0x00,
        0x01, 0x5b, 0x40, 0x31,
-       0x00, 0xe2, 0xea, 0x5c,
-       0x00, 0xe2, 0xcc, 0x5b,
+       0x00, 0xe2, 0xfe, 0x5c,
+       0x00, 0xe2, 0xe0, 0x5b,
        0x20, 0xea, 0xb6, 0x00,
-       0x00, 0xe2, 0x0a, 0x5c,
+       0x00, 0xe2, 0x1e, 0x5c,
        0x20, 0x5c, 0xb8, 0x00,
-       0x04, 0x19, 0xa0, 0x6d,
-       0x01, 0x1a, 0xa0, 0x6d,
-       0x00, 0xe2, 0x56, 0x59,
+       0x04, 0x19, 0xb4, 0x6d,
+       0x01, 0x1a, 0xb4, 0x6d,
+       0x00, 0xe2, 0x60, 0x59,
        0x01, 0x1a, 0x84, 0x78,
        0x80, 0xf9, 0xf2, 0x01,
-       0x20, 0xa0, 0x04, 0x7e,
+       0x20, 0xa0, 0x18, 0x7e,
        0xff, 0x90, 0x21, 0x1b,
-       0x08, 0x92, 0x63, 0x6b,
+       0x08, 0x92, 0x77, 0x6b,
        0x02, 0xea, 0xb4, 0x04,
        0x01, 0xa4, 0x49, 0x03,
-       0x40, 0x5b, 0xba, 0x6d,
-       0x00, 0xe2, 0x56, 0x59,
-       0x40, 0x5b, 0xba, 0x6d,
-       0x04, 0x5d, 0x1e, 0x7e,
-       0x01, 0x1a, 0x1e, 0x7e,
+       0x40, 0x5b, 0xce, 0x6d,
+       0x00, 0xe2, 0x60, 0x59,
+       0x40, 0x5b, 0xce, 0x6d,
+       0x04, 0x5d, 0x38, 0x7e,
+       0x01, 0x1a, 0x38, 0x7e,
        0x20, 0x4d, 0x84, 0x78,
-       0x40, 0x5b, 0x04, 0x7e,
-       0x04, 0x5d, 0x1e, 0x7e,
-       0x01, 0x1a, 0x1e, 0x7e,
+       0x40, 0x5b, 0x18, 0x7e,
+       0x04, 0x5d, 0x38, 0x7e,
+       0x01, 0x1a, 0x38, 0x7e,
        0x80, 0xf9, 0xf2, 0x01,
        0xff, 0x90, 0x21, 0x1b,
-       0x08, 0x92, 0x63, 0x6b,
+       0x08, 0x92, 0x77, 0x6b,
        0x02, 0xea, 0xb4, 0x04,
-       0x00, 0xe2, 0x56, 0x59,
+       0x00, 0xe2, 0x60, 0x59,
        0x01, 0x1b, 0x84, 0x78,
        0x80, 0xf9, 0xf2, 0x01,
        0x02, 0xea, 0xb4, 0x04,
-       0x00, 0xe2, 0x56, 0x59,
-       0x01, 0x1b, 0xe2, 0x6d,
-       0x40, 0x5b, 0xf0, 0x7d,
-       0x01, 0x1b, 0xe2, 0x6d,
+       0x00, 0xe2, 0x60, 0x59,
+       0x01, 0x1b, 0xf6, 0x6d,
+       0x40, 0x5b, 0x04, 0x7e,
+       0x01, 0x1b, 0xf6, 0x6d,
        0x02, 0x19, 0x32, 0x00,
        0x01, 0x1a, 0x84, 0x78,
        0x80, 0xf9, 0xf2, 0x01,
        0xff, 0xea, 0x10, 0x03,
        0x08, 0x92, 0x25, 0x03,
-       0x00, 0xe2, 0x62, 0x43,
-       0x01, 0x1a, 0xec, 0x7d,
-       0x40, 0x5b, 0xe8, 0x7d,
-       0x01, 0x1a, 0xd6, 0x6d,
+       0x00, 0xe2, 0x76, 0x43,
+       0x01, 0x1a, 0x00, 0x7e,
+       0x40, 0x5b, 0xfc, 0x7d,
+       0x01, 0x1a, 0xea, 0x6d,
        0xfc, 0x42, 0x84, 0x78,
-       0x01, 0x1a, 0xf0, 0x6d,
-       0x10, 0xea, 0x64, 0x59,
+       0x01, 0x1a, 0x04, 0x6e,
+       0x10, 0xea, 0x6e, 0x59,
        0x10, 0xea, 0x04, 0x00,
        0xfc, 0x42, 0x84, 0x78,
-       0x10, 0x40, 0xf6, 0x6d,
+       0x10, 0x40, 0x0a, 0x6e,
        0x20, 0x4d, 0x84, 0x78,
-       0x40, 0x5b, 0xd6, 0x6d,
+       0x40, 0x5b, 0xea, 0x6d,
        0x01, 0x1a, 0x84, 0x78,
        0x01, 0x90, 0x21, 0x1b,
        0x30, 0x3f, 0xc0, 0x09,
        0x30, 0xe0, 0x84, 0x60,
        0x40, 0x4b, 0x84, 0x68,
        0xff, 0xea, 0x52, 0x01,
-       0xee, 0x00, 0x0c, 0x6e,
+       0xee, 0x00, 0x20, 0x6e,
        0x80, 0xf9, 0xf2, 0x01,
        0xff, 0x90, 0x21, 0x1b,
        0x02, 0xea, 0xb4, 0x00,
        0x20, 0xea, 0x9a, 0x00,
-       0xf3, 0x42, 0x16, 0x6e,
-       0x12, 0xea, 0x64, 0x59,
+       0x04, 0x41, 0x26, 0x7e,
+       0x08, 0xea, 0x98, 0x00,
+       0x08, 0x57, 0xae, 0x00,
+       0xf3, 0x42, 0x30, 0x6e,
+       0x12, 0xea, 0x6e, 0x59,
        0x12, 0xea, 0x04, 0x00,
-       0x00, 0xe2, 0x14, 0x42,
-       0x0d, 0xea, 0x64, 0x59,
+       0x00, 0xe2, 0x28, 0x42,
+       0x0d, 0xea, 0x6e, 0x59,
        0x0d, 0xea, 0x04, 0x00,
-       0x00, 0xe2, 0x14, 0x42,
+       0x00, 0xe2, 0x28, 0x42,
        0x01, 0x90, 0x21, 0x1b,
-       0x11, 0xea, 0x64, 0x59,
+       0x11, 0xea, 0x6e, 0x59,
        0x11, 0xea, 0x04, 0x00,
-       0x00, 0xe2, 0x52, 0x5b,
+       0x00, 0xe2, 0x66, 0x5b,
        0x08, 0x5a, 0xb4, 0x00,
-       0x00, 0xe2, 0x44, 0x5e,
+       0x00, 0xe2, 0x5e, 0x5e,
        0xa8, 0xea, 0x32, 0x00,
-       0x00, 0xe2, 0x56, 0x59,
-       0x80, 0x1a, 0x32, 0x7e,
-       0x00, 0xe2, 0x44, 0x5e,
+       0x00, 0xe2, 0x60, 0x59,
+       0x80, 0x1a, 0x4c, 0x7e,
+       0x00, 0xe2, 0x5e, 0x5e,
        0x80, 0x19, 0x32, 0x00,
-       0x40, 0x5b, 0x38, 0x6e,
-       0x08, 0x5a, 0x38, 0x7e,
+       0x40, 0x5b, 0x52, 0x6e,
+       0x08, 0x5a, 0x52, 0x7e,
        0x20, 0x4d, 0x84, 0x78,
        0x02, 0x84, 0x09, 0x03,
-       0x40, 0x5b, 0x04, 0x7e,
+       0x40, 0x5b, 0x18, 0x7e,
        0xff, 0x90, 0x21, 0x1b,
        0x80, 0xf9, 0xf2, 0x01,
-       0x08, 0x92, 0x63, 0x6b,
+       0x08, 0x92, 0x77, 0x6b,
        0x02, 0xea, 0xb4, 0x04,
        0x01, 0x40, 0xe1, 0x30,
        0x05, 0x41, 0xe3, 0x98,
@@ -1039,138 +1052,138 @@ static struct patch {
        { ahd_patch0_func, 64, 1, 1 },
        { ahd_patch2_func, 67, 1, 2 },
        { ahd_patch0_func, 68, 1, 1 },
-       { ahd_patch4_func, 116, 1, 1 },
-       { ahd_patch2_func, 175, 3, 1 },
-       { ahd_patch1_func, 178, 2, 1 },
-       { ahd_patch5_func, 180, 1, 1 },
-       { ahd_patch2_func, 189, 1, 2 },
-       { ahd_patch0_func, 190, 1, 1 },
-       { ahd_patch6_func, 191, 2, 2 },
-       { ahd_patch0_func, 193, 6, 3 },
-       { ahd_patch2_func, 196, 1, 2 },
-       { ahd_patch0_func, 197, 1, 1 },
-       { ahd_patch2_func, 200, 1, 2 },
-       { ahd_patch0_func, 201, 1, 1 },
-       { ahd_patch3_func, 203, 1, 1 },
-       { ahd_patch7_func, 204, 3, 1 },
-       { ahd_patch3_func, 213, 1, 1 },
-       { ahd_patch5_func, 214, 16, 2 },
-       { ahd_patch0_func, 230, 1, 1 },
-       { ahd_patch8_func, 250, 2, 1 },
-       { ahd_patch1_func, 254, 1, 2 },
-       { ahd_patch0_func, 255, 1, 1 },
-       { ahd_patch7_func, 258, 3, 1 },
-       { ahd_patch1_func, 273, 1, 2 },
-       { ahd_patch0_func, 274, 1, 1 },
-       { ahd_patch1_func, 277, 1, 2 },
-       { ahd_patch0_func, 278, 1, 1 },
-       { ahd_patch2_func, 281, 1, 2 },
-       { ahd_patch0_func, 282, 1, 1 },
-       { ahd_patch9_func, 295, 2, 2 },
-       { ahd_patch0_func, 297, 1, 1 },
-       { ahd_patch1_func, 339, 1, 2 },
-       { ahd_patch0_func, 340, 1, 1 },
-       { ahd_patch2_func, 348, 1, 2 },
-       { ahd_patch0_func, 349, 1, 1 },
-       { ahd_patch2_func, 352, 1, 2 },
-       { ahd_patch0_func, 353, 1, 1 },
-       { ahd_patch1_func, 359, 1, 2 },
-       { ahd_patch0_func, 360, 1, 1 },
-       { ahd_patch1_func, 362, 1, 2 },
+       { ahd_patch4_func, 115, 1, 1 },
+       { ahd_patch2_func, 180, 3, 1 },
+       { ahd_patch1_func, 183, 2, 1 },
+       { ahd_patch5_func, 185, 1, 1 },
+       { ahd_patch2_func, 194, 1, 2 },
+       { ahd_patch0_func, 195, 1, 1 },
+       { ahd_patch6_func, 196, 2, 2 },
+       { ahd_patch0_func, 198, 6, 3 },
+       { ahd_patch2_func, 201, 1, 2 },
+       { ahd_patch0_func, 202, 1, 1 },
+       { ahd_patch2_func, 205, 1, 2 },
+       { ahd_patch0_func, 206, 1, 1 },
+       { ahd_patch3_func, 208, 1, 1 },
+       { ahd_patch7_func, 209, 3, 1 },
+       { ahd_patch3_func, 218, 1, 1 },
+       { ahd_patch5_func, 219, 16, 2 },
+       { ahd_patch0_func, 235, 1, 1 },
+       { ahd_patch8_func, 260, 2, 1 },
+       { ahd_patch1_func, 264, 1, 2 },
+       { ahd_patch0_func, 265, 1, 1 },
+       { ahd_patch7_func, 268, 3, 1 },
+       { ahd_patch1_func, 283, 1, 2 },
+       { ahd_patch0_func, 284, 1, 1 },
+       { ahd_patch1_func, 287, 1, 2 },
+       { ahd_patch0_func, 288, 1, 1 },
+       { ahd_patch2_func, 291, 1, 2 },
+       { ahd_patch0_func, 292, 1, 1 },
+       { ahd_patch9_func, 305, 2, 2 },
+       { ahd_patch0_func, 307, 1, 1 },
+       { ahd_patch1_func, 349, 1, 2 },
+       { ahd_patch0_func, 350, 1, 1 },
+       { ahd_patch2_func, 358, 1, 2 },
+       { ahd_patch0_func, 359, 1, 1 },
+       { ahd_patch2_func, 362, 1, 2 },
        { ahd_patch0_func, 363, 1, 1 },
-       { ahd_patch10_func, 382, 1, 1 },
-       { ahd_patch10_func, 385, 1, 1 },
-       { ahd_patch10_func, 387, 1, 1 },
-       { ahd_patch10_func, 399, 1, 1 },
-       { ahd_patch1_func, 409, 1, 2 },
-       { ahd_patch0_func, 410, 1, 1 },
-       { ahd_patch1_func, 412, 1, 2 },
-       { ahd_patch0_func, 413, 1, 1 },
-       { ahd_patch1_func, 421, 1, 2 },
-       { ahd_patch0_func, 422, 1, 1 },
-       { ahd_patch2_func, 435, 1, 2 },
-       { ahd_patch0_func, 436, 1, 1 },
-       { ahd_patch11_func, 472, 1, 1 },
-       { ahd_patch1_func, 480, 1, 2 },
-       { ahd_patch0_func, 481, 1, 1 },
-       { ahd_patch2_func, 493, 1, 2 },
-       { ahd_patch0_func, 494, 1, 1 },
-       { ahd_patch12_func, 497, 6, 2 },
-       { ahd_patch0_func, 503, 1, 1 },
-       { ahd_patch13_func, 524, 7, 1 },
-       { ahd_patch14_func, 533, 1, 1 },
-       { ahd_patch15_func, 542, 1, 1 },
-       { ahd_patch16_func, 543, 1, 2 },
-       { ahd_patch0_func, 544, 1, 1 },
-       { ahd_patch17_func, 547, 1, 1 },
-       { ahd_patch16_func, 548, 1, 1 },
-       { ahd_patch18_func, 559, 1, 2 },
-       { ahd_patch0_func, 560, 1, 1 },
-       { ahd_patch1_func, 579, 1, 2 },
-       { ahd_patch0_func, 580, 1, 1 },
-       { ahd_patch1_func, 583, 1, 2 },
-       { ahd_patch0_func, 584, 1, 1 },
-       { ahd_patch2_func, 589, 1, 2 },
+       { ahd_patch1_func, 369, 1, 2 },
+       { ahd_patch0_func, 370, 1, 1 },
+       { ahd_patch1_func, 372, 1, 2 },
+       { ahd_patch0_func, 373, 1, 1 },
+       { ahd_patch10_func, 392, 1, 1 },
+       { ahd_patch10_func, 395, 1, 1 },
+       { ahd_patch10_func, 397, 1, 1 },
+       { ahd_patch10_func, 409, 1, 1 },
+       { ahd_patch1_func, 419, 1, 2 },
+       { ahd_patch0_func, 420, 1, 1 },
+       { ahd_patch1_func, 422, 1, 2 },
+       { ahd_patch0_func, 423, 1, 1 },
+       { ahd_patch1_func, 431, 1, 2 },
+       { ahd_patch0_func, 432, 1, 1 },
+       { ahd_patch2_func, 445, 1, 2 },
+       { ahd_patch0_func, 446, 1, 1 },
+       { ahd_patch11_func, 482, 1, 1 },
+       { ahd_patch1_func, 490, 1, 2 },
+       { ahd_patch0_func, 491, 1, 1 },
+       { ahd_patch2_func, 503, 1, 2 },
+       { ahd_patch0_func, 504, 1, 1 },
+       { ahd_patch12_func, 507, 6, 2 },
+       { ahd_patch0_func, 513, 1, 1 },
+       { ahd_patch13_func, 534, 7, 1 },
+       { ahd_patch14_func, 543, 1, 1 },
+       { ahd_patch15_func, 552, 1, 1 },
+       { ahd_patch16_func, 553, 1, 2 },
+       { ahd_patch0_func, 554, 1, 1 },
+       { ahd_patch17_func, 557, 1, 1 },
+       { ahd_patch16_func, 558, 1, 1 },
+       { ahd_patch18_func, 569, 1, 2 },
+       { ahd_patch0_func, 570, 1, 1 },
+       { ahd_patch1_func, 589, 1, 2 },
        { ahd_patch0_func, 590, 1, 1 },
-       { ahd_patch2_func, 594, 1, 2 },
-       { ahd_patch0_func, 595, 1, 1 },
-       { ahd_patch1_func, 596, 1, 2 },
-       { ahd_patch0_func, 597, 1, 1 },
-       { ahd_patch2_func, 608, 1, 2 },
-       { ahd_patch0_func, 609, 1, 1 },
-       { ahd_patch19_func, 613, 1, 1 },
-       { ahd_patch20_func, 618, 1, 1 },
-       { ahd_patch21_func, 619, 2, 1 },
-       { ahd_patch20_func, 623, 1, 2 },
-       { ahd_patch0_func, 624, 1, 1 },
-       { ahd_patch2_func, 627, 1, 2 },
-       { ahd_patch0_func, 628, 1, 1 },
-       { ahd_patch2_func, 643, 1, 2 },
-       { ahd_patch0_func, 644, 1, 1 },
-       { ahd_patch13_func, 645, 14, 1 },
-       { ahd_patch1_func, 663, 1, 2 },
-       { ahd_patch0_func, 664, 1, 1 },
-       { ahd_patch13_func, 665, 1, 1 },
-       { ahd_patch1_func, 677, 1, 2 },
-       { ahd_patch0_func, 678, 1, 1 },
-       { ahd_patch1_func, 685, 1, 2 },
-       { ahd_patch0_func, 686, 1, 1 },
-       { ahd_patch19_func, 709, 1, 1 },
-       { ahd_patch19_func, 747, 1, 1 },
-       { ahd_patch1_func, 758, 1, 2 },
-       { ahd_patch0_func, 759, 1, 1 },
-       { ahd_patch1_func, 776, 1, 2 },
-       { ahd_patch0_func, 777, 1, 1 },
-       { ahd_patch1_func, 779, 1, 2 },
-       { ahd_patch0_func, 780, 1, 1 },
-       { ahd_patch1_func, 783, 1, 2 },
-       { ahd_patch0_func, 784, 1, 1 },
-       { ahd_patch22_func, 786, 1, 2 },
-       { ahd_patch0_func, 787, 2, 1 },
-       { ahd_patch23_func, 790, 4, 2 },
-       { ahd_patch0_func, 794, 1, 1 },
-       { ahd_patch23_func, 802, 11, 1 }
+       { ahd_patch1_func, 593, 1, 2 },
+       { ahd_patch0_func, 594, 1, 1 },
+       { ahd_patch2_func, 599, 1, 2 },
+       { ahd_patch0_func, 600, 1, 1 },
+       { ahd_patch2_func, 604, 1, 2 },
+       { ahd_patch0_func, 605, 1, 1 },
+       { ahd_patch1_func, 606, 1, 2 },
+       { ahd_patch0_func, 607, 1, 1 },
+       { ahd_patch2_func, 618, 1, 2 },
+       { ahd_patch0_func, 619, 1, 1 },
+       { ahd_patch19_func, 623, 1, 1 },
+       { ahd_patch20_func, 628, 1, 1 },
+       { ahd_patch21_func, 629, 2, 1 },
+       { ahd_patch20_func, 633, 1, 2 },
+       { ahd_patch0_func, 634, 1, 1 },
+       { ahd_patch2_func, 637, 1, 2 },
+       { ahd_patch0_func, 638, 1, 1 },
+       { ahd_patch2_func, 653, 1, 2 },
+       { ahd_patch0_func, 654, 1, 1 },
+       { ahd_patch13_func, 655, 14, 1 },
+       { ahd_patch1_func, 673, 1, 2 },
+       { ahd_patch0_func, 674, 1, 1 },
+       { ahd_patch13_func, 675, 1, 1 },
+       { ahd_patch1_func, 687, 1, 2 },
+       { ahd_patch0_func, 688, 1, 1 },
+       { ahd_patch1_func, 695, 1, 2 },
+       { ahd_patch0_func, 696, 1, 1 },
+       { ahd_patch19_func, 719, 1, 1 },
+       { ahd_patch19_func, 757, 1, 1 },
+       { ahd_patch1_func, 768, 1, 2 },
+       { ahd_patch0_func, 769, 1, 1 },
+       { ahd_patch7_func, 785, 3, 1 },
+       { ahd_patch1_func, 789, 1, 2 },
+       { ahd_patch0_func, 790, 1, 1 },
+       { ahd_patch1_func, 792, 1, 2 },
+       { ahd_patch0_func, 793, 1, 1 },
+       { ahd_patch1_func, 796, 1, 2 },
+       { ahd_patch0_func, 797, 1, 1 },
+       { ahd_patch22_func, 799, 1, 2 },
+       { ahd_patch0_func, 800, 2, 1 },
+       { ahd_patch23_func, 803, 4, 2 },
+       { ahd_patch0_func, 807, 1, 1 },
+       { ahd_patch23_func, 815, 11, 1 }
 };
 
 static struct cs {
        uint16_t        begin;
        uint16_t        end;
 } critical_sections[] = {
-       { 17, 28 },
-       { 29, 30 },
+       { 17, 30 },
        { 47, 58 },
        { 61, 63 },
        { 65, 66 },
        { 72, 92 },
-       { 110, 137 },
-       { 138, 175 },
-       { 180, 188 },
-       { 213, 264 },
-       { 425, 433 },
-       { 443, 445 },
-       { 448, 457 },
-       { 709, 739 },
-       { 749, 753 }
+       { 110, 142 },
+       { 143, 180 },
+       { 185, 193 },
+       { 218, 274 },
+       { 435, 443 },
+       { 453, 455 },
+       { 458, 467 },
+       { 719, 749 },
+       { 759, 763 }
 };
 
 static const int num_critical_sections = sizeof(critical_sections)
index f936b691232f90d4d020998d8a3420508b36cc85..924102720b141fe969f0891924fa1384e8269a2f 100644 (file)
@@ -37,7 +37,7 @@
  * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  * POSSIBILITY OF SUCH DAMAGES.
  *
- * $Id: //depot/aic7xxx/aic7xxx/aicasm/aicasm.c#22 $
+ * $Id: //depot/aic7xxx/aic7xxx/aicasm/aicasm.c#23 $
  *
  * $FreeBSD$
  */
@@ -609,10 +609,10 @@ output_listing(char *ifilename)
 
                while (line < cur_instr->srcline) {
                        fgets(buf, sizeof(buf), ifile);
-                               fprintf(listfile, "\t\t%s", buf);
+                               fprintf(listfile, "             \t%s", buf);
                                line++;
                }
-               fprintf(listfile, "%03x %02x%02x%02x%02x", instrptr,
+               fprintf(listfile, "%04x %02x%02x%02x%02x", instrptr,
 #ifdef __LITTLE_ENDIAN
                        cur_instr->format.bytes[0],
                        cur_instr->format.bytes[1],
@@ -624,14 +624,23 @@ output_listing(char *ifilename)
                        cur_instr->format.bytes[1],
                        cur_instr->format.bytes[0]);
 #endif
-               fgets(buf, sizeof(buf), ifile);
-               fprintf(listfile, "\t%s", buf);
-               line++;
+               /*
+                * Macro expansions can cause several instructions
+                * to be output for a single source line.  Only
+                * advance the line once in these cases.
+                */
+               if (line == cur_instr->srcline) {
+                       fgets(buf, sizeof(buf), ifile);
+                       fprintf(listfile, "\t%s", buf);
+                       line++;
+               } else {
+                       fprintf(listfile, "\n");
+               }
                instrptr++;
        }
        /* Dump the remainder of the file */
        while(fgets(buf, sizeof(buf), ifile) != NULL)
-               fprintf(listfile, "\t\t%s", buf);
+               fprintf(listfile, "             %s", buf);
 
        fclose(ifile);
 }
index 67e046d966254929af3e88849bb7c8baa88fcaf5..c328596def3c557f25c4b6bc88ec3bba94bfa3d8 100644 (file)
@@ -38,7 +38,7 @@
  * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  * POSSIBILITY OF SUCH DAMAGES.
  *
- * $Id: //depot/aic7xxx/aic7xxx/aicasm/aicasm_gram.y#29 $
+ * $Id: //depot/aic7xxx/aic7xxx/aicasm/aicasm_gram.y#30 $
  *
  * $FreeBSD$
  */
@@ -157,6 +157,8 @@ static int  is_download_const(expression_t *immed);
 
 %token T_END_CS
 
+%token T_PAD_PAGE
+
 %token T_FIELD
 
 %token T_ENUM
@@ -189,6 +191,10 @@ static int  is_download_const(expression_t *immed);
 
 %token <value> T_OR
 
+/* 16 bit extensions */
+%token <value> T_OR16 T_AND16 T_XOR16 T_ADD16
+%token <value> T_ADC16 T_MVI16 T_TEST16 T_CMP16 T_CMPXCHG
+
 %token T_RET
 
 %token T_NOP
@@ -207,7 +213,7 @@ static int  is_download_const(expression_t *immed);
 
 %type <expression> expression immediate immediate_or_a
 
-%type <value> export ret f1_opcode f2_opcode jmp_jc_jnc_call jz_jnz je_jne
+%type <value> export ret f1_opcode f2_opcode f4_opcode jmp_jc_jnc_call jz_jnz je_jne
 
 %type <value> mode_value mode_list macro_arglist
 
@@ -1304,6 +1310,15 @@ f2_opcode:
 |      T_ROR { $$ = AIC_OP_ROR; }
 ;
 
+f4_opcode:
+       T_OR16  { $$ = AIC_OP_OR16; }
+|      T_AND16 { $$ = AIC_OP_AND16; }
+|      T_XOR16 { $$ = AIC_OP_XOR16; }
+|      T_ADD16 { $$ = AIC_OP_ADD16; }
+|      T_ADC16 { $$ = AIC_OP_ADC16; }
+|      T_MVI16 { $$ = AIC_OP_MVI16; }
+;
+
 code:
        f2_opcode destination ',' expression opt_source ret ';'
        {
index e64f802bbaaa165884bafc117082eab38c23f701..9df9e2ce3538e27a711e7ad6ee4fc5aa595ccdf3 100644 (file)
  * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  * POSSIBILITY OF SUCH DAMAGES.
  *
- * $Id: //depot/aic7xxx/aic7xxx/aicasm/aicasm_insformat.h#11 $
+ * $Id: //depot/aic7xxx/aic7xxx/aicasm/aicasm_insformat.h#12 $
  *
  * $FreeBSD$
  */
 
 #include <asm/byteorder.h>
 
+/* 8bit ALU logic operations */
 struct ins_format1 {
 #ifdef __LITTLE_ENDIAN
        uint32_t        immediate       : 8,
@@ -62,6 +63,7 @@ struct ins_format1 {
 #endif
 };
 
+/* 8bit ALU shift/rotate operations */
 struct ins_format2 {
 #ifdef __LITTLE_ENDIAN
        uint32_t        shift_control   : 8,
@@ -80,6 +82,7 @@ struct ins_format2 {
 #endif
 };
 
+/* 8bit branch control operations */
 struct ins_format3 {
 #ifdef __LITTLE_ENDIAN
        uint32_t        immediate       : 8,
@@ -96,10 +99,68 @@ struct ins_format3 {
 #endif
 };
 
+/* 16bit ALU logic operations */
+struct ins_format4 {
+#ifdef __LITTLE_ENDIAN
+       uint32_t        opcode_ext      : 8,
+                       source          : 9,
+                       destination     : 9,
+                       ret             : 1,
+                       opcode          : 4,
+                       parity          : 1;
+#else
+       uint32_t        parity          : 1,
+                       opcode          : 4,
+                       ret             : 1,
+                       destination     : 9,
+                       source          : 9,
+                       opcode_ext      : 8;
+#endif
+};
+
+/* 16bit branch control operations */
+struct ins_format5 {
+#ifdef __LITTLE_ENDIAN
+       uint32_t        opcode_ext      : 8,
+                       source          : 9,
+                       address         : 10,
+                       opcode          : 4,
+                       parity          : 1;
+#else
+       uint32_t        parity          : 1,
+                       opcode          : 4,
+                       address         : 10,
+                       source          : 9,
+                       opcode_ext      : 8;
+#endif
+};
+
+/*  Far branch operations */
+struct ins_format6 {
+#ifdef __LITTLE_ENDIAN
+       uint32_t        page            : 3,
+                       opcode_ext      : 5,
+                       source          : 9,
+                       address         : 10,
+                       opcode          : 4,
+                       parity          : 1;
+#else
+       uint32_t        parity          : 1,
+                       opcode          : 4,
+                       address         : 10,
+                       source          : 9,
+                       opcode_ext      : 5,
+                       page            : 3;
+#endif
+};
+
 union ins_formats {
                struct ins_format1 format1;
                struct ins_format2 format2;
                struct ins_format3 format3;
+               struct ins_format4 format4;
+               struct ins_format5 format5;
+               struct ins_format6 format6;
                uint8_t            bytes[4];
                uint32_t           integer;
 };
@@ -118,6 +179,8 @@ struct instruction {
 #define        AIC_OP_ROL      0x5
 #define        AIC_OP_BMOV     0x6
 
+#define        AIC_OP_MVI16    0x7
+
 #define        AIC_OP_JMP      0x8
 #define AIC_OP_JC      0x9
 #define AIC_OP_JNC     0xa
@@ -131,3 +194,26 @@ struct instruction {
 #define        AIC_OP_SHL      0x10
 #define        AIC_OP_SHR      0x20
 #define        AIC_OP_ROR      0x30
+
+/* 16bit Ops. Low byte main opcode.  High byte extended opcode. */ 
+#define        AIC_OP_OR16     0x8005
+#define        AIC_OP_AND16    0x8105
+#define        AIC_OP_XOR16    0x8205
+#define        AIC_OP_ADD16    0x8305
+#define        AIC_OP_ADC16    0x8405
+#define AIC_OP_JNE16   0x8805
+#define AIC_OP_JNZ16   0x8905
+#define AIC_OP_JE16    0x8C05
+#define AIC_OP_JZ16    0x8B05
+#define AIC_OP_JMP16   0x9005
+#define AIC_OP_JC16    0x9105
+#define AIC_OP_JNC16   0x9205
+#define AIC_OP_CALL16  0x9305
+#define AIC_OP_CALL16  0x9305
+
+/* Page extension is low three bits of second opcode byte. */
+#define AIC_OP_JMPF    0xA005
+#define AIC_OP_CALLF   0xB005
+#define AIC_OP_JCF     0xC005
+#define AIC_OP_JNCF    0xD005
+#define AIC_OP_CMPXCHG 0xE005
index 45c0b233d0bc239ba99acbf39b8765f7e8bce7da..7c3983f868a9a8c171d9d165438abf5f187be6fd 100644 (file)
@@ -38,7 +38,7 @@
  * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  * POSSIBILITY OF SUCH DAMAGES.
  *
- * $Id: //depot/aic7xxx/aic7xxx/aicasm/aicasm_scan.l#19 $
+ * $Id: //depot/aic7xxx/aic7xxx/aicasm/aicasm_scan.l#20 $
  *
  * $FreeBSD$
  */
@@ -132,7 +132,7 @@ if[ \t]*\(          {
                                                *string_buf_ptr++ = *yptr++;
                                }
                        }
-
+else                   { return T_ELSE; }
 VERSION                        { return T_VERSION; }
 PREFIX                 { return T_PREFIX; }
 PATCH_ARG_LIST         { return T_PATCH_ARG_LIST; }
@@ -173,10 +173,6 @@ RW|RO|WO           {
                                        yylval.value = WO;
                                 return T_MODE;
                        }
-BEGIN_CRITICAL         { return T_BEGIN_CS; }
-END_CRITICAL           { return T_END_CS; }
-SET_SRC_MODE           { return T_SET_SRC_MODE; }
-SET_DST_MODE           { return T_SET_DST_MODE; }
 field                  { return T_FIELD; }
 enum                   { return T_ENUM; }
 mask                   { return T_MASK; }
@@ -192,6 +188,13 @@ none                       { return T_NONE; }
 sindex                 { return T_SINDEX; }
 A                      { return T_A; }
 
+       /* Instruction Formatting */
+PAD_PAGE               { return T_PAD_PAGE; }
+BEGIN_CRITICAL         { return T_BEGIN_CS; }
+END_CRITICAL           { return T_END_CS; }
+SET_SRC_MODE           { return T_SET_SRC_MODE; }
+SET_DST_MODE           { return T_SET_DST_MODE; }
+
        /* Opcodes */
 shl                    { return T_SHL; }
 shr                    { return T_SHR; }
@@ -223,7 +226,17 @@ and                        { return T_AND; }
 or                     { return T_OR;  }
 ret                    { return T_RET; }
 nop                    { return T_NOP; }
-else                   { return T_ELSE; }
+
+       /* ARP2 16bit extensions */
+or16                   { return T_OR16; }
+and16                  { return T_AND16; }
+xor16                  { return T_XOR16; }
+add16                  { return T_ADD16; }
+adc16                  { return T_ADC16; }
+mvi16                  { return T_MVI16; }
+test16                 { return T_TEST16; }
+cmp16                  { return T_CMP16; }
+cmpxchg                        { return T_CMPXCHG; }
 
        /* Allowed Symbols */
 \<\<                   { return T_EXPR_LSHIFT; }
index 557788ec4eec12cb09e51028bf6926363ff36589..fc3ca051ceed36dd431bcd0dab1dcf29dbc36cd9 100644 (file)
@@ -157,6 +157,9 @@ static const struct pci_device_id piix_pci_tbl[] = {
        { 0x8086, 0x27c0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata_ahci },
        { 0x8086, 0x27c4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata_ahci },
        { 0x8086, 0x2680, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata_ahci },
+       { 0x8086, 0x2820, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata_ahci },
+       { 0x8086, 0x2825, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata_ahci },
+       { 0x8086, 0x2828, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata_ahci },
 
        { }     /* terminate list */
 };
index c8a32cf47d738fba4e991d1c2f37e09c6a5b08c2..cbf825263f3b4a89512cf3ea18afc7aa74f92571 100644 (file)
@@ -246,6 +246,7 @@ struct ScsiReqBlk {
         * total_xfer_length in xferred. These values are restored in
         * pci_unmap_srb_sense. This is the only place xferred is used.
         */
+       unsigned char *virt_addr_req;   /* Saved virtual address of the request buffer */
        u32 xferred;                    /* Saved copy of total_xfer_length */
 
        u16 state;
@@ -2017,7 +2018,7 @@ static void sg_update_list(struct ScsiReqBlk *srb, u32 left)
        sg_verify_length(srb);
 
        /* we need the corresponding virtual address */
-       if (!segment) {
+       if (!segment || (srb->flag & AUTO_REQSENSE)) {
                srb->virt_addr += xferred;
                return;
        }
@@ -3318,6 +3319,7 @@ static void pci_unmap_srb_sense(struct AdapterCtlBlk *acb,
            srb->segment_x[DC395x_MAX_SG_LISTENTRY - 1].address;
        srb->segment_x[0].length =
            srb->segment_x[DC395x_MAX_SG_LISTENTRY - 1].length;
+       srb->virt_addr = srb->virt_addr_req;
 }
 
 
@@ -3711,6 +3713,8 @@ static void request_sense(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
        srb->xferred = srb->total_xfer_length;
        /* srb->segment_x : a one entry of S/G list table */
        srb->total_xfer_length = sizeof(cmd->sense_buffer);
+       srb->virt_addr_req = srb->virt_addr;
+       srb->virt_addr = cmd->sense_buffer;
        srb->segment_x[0].length = sizeof(cmd->sense_buffer);
        /* Map sense buffer */
        srb->segment_x[0].address =
index 822b9fa706f385f789d6cbbe125b971d586e4ec1..eaefeddb2b4ad48aa57e124a6eef2dfefeaf472a 100644 (file)
@@ -87,7 +87,7 @@ static int max_channel = 3;
 static int init_timeout = 5;
 static int max_requests = 50;
 
-#define IBMVSCSI_VERSION "1.5.7"
+#define IBMVSCSI_VERSION "1.5.8"
 
 MODULE_DESCRIPTION("IBM Virtual SCSI");
 MODULE_AUTHOR("Dave Boutcher");
@@ -534,7 +534,6 @@ static int map_data_for_srp_cmd(struct scsi_cmnd *cmd,
 static int ibmvscsi_send_srp_event(struct srp_event_struct *evt_struct,
                                   struct ibmvscsi_host_data *hostdata)
 {
-       struct scsi_cmnd *cmnd;
        u64 *crq_as_u64 = (u64 *) &evt_struct->crq;
        int rc;
 
@@ -544,19 +543,8 @@ static int ibmvscsi_send_srp_event(struct srp_event_struct *evt_struct,
         * can handle more requests (can_queue) when we actually can't
         */
        if ((evt_struct->crq.format == VIOSRP_SRP_FORMAT) &&
-           (atomic_dec_if_positive(&hostdata->request_limit) < 0)) {
-               /* See if the adapter is disabled */
-               if (atomic_read(&hostdata->request_limit) < 0)
-                       goto send_error;
-       
-               printk(KERN_WARNING 
-                      "ibmvscsi: Warning, request_limit exceeded\n");
-               unmap_cmd_data(&evt_struct->iu.srp.cmd,
-                              evt_struct,
-                              hostdata->dev);
-               free_event_struct(&hostdata->pool, evt_struct);
-               return SCSI_MLQUEUE_HOST_BUSY;
-       }
+           (atomic_dec_if_positive(&hostdata->request_limit) < 0))
+               goto send_error;
 
        /* Copy the IU into the transfer area */
        *evt_struct->xfer_iu = evt_struct->iu;
@@ -572,7 +560,7 @@ static int ibmvscsi_send_srp_event(struct srp_event_struct *evt_struct,
             ibmvscsi_send_crq(hostdata, crq_as_u64[0], crq_as_u64[1])) != 0) {
                list_del(&evt_struct->list);
 
-               printk(KERN_ERR "ibmvscsi: failed to send event struct rc %d\n",
+               printk(KERN_ERR "ibmvscsi: send error %d\n",
                       rc);
                goto send_error;
        }
@@ -582,14 +570,8 @@ static int ibmvscsi_send_srp_event(struct srp_event_struct *evt_struct,
  send_error:
        unmap_cmd_data(&evt_struct->iu.srp.cmd, evt_struct, hostdata->dev);
 
-       if ((cmnd = evt_struct->cmnd) != NULL) {
-               cmnd->result = DID_ERROR << 16;
-               evt_struct->cmnd_done(cmnd);
-       } else if (evt_struct->done)
-               evt_struct->done(evt_struct);
-       
        free_event_struct(&hostdata->pool, evt_struct);
-       return 0;
+       return SCSI_MLQUEUE_HOST_BUSY;
 }
 
 /**
@@ -802,7 +784,8 @@ static void login_rsp(struct srp_event_struct *evt_struct)
        case SRP_LOGIN_RSP_TYPE:        /* it worked! */
                break;
        case SRP_LOGIN_REJ_TYPE:        /* refused! */
-               printk(KERN_INFO "ibmvscsi: SRP_LOGIN_REQ rejected\n");
+               printk(KERN_INFO "ibmvscsi: SRP_LOGIN_REJ reason %u\n",
+                      evt_struct->xfer_iu->srp.login_rej.reason);
                /* Login failed.  */
                atomic_set(&hostdata->request_limit, -1);
                return;
@@ -834,6 +817,9 @@ static void login_rsp(struct srp_event_struct *evt_struct)
                return;
        }
 
+       /* If we had any pending I/Os, kick them */
+       scsi_unblock_requests(hostdata->host);
+
        send_mad_adapter_info(hostdata);
        return;
 }
@@ -862,6 +848,7 @@ static int send_srp_login(struct ibmvscsi_host_data *hostdata)
                          init_timeout * HZ);
 
        login = &evt_struct->iu.srp.login_req;
+       memset(login, 0x00, sizeof(struct srp_login_req));
        login->type = SRP_LOGIN_REQ_TYPE;
        login->max_requested_initiator_to_target_iulen = sizeof(union srp_iu);
        login->required_buffer_formats = 0x0006;
@@ -1122,7 +1109,7 @@ static int ibmvscsi_eh_device_reset_handler(struct scsi_cmnd *cmd)
  * purge_requests: Our virtual adapter just shut down.  purge any sent requests
  * @hostdata:    the adapter
  */
-static void purge_requests(struct ibmvscsi_host_data *hostdata)
+static void purge_requests(struct ibmvscsi_host_data *hostdata, int error_code)
 {
        struct srp_event_struct *tmp_evt, *pos;
        unsigned long flags;
@@ -1131,7 +1118,7 @@ static void purge_requests(struct ibmvscsi_host_data *hostdata)
        list_for_each_entry_safe(tmp_evt, pos, &hostdata->sent, list) {
                list_del(&tmp_evt->list);
                if (tmp_evt->cmnd) {
-                       tmp_evt->cmnd->result = (DID_ERROR << 16);
+                       tmp_evt->cmnd->result = (error_code << 16);
                        unmap_cmd_data(&tmp_evt->iu.srp.cmd, 
                                       tmp_evt, 
                                       tmp_evt->hostdata->dev);
@@ -1186,12 +1173,30 @@ void ibmvscsi_handle_crq(struct viosrp_crq *crq,
                        printk(KERN_ERR "ibmvscsi: unknown crq message type\n");
                }
                return;
-       case 0xFF:              /* Hypervisor telling us the connection is closed */
-               printk(KERN_INFO "ibmvscsi: Virtual adapter failed!\n");
+       case 0xFF:      /* Hypervisor telling us the connection is closed */
+               scsi_block_requests(hostdata->host);
+               if (crq->format == 0x06) {
+                       /* We need to re-setup the interpartition connection */
+                       printk(KERN_INFO
+                              "ibmvscsi: Re-enabling adapter!\n");
+                       purge_requests(hostdata, DID_REQUEUE);
+                       if (ibmvscsi_reenable_crq_queue(&hostdata->queue,
+                                                       hostdata) == 0)
+                               if (ibmvscsi_send_crq(hostdata,
+                                                     0xC001000000000000LL, 0))
+                                       printk(KERN_ERR
+                                              "ibmvscsi: transmit error after"
+                                              " enable\n");
+               } else {
+                       printk(KERN_INFO
+                              "ibmvscsi: Virtual adapter failed rc %d!\n",
+                              crq->format);
 
-               atomic_set(&hostdata->request_limit, -1);
-               purge_requests(hostdata);
-               ibmvscsi_reset_crq_queue(&hostdata->queue, hostdata);
+                       atomic_set(&hostdata->request_limit, -1);
+                       purge_requests(hostdata, DID_ERROR);
+                       ibmvscsi_reset_crq_queue(&hostdata->queue, hostdata);
+               }
+               scsi_unblock_requests(hostdata->host);
                return;
        case 0x80:              /* real payload */
                break;
index 5b0edd1f19213e3d97c173c4659659e5c63b6891..4550d71e474475bec487075c96d1c7aed7df8d88 100644 (file)
@@ -103,6 +103,9 @@ void ibmvscsi_release_crq_queue(struct crq_queue *queue,
 int ibmvscsi_reset_crq_queue(struct crq_queue *queue,
                              struct ibmvscsi_host_data *hostdata);
 
+int ibmvscsi_reenable_crq_queue(struct crq_queue *queue,
+                               struct ibmvscsi_host_data *hostdata);
+
 void ibmvscsi_handle_crq(struct viosrp_crq *crq,
                         struct ibmvscsi_host_data *hostdata);
 int ibmvscsi_send_crq(struct ibmvscsi_host_data *hostdata,
index ce15d9e3962114f5f49d84df48de41814291ce79..7eed0b098171f6b37670ebe3db4b24689cfa68ad 100644 (file)
@@ -123,6 +123,19 @@ int ibmvscsi_reset_crq_queue(struct crq_queue *queue,
        return 0;
 }
 
+/**
+ * reenable_crq_queue: - reenables a crq after a failure
+ * @queue:     crq_queue to initialize and register
+ * @hostdata:  ibmvscsi_host_data of host
+ *
+ * no-op for iSeries
+ */
+int ibmvscsi_reenable_crq_queue(struct crq_queue *queue,
+                               struct ibmvscsi_host_data *hostdata)
+{
+       return 0;
+}
+
 /**
  * ibmvscsi_send_crq: - Send a CRQ
  * @hostdata:  the adapter
index 75db2f5c545e999d2b06b313560bf3610447bdd5..f47dd87c05e7566bcb58e054fbcfcbd7eeb06613 100644 (file)
@@ -280,6 +280,28 @@ int ibmvscsi_init_crq_queue(struct crq_queue *queue,
        return -1;
 }
 
+/**
+ * reenable_crq_queue: - reenables a crq after
+ * @queue:     crq_queue to initialize and register
+ * @hostdata:  ibmvscsi_host_data of host
+ *
+ */
+int ibmvscsi_reenable_crq_queue(struct crq_queue *queue,
+                                struct ibmvscsi_host_data *hostdata)
+{
+       int rc;
+       struct vio_dev *vdev = to_vio_dev(hostdata->dev);
+
+       /* Re-enable the CRQ */
+       do {
+               rc = plpar_hcall_norets(H_ENABLE_CRQ, vdev->unit_address);
+       } while ((rc == H_InProgress) || (rc == H_Busy) || (H_isLongBusy(rc)));
+
+       if (rc)
+               printk(KERN_ERR "ibmvscsi: Error %d enabling adapter\n", rc);
+       return rc;
+}
+
 /**
  * reset_crq_queue: - resets a crq after a failure
  * @queue:     crq_queue to initialize and register
index e5e1ca44e1eea831e17814c84d81db91b62be2cf..86c546164da9f1dc78b98d4f2982e9b45d602d0c 100644 (file)
@@ -3499,6 +3499,7 @@ ips_map_status(ips_ha_t * ha, ips_scb_t * scb, ips_stat_t * sp)
        int device_error;
        uint32_t transfer_len;
        IPS_DCDB_TABLE_TAPE *tapeDCDB;
+       IPS_SCSI_INQ_DATA inquiryData;
 
        METHOD_TRACE("ips_map_status", 1);
 
@@ -3557,13 +3558,13 @@ ips_map_status(ips_ha_t * ha, ips_scb_t * scb, ips_stat_t * sp)
                                errcode = DID_OK;
 
                                /* Restrict access to physical DASD */
-                               if ((scb->scsi_cmd->cmnd[0] == INQUIRY) &&
-                                   ((((char *) scb->scsi_cmd->
-                                      buffer)[0] & 0x1f) == TYPE_DISK)) {
-                                       /* underflow -- no error               */
-                                       /* restrict access to physical DASD    */
-                                       errcode = DID_TIME_OUT;
-                                       break;
+                               if (scb->scsi_cmd->cmnd[0] == INQUIRY) {
+                                   ips_scmd_buf_read(scb->scsi_cmd, 
+                                      &inquiryData, sizeof (inquiryData));
+                                   if ((inquiryData.DeviceType & 0x1f) == TYPE_DISK) {
+                                       errcode = DID_TIME_OUT;
+                                       break;
+                                   }
                                }
                        } else
                                errcode = DID_ERROR;
@@ -4135,6 +4136,7 @@ ips_chkstatus(ips_ha_t * ha, IPS_STATUS * pstatus)
        uint8_t basic_status;
        uint8_t ext_status;
        int errcode;
+       IPS_SCSI_INQ_DATA inquiryData;
 
        METHOD_TRACE("ips_chkstatus", 1);
 
@@ -4255,11 +4257,11 @@ ips_chkstatus(ips_ha_t * ha, IPS_STATUS * pstatus)
                        scb->scsi_cmd->result = errcode << 16;
                } else {        /* bus == 0 */
                        /* restrict access to physical drives */
-                       if ((scb->scsi_cmd->cmnd[0] == INQUIRY) &&
-                           ((((char *) scb->scsi_cmd->buffer)[0] & 0x1f) ==
-                            TYPE_DISK)) {
-
-                               scb->scsi_cmd->result = DID_TIME_OUT << 16;
+                       if (scb->scsi_cmd->cmnd[0] == INQUIRY) { 
+                           ips_scmd_buf_read(scb->scsi_cmd, 
+                                  &inquiryData, sizeof (inquiryData));
+                           if ((inquiryData.DeviceType & 0x1f) == TYPE_DISK) 
+                               scb->scsi_cmd->result = DID_TIME_OUT << 16;
                        }
                }               /* else */
        } else {                /* recovered error / success */
@@ -5012,7 +5014,7 @@ ips_init_copperhead(ips_ha_t * ha)
                                break;
 
                        /* Delay for 1 Second */
-                       MDELAY(IPS_ONE_SEC);
+                       msleep(IPS_ONE_SEC);
                }
 
                if (j >= 45)
@@ -5038,7 +5040,7 @@ ips_init_copperhead(ips_ha_t * ha)
                                break;
 
                        /* Delay for 1 Second */
-                       MDELAY(IPS_ONE_SEC);
+                       msleep(IPS_ONE_SEC);
                }
 
                if (j >= 240)
@@ -5056,7 +5058,7 @@ ips_init_copperhead(ips_ha_t * ha)
                        break;
 
                /* Delay for 1 Second */
-               MDELAY(IPS_ONE_SEC);
+               msleep(IPS_ONE_SEC);
        }
 
        if (i >= 240)
@@ -5106,7 +5108,7 @@ ips_init_copperhead_memio(ips_ha_t * ha)
                                break;
 
                        /* Delay for 1 Second */
-                       MDELAY(IPS_ONE_SEC);
+                       msleep(IPS_ONE_SEC);
                }
 
                if (j >= 45)
@@ -5132,7 +5134,7 @@ ips_init_copperhead_memio(ips_ha_t * ha)
                                break;
 
                        /* Delay for 1 Second */
-                       MDELAY(IPS_ONE_SEC);
+                       msleep(IPS_ONE_SEC);
                }
 
                if (j >= 240)
@@ -5150,7 +5152,7 @@ ips_init_copperhead_memio(ips_ha_t * ha)
                        break;
 
                /* Delay for 1 Second */
-               MDELAY(IPS_ONE_SEC);
+               msleep(IPS_ONE_SEC);
        }
 
        if (i >= 240)
@@ -5202,7 +5204,7 @@ ips_init_morpheus(ips_ha_t * ha)
                        break;
 
                /* Delay for 1 Second */
-               MDELAY(IPS_ONE_SEC);
+               msleep(IPS_ONE_SEC);
        }
 
        if (i >= 45) {
@@ -5228,7 +5230,7 @@ ips_init_morpheus(ips_ha_t * ha)
                        if (Post != 0x4F00)
                                break;
                        /* Delay for 1 Second */
-                       MDELAY(IPS_ONE_SEC);
+                       msleep(IPS_ONE_SEC);
                }
 
                if (i >= 120) {
@@ -5258,7 +5260,7 @@ ips_init_morpheus(ips_ha_t * ha)
                        break;
 
                /* Delay for 1 Second */
-               MDELAY(IPS_ONE_SEC);
+               msleep(IPS_ONE_SEC);
        }
 
        if (i >= 240) {
@@ -5318,12 +5320,12 @@ ips_reset_copperhead(ips_ha_t * ha)
                outb(IPS_BIT_RST, ha->io_addr + IPS_REG_SCPR);
 
                /* Delay for 1 Second */
-               MDELAY(IPS_ONE_SEC);
+               msleep(IPS_ONE_SEC);
 
                outb(0, ha->io_addr + IPS_REG_SCPR);
 
                /* Delay for 1 Second */
-               MDELAY(IPS_ONE_SEC);
+               msleep(IPS_ONE_SEC);
 
                if ((*ha->func.init) (ha))
                        break;
@@ -5363,12 +5365,12 @@ ips_reset_copperhead_memio(ips_ha_t * ha)
                writeb(IPS_BIT_RST, ha->mem_ptr + IPS_REG_SCPR);
 
                /* Delay for 1 Second */
-               MDELAY(IPS_ONE_SEC);
+               msleep(IPS_ONE_SEC);
 
                writeb(0, ha->mem_ptr + IPS_REG_SCPR);
 
                /* Delay for 1 Second */
-               MDELAY(IPS_ONE_SEC);
+               msleep(IPS_ONE_SEC);
 
                if ((*ha->func.init) (ha))
                        break;
@@ -5409,7 +5411,7 @@ ips_reset_morpheus(ips_ha_t * ha)
                writel(0x80000000, ha->mem_ptr + IPS_REG_I960_IDR);
 
                /* Delay for 5 Seconds */
-               MDELAY(5 * IPS_ONE_SEC);
+               msleep(5 * IPS_ONE_SEC);
 
                /* Do a PCI config read to wait for adapter */
                pci_read_config_byte(ha->pcidev, 4, &junk);
index 99bae8369ab294b3983517a799b3e1738bcc762d..46c4cdbaee86dc26114299c2a1ccfa45150b9c39 100644 (file)
@@ -611,6 +611,10 @@ int ata_rwcmd_protocol(struct ata_queued_cmd *qc)
        if (dev->flags & ATA_DFLAG_PIO) {
                tf->protocol = ATA_PROT_PIO;
                index = dev->multi_count ? 0 : 8;
+       } else if (lba48 && (qc->ap->flags & ATA_FLAG_PIO_LBA48)) {
+               /* Unable to use DMA due to host limitation */
+               tf->protocol = ATA_PROT_PIO;
+               index = dev->multi_count ? 0 : 4;
        } else {
                tf->protocol = ATA_PROT_DMA;
                index = 16;
@@ -1051,18 +1055,22 @@ static unsigned int ata_pio_modes(const struct ata_device *adev)
 {
        u16 modes;
 
-       /* Usual case. Word 53 indicates word 88 is valid */
-       if (adev->id[ATA_ID_FIELD_VALID] & (1 << 2)) {
+       /* Usual case. Word 53 indicates word 64 is valid */
+       if (adev->id[ATA_ID_FIELD_VALID] & (1 << 1)) {
                modes = adev->id[ATA_ID_PIO_MODES] & 0x03;
                modes <<= 3;
                modes |= 0x7;
                return modes;
        }
 
-       /* If word 88 isn't valid then Word 51 holds the PIO timing number
-          for the maximum. Turn it into a mask and return it */
-       modes = (2 << (adev->id[ATA_ID_OLD_PIO_MODES] & 0xFF)) - 1 ;
+       /* If word 64 isn't valid then Word 51 high byte holds the PIO timing
+          number for the maximum. Turn it into a mask and return it */
+       modes = (2 << ((adev->id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF)) - 1 ;
        return modes;
+       /* But wait.. there's more. Design your standards by committee and
+          you too can get a free iordy field to process. However its the 
+          speeds not the modes that are supported... Note drivers using the
+          timing API will get this right anyway */
 }
 
 struct ata_exec_internal_arg {
@@ -1164,6 +1172,39 @@ ata_exec_internal(struct ata_port *ap, struct ata_device *dev,
        return AC_ERR_OTHER;
 }
 
+/**
+ *     ata_pio_need_iordy      -       check if iordy needed
+ *     @adev: ATA device
+ *
+ *     Check if the current speed of the device requires IORDY. Used
+ *     by various controllers for chip configuration.
+ */
+
+unsigned int ata_pio_need_iordy(const struct ata_device *adev)
+{
+       int pio;
+       int speed = adev->pio_mode - XFER_PIO_0;
+
+       if (speed < 2)
+               return 0;
+       if (speed > 2)
+               return 1;
+               
+       /* If we have no drive specific rule, then PIO 2 is non IORDY */
+
+       if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE */
+               pio = adev->id[ATA_ID_EIDE_PIO];
+               /* Is the speed faster than the drive allows non IORDY ? */
+               if (pio) {
+                       /* This is cycle times not frequency - watch the logic! */
+                       if (pio > 240)  /* PIO2 is 240nS per cycle */
+                               return 1;
+                       return 0;
+               }
+       }
+       return 0;
+}
+
 /**
  *     ata_dev_identify - obtain IDENTIFY x DEVICE page
  *     @ap: port on which device we wish to probe resides
@@ -1415,7 +1456,7 @@ void ata_dev_config(struct ata_port *ap, unsigned int i)
                ap->udma_mask &= ATA_UDMA5;
                ap->host->max_sectors = ATA_MAX_SECTORS;
                ap->host->hostt->max_sectors = ATA_MAX_SECTORS;
-               ap->device->flags |= ATA_DFLAG_LOCK_SECTORS;
+               ap->device[i].flags |= ATA_DFLAG_LOCK_SECTORS;
        }
 
        if (ap->ops->dev_config)
@@ -3056,10 +3097,21 @@ static void ata_pio_data_xfer(struct ata_port *ap, unsigned char *buf,
 static void ata_data_xfer(struct ata_port *ap, unsigned char *buf,
                          unsigned int buflen, int do_write)
 {
-       if (ap->flags & ATA_FLAG_MMIO)
-               ata_mmio_data_xfer(ap, buf, buflen, do_write);
-       else
-               ata_pio_data_xfer(ap, buf, buflen, do_write);
+       /* Make the crap hardware pay the costs not the good stuff */
+       if (unlikely(ap->flags & ATA_FLAG_IRQ_MASK)) {
+               unsigned long flags;
+               local_irq_save(flags);
+               if (ap->flags & ATA_FLAG_MMIO)
+                       ata_mmio_data_xfer(ap, buf, buflen, do_write);
+               else
+                       ata_pio_data_xfer(ap, buf, buflen, do_write);
+               local_irq_restore(flags);
+       } else {
+               if (ap->flags & ATA_FLAG_MMIO)
+                       ata_mmio_data_xfer(ap, buf, buflen, do_write);
+               else
+                       ata_pio_data_xfer(ap, buf, buflen, do_write);
+       }
 }
 
 /**
@@ -5122,6 +5174,7 @@ EXPORT_SYMBOL_GPL(ata_dev_id_string);
 EXPORT_SYMBOL_GPL(ata_dev_config);
 EXPORT_SYMBOL_GPL(ata_scsi_simulate);
 
+EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
 EXPORT_SYMBOL_GPL(ata_timing_compute);
 EXPORT_SYMBOL_GPL(ata_timing_merge);
 
index cfbceb5047183e3096bb494bc3e2419c8956cd93..07b1e7cc61dfcbc0ab5ce762be6d83ecb4f1f1db 100644 (file)
@@ -1700,6 +1700,31 @@ static unsigned int ata_msense_rw_recovery(u8 **ptr_io, const u8 *last)
        return sizeof(def_rw_recovery_mpage);
 }
 
+/*
+ * We can turn this into a real blacklist if it's needed, for now just
+ * blacklist any Maxtor BANC1G10 revision firmware
+ */
+static int ata_dev_supports_fua(u16 *id)
+{
+       unsigned char model[41], fw[9];
+
+       if (!ata_id_has_fua(id))
+               return 0;
+
+       model[40] = '\0';
+       fw[8] = '\0';
+
+       ata_dev_id_string(id, model, ATA_ID_PROD_OFS, sizeof(model) - 1);
+       ata_dev_id_string(id, fw, ATA_ID_FW_REV_OFS, sizeof(fw) - 1);
+
+       if (strncmp(model, "Maxtor", 6))
+               return 1;
+       if (strncmp(fw, "BANC1G10", 8))
+               return 1;
+
+       return 0; /* blacklisted */
+}
+
 /**
  *     ata_scsiop_mode_sense - Simulate MODE SENSE 6, 10 commands
  *     @args: device IDENTIFY data / SCSI command of interest.
@@ -1797,7 +1822,7 @@ unsigned int ata_scsiop_mode_sense(struct ata_scsi_args *args, u8 *rbuf,
                return 0;
 
        dpofua = 0;
-       if (ata_id_has_fua(args->id) && dev->flags & ATA_DFLAG_LBA48 &&
+       if (ata_dev_supports_fua(args->id) && dev->flags & ATA_DFLAG_LBA48 &&
            (!(dev->flags & ATA_DFLAG_PIO) || dev->multi_count))
                dpofua = 1 << 4;
 
index 511ed52a580747be705be11e3f1d9dce325af7c8..a487f414960e5e9a6553c7c1b7dc7fe2042253c2 100644 (file)
@@ -10,7 +10,7 @@
  *        2 of the License, or (at your option) any later version.
  *
  * FILE                : megaraid_sas.c
- * Version     : v00.00.02.00-rc4
+ * Version     : v00.00.02.02
  *
  * Authors:
  *     Sreenivas Bagalkote     <Sreenivas.Bagalkote@lsil.com>
@@ -55,13 +55,13 @@ static struct pci_device_id megasas_pci_table[] = {
 
        {
         PCI_VENDOR_ID_LSI_LOGIC,
-        PCI_DEVICE_ID_LSI_SAS1064R,
+        PCI_DEVICE_ID_LSI_SAS1064R, // xscale IOP
         PCI_ANY_ID,
         PCI_ANY_ID,
         },
        {
         PCI_VENDOR_ID_DELL,
-        PCI_DEVICE_ID_DELL_PERC5,
+        PCI_DEVICE_ID_DELL_PERC5, // xscale IOP
         PCI_ANY_ID,
         PCI_ANY_ID,
         },
@@ -119,12 +119,18 @@ megasas_return_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd)
        spin_unlock_irqrestore(&instance->cmd_pool_lock, flags);
 }
 
+
+/**
+*      The following functions are defined for xscale 
+*      (deviceid : 1064R, PERC5) controllers
+*/
+
 /**
- * megasas_enable_intr -       Enables interrupts
+ * megasas_enable_intr_xscale -        Enables interrupts
  * @regs:                      MFI register set
  */
 static inline void
-megasas_enable_intr(struct megasas_register_set __iomem * regs)
+megasas_enable_intr_xscale(struct megasas_register_set __iomem * regs)
 {
        writel(1, &(regs)->outbound_intr_mask);
 
@@ -132,6 +138,66 @@ megasas_enable_intr(struct megasas_register_set __iomem * regs)
        readl(&regs->outbound_intr_mask);
 }
 
+/**
+ * megasas_read_fw_status_reg_xscale - returns the current FW status value
+ * @regs:                      MFI register set
+ */
+static u32
+megasas_read_fw_status_reg_xscale(struct megasas_register_set __iomem * regs)
+{
+       return readl(&(regs)->outbound_msg_0);
+}
+/**
+ * megasas_clear_interrupt_xscale -    Check & clear interrupt
+ * @regs:                              MFI register set
+ */
+static int 
+megasas_clear_intr_xscale(struct megasas_register_set __iomem * regs)
+{
+       u32 status;
+       /*
+        * Check if it is our interrupt
+        */
+       status = readl(&regs->outbound_intr_status);
+
+       if (!(status & MFI_OB_INTR_STATUS_MASK)) {
+               return 1;
+       }
+
+       /*
+        * Clear the interrupt by writing back the same value
+        */
+       writel(status, &regs->outbound_intr_status);
+
+       return 0;
+}
+
+/**
+ * megasas_fire_cmd_xscale -   Sends command to the FW
+ * @frame_phys_addr :          Physical address of cmd
+ * @frame_count :              Number of frames for the command
+ * @regs :                     MFI register set
+ */
+static inline void 
+megasas_fire_cmd_xscale(dma_addr_t frame_phys_addr,u32 frame_count, struct megasas_register_set __iomem *regs)
+{
+       writel((frame_phys_addr >> 3)|(frame_count),
+              &(regs)->inbound_queue_port);
+}
+
+static struct megasas_instance_template megasas_instance_template_xscale = {
+
+       .fire_cmd = megasas_fire_cmd_xscale,
+       .enable_intr = megasas_enable_intr_xscale,
+       .clear_intr = megasas_clear_intr_xscale,
+       .read_fw_status_reg = megasas_read_fw_status_reg_xscale,
+};
+
+/**
+*      This is the end of set of functions & definitions specific 
+*      to xscale (deviceid : 1064R, PERC5) controllers
+*/
+
 /**
  * megasas_disable_intr -      Disables interrupts
  * @regs:                      MFI register set
@@ -139,7 +205,7 @@ megasas_enable_intr(struct megasas_register_set __iomem * regs)
 static inline void
 megasas_disable_intr(struct megasas_register_set __iomem * regs)
 {
-       u32 mask = readl(&regs->outbound_intr_mask) & (~0x00000001);
+       u32 mask = 0x1f; 
        writel(mask, &regs->outbound_intr_mask);
 
        /* Dummy readl to force pci flush */
@@ -167,8 +233,7 @@ megasas_issue_polled(struct megasas_instance *instance, struct megasas_cmd *cmd)
        /*
         * Issue the frame using inbound queue port
         */
-       writel(cmd->frame_phys_addr >> 3,
-              &instance->reg_set->inbound_queue_port);
+       instance->instancet->fire_cmd(cmd->frame_phys_addr ,0,instance->reg_set);
 
        /*
         * Wait for cmd_status to change
@@ -198,8 +263,7 @@ megasas_issue_blocked_cmd(struct megasas_instance *instance,
 {
        cmd->cmd_status = ENODATA;
 
-       writel(cmd->frame_phys_addr >> 3,
-              &instance->reg_set->inbound_queue_port);
+       instance->instancet->fire_cmd(cmd->frame_phys_addr ,0,instance->reg_set);
 
        wait_event(instance->int_cmd_wait_q, (cmd->cmd_status != ENODATA));
 
@@ -242,8 +306,7 @@ megasas_issue_blocked_abort_cmd(struct megasas_instance *instance,
        cmd->sync_cmd = 1;
        cmd->cmd_status = 0xFF;
 
-       writel(cmd->frame_phys_addr >> 3,
-              &instance->reg_set->inbound_queue_port);
+       instance->instancet->fire_cmd(cmd->frame_phys_addr ,0,instance->reg_set);
 
        /*
         * Wait for this cmd to complete
@@ -558,112 +621,29 @@ megasas_build_ldio(struct megasas_instance *instance, struct scsi_cmnd *scp,
 }
 
 /**
- * megasas_build_cmd - Prepares a command packet
- * @instance:          Adapter soft state
- * @scp:               SCSI command
- * @frame_count:       [OUT] Number of frames used to prepare this command
+ * megasas_is_ldio -           Checks if the cmd is for logical drive
+ * @scmd:                      SCSI command
+ *     
+ * Called by megasas_queue_command to find out if the command to be queued
+ * is a logical drive command  
  */
-static struct megasas_cmd *megasas_build_cmd(struct megasas_instance
-                                                   *instance,
-                                                   struct scsi_cmnd *scp,
-                                                   int *frame_count)
+static inline int megasas_is_ldio(struct scsi_cmnd *cmd)
 {
-       u32 logical_cmd;
-       struct megasas_cmd *cmd;
-
-       /*
-        * Find out if this is logical or physical drive command.
-        */
-       logical_cmd = MEGASAS_IS_LOGICAL(scp);
-
-       /*
-        * Logical drive command
-        */
-       if (logical_cmd) {
-
-               if (scp->device->id >= MEGASAS_MAX_LD) {
-                       scp->result = DID_BAD_TARGET << 16;
-                       return NULL;
-               }
-
-               switch (scp->cmnd[0]) {
-
-               case READ_10:
-               case WRITE_10:
-               case READ_12:
-               case WRITE_12:
-               case READ_6:
-               case WRITE_6:
-               case READ_16:
-               case WRITE_16:
-                       /*
-                        * Fail for LUN > 0
-                        */
-                       if (scp->device->lun) {
-                               scp->result = DID_BAD_TARGET << 16;
-                               return NULL;
-                       }
-
-                       cmd = megasas_get_cmd(instance);
-
-                       if (!cmd) {
-                               scp->result = DID_IMM_RETRY << 16;
-                               return NULL;
-                       }
-
-                       *frame_count = megasas_build_ldio(instance, scp, cmd);
-
-                       if (!(*frame_count)) {
-                               megasas_return_cmd(instance, cmd);
-                               return NULL;
-                       }
-
-                       return cmd;
-
-               default:
-                       /*
-                        * Fail for LUN > 0
-                        */
-                       if (scp->device->lun) {
-                               scp->result = DID_BAD_TARGET << 16;
-                               return NULL;
-                       }
-
-                       cmd = megasas_get_cmd(instance);
-
-                       if (!cmd) {
-                               scp->result = DID_IMM_RETRY << 16;
-                               return NULL;
-                       }
-
-                       *frame_count = megasas_build_dcdb(instance, scp, cmd);
-
-                       if (!(*frame_count)) {
-                               megasas_return_cmd(instance, cmd);
-                               return NULL;
-                       }
-
-                       return cmd;
-               }
-       } else {
-               cmd = megasas_get_cmd(instance);
-
-               if (!cmd) {
-                       scp->result = DID_IMM_RETRY << 16;
-                       return NULL;
-               }
-
-               *frame_count = megasas_build_dcdb(instance, scp, cmd);
-
-               if (!(*frame_count)) {
-                       megasas_return_cmd(instance, cmd);
-                       return NULL;
-               }
-
-               return cmd;
+       if (!MEGASAS_IS_LOGICAL(cmd))
+               return 0;
+       switch (cmd->cmnd[0]) {
+       case READ_10:
+       case WRITE_10:
+       case READ_12:
+       case WRITE_12:
+       case READ_6:
+       case WRITE_6:
+       case READ_16:
+       case WRITE_16:
+               return 1;
+       default:
+               return 0;
        }
-
-       return NULL;
 }
 
 /**
@@ -684,13 +664,27 @@ megasas_queue_command(struct scsi_cmnd *scmd, void (*done) (struct scsi_cmnd *))
        scmd->scsi_done = done;
        scmd->result = 0;
 
-       cmd = megasas_build_cmd(instance, scmd, &frame_count);
-
-       if (!cmd) {
-               done(scmd);
-               return 0;
+       if (MEGASAS_IS_LOGICAL(scmd) &&
+           (scmd->device->id >= MEGASAS_MAX_LD || scmd->device->lun)) {
+               scmd->result = DID_BAD_TARGET << 16;
+               goto out_done;
        }
 
+       cmd = megasas_get_cmd(instance);
+       if (!cmd)
+               return SCSI_MLQUEUE_HOST_BUSY;
+
+       /*
+        * Logical drive command
+        */
+       if (megasas_is_ldio(scmd))
+               frame_count = megasas_build_ldio(instance, scmd, cmd);
+       else
+               frame_count = megasas_build_dcdb(instance, scmd, cmd);
+
+       if (!frame_count)
+               goto out_return_cmd;
+
        cmd->scmd = scmd;
        scmd->SCp.ptr = (char *)cmd;
        scmd->SCp.sent_command = jiffies;
@@ -702,10 +696,15 @@ megasas_queue_command(struct scsi_cmnd *scmd, void (*done) (struct scsi_cmnd *))
        instance->fw_outstanding++;
        spin_unlock_irqrestore(&instance->instance_lock, flags);
 
-       writel(((cmd->frame_phys_addr >> 3) | (cmd->frame_count - 1)),
-              &instance->reg_set->inbound_queue_port);
+       instance->instancet->fire_cmd(cmd->frame_phys_addr, cmd->frame_count - 1, instance->reg_set);
 
        return 0;
+
+ out_return_cmd:
+       megasas_return_cmd(instance, cmd);
+ out_done:
+       done(scmd);
+       return 0;
 }
 
 /**
@@ -1108,7 +1107,6 @@ megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
 static int
 megasas_deplete_reply_queue(struct megasas_instance *instance, u8 alt_status)
 {
-       u32 status;
        u32 producer;
        u32 consumer;
        u32 context;
@@ -1116,17 +1114,10 @@ megasas_deplete_reply_queue(struct megasas_instance *instance, u8 alt_status)
 
        /*
         * Check if it is our interrupt
+        * and clear it if so
         */
-       status = readl(&instance->reg_set->outbound_intr_status);
-
-       if (!(status & MFI_OB_INTR_STATUS_MASK)) {
+       if (instance->instancet->clear_intr(instance->reg_set))
                return IRQ_NONE;
-       }
-
-       /*
-        * Clear the interrupt by writing back the same value
-        */
-       writel(status, &instance->reg_set->outbound_intr_status);
 
        producer = *instance->producer;
        consumer = *instance->consumer;
@@ -1160,7 +1151,7 @@ static irqreturn_t megasas_isr(int irq, void *devp, struct pt_regs *regs)
 
 /**
  * megasas_transition_to_ready -       Move the FW to READY state
- * @reg_set:                           MFI register set
+ * @instance:                          Adapter soft state
  *
  * During the initialization, FW passes can potentially be in any one of
  * several possible states. If the FW is in operational or waiting-for-handshake
@@ -1168,14 +1159,14 @@ static irqreturn_t megasas_isr(int irq, void *devp, struct pt_regs *regs)
  * has to wait for the ready state.
  */
 static int
-megasas_transition_to_ready(struct megasas_register_set __iomem * reg_set)
+megasas_transition_to_ready(struct megasas_instance *instance)
 {
        int i;
        u8 max_wait;
        u32 fw_state;
        u32 cur_state;
 
-       fw_state = readl(&reg_set->outbound_msg_0) & MFI_STATE_MASK;
+       fw_state = instance->instancet->read_fw_status_reg(instance->reg_set) & MFI_STATE_MASK;
 
        while (fw_state != MFI_STATE_READY) {
 
@@ -1193,7 +1184,7 @@ megasas_transition_to_ready(struct megasas_register_set __iomem * reg_set)
                         * Set the CLR bit in inbound doorbell
                         */
                        writel(MFI_INIT_CLEAR_HANDSHAKE,
-                              &reg_set->inbound_doorbell);
+                               &instance->reg_set->inbound_doorbell);
 
                        max_wait = 2;
                        cur_state = MFI_STATE_WAIT_HANDSHAKE;
@@ -1203,8 +1194,8 @@ megasas_transition_to_ready(struct megasas_register_set __iomem * reg_set)
                        /*
                         * Bring it to READY state; assuming max wait 2 secs
                         */
-                       megasas_disable_intr(reg_set);
-                       writel(MFI_INIT_READY, &reg_set->inbound_doorbell);
+                       megasas_disable_intr(instance->reg_set);
+                       writel(MFI_INIT_READY, &instance->reg_set->inbound_doorbell);
 
                        max_wait = 10;
                        cur_state = MFI_STATE_OPERATIONAL;
@@ -1253,8 +1244,8 @@ megasas_transition_to_ready(struct megasas_register_set __iomem * reg_set)
                 * The cur_state should not last for more than max_wait secs
                 */
                for (i = 0; i < (max_wait * 1000); i++) {
-                       fw_state = MFI_STATE_MASK &
-                           readl(&reg_set->outbound_msg_0);
+                       fw_state = instance->instancet->read_fw_status_reg(instance->reg_set) &
+                                       MFI_STATE_MASK;
 
                        if (fw_state == cur_state) {
                                msleep(1);
@@ -1616,18 +1607,20 @@ static int megasas_init_mfi(struct megasas_instance *instance)
 
        reg_set = instance->reg_set;
 
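+       /* Hook up the per-controller register access routines */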
+       instance->instancet = &megasas_instance_template_xscale;
+
        /*
         * We expect the FW state to be READY
         */
-       if (megasas_transition_to_ready(instance->reg_set))
+       if (megasas_transition_to_ready(instance))
                goto fail_ready_state;
 
        /*
         * Get various operational parameters from status register
         */
-       instance->max_fw_cmds = readl(&reg_set->outbound_msg_0) & 0x00FFFF;
-       instance->max_num_sge = (readl(&reg_set->outbound_msg_0) & 0xFF0000) >>
-           0x10;
+       instance->max_fw_cmds = instance->instancet->read_fw_status_reg(reg_set) & 0x00FFFF;
+       instance->max_num_sge = (instance->instancet->read_fw_status_reg(reg_set) & 0xFF0000) >> 
+                                       0x10;
        /*
         * Create a pool of commands
         */
@@ -1936,8 +1929,7 @@ megasas_register_aen(struct megasas_instance *instance, u32 seq_num,
        /*
         * Issue the aen registration frame
         */
-       writel(cmd->frame_phys_addr >> 3,
-              &instance->reg_set->inbound_queue_port);
+       instance->instancet->fire_cmd(cmd->frame_phys_addr, 0, instance->reg_set);
 
        return 0;
 }
@@ -2126,7 +2118,7 @@ megasas_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
                goto fail_irq;
        }
 
-       megasas_enable_intr(instance->reg_set);
+       instance->instancet->enable_intr(instance->reg_set);
 
        /*
         * Store instance in PCI softstate
@@ -2681,9 +2673,8 @@ megasas_mgmt_compat_ioctl(struct file *file, unsigned int cmd,
                          unsigned long arg)
 {
        switch (cmd) {
-       case MEGASAS_IOC_FIRMWARE:{
-                       return megasas_mgmt_compat_ioctl_fw(file, arg);
-               }
+       case MEGASAS_IOC_FIRMWARE32:
+               return megasas_mgmt_compat_ioctl_fw(file, arg);
        case MEGASAS_IOC_GET_AEN:
                return megasas_mgmt_ioctl_aen(file, arg);
        }
index eaec9d531424cc8359e273629d0eebb677725360..d6d166c0663ff664930b06432b4c2753c93631b3 100644 (file)
 /**
  * MegaRAID SAS Driver meta data
  */
-#define MEGASAS_VERSION                                "00.00.02.00-rc4"
-#define MEGASAS_RELDATE                                "Sep 16, 2005"
-#define MEGASAS_EXT_VERSION                    "Fri Sep 16 12:37:08 EDT 2005"
-
+#define MEGASAS_VERSION                                "00.00.02.02"
+#define MEGASAS_RELDATE                                "Jan 23, 2006"
+#define MEGASAS_EXT_VERSION                    "Mon Jan 23 14:09:01 PST 2006"
 /*
  * =====================================
  * MegaRAID SAS MFI firmware definitions
@@ -1013,6 +1012,16 @@ struct megasas_evt_detail {
 
 } __attribute__ ((packed));
 
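+/*
+ * Per-controller register access operations: command dispatch, interrupt
+ * enable/clear and firmware status read
+ */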
+struct megasas_instance_template {
+       void (*fire_cmd)(dma_addr_t, u32, struct megasas_register_set __iomem *);
+
+       void (*enable_intr)(struct megasas_register_set __iomem *);
+
+       int (*clear_intr)(struct megasas_register_set __iomem *);
+
+       u32 (*read_fw_status_reg)(struct megasas_register_set __iomem *);
+};
+
 struct megasas_instance {
 
        u32 *producer;
@@ -1056,6 +1065,8 @@ struct megasas_instance {
        u32 fw_outstanding;
        u32 hw_crit_error;
        spinlock_t instance_lock;
+
+       struct megasas_instance_template *instancet;
 };
 
 #define MEGASAS_IS_LOGICAL(scp)                                                \
@@ -1125,11 +1136,10 @@ struct compat_megasas_iocpacket {
        struct compat_iovec sgl[MAX_IOCTL_SGE];
 } __attribute__ ((packed));
 
-#define MEGASAS_IOC_FIRMWARE   _IOWR('M', 1, struct compat_megasas_iocpacket)
-#else
-#define MEGASAS_IOC_FIRMWARE   _IOWR('M', 1, struct megasas_iocpacket)
 #endif
 
+#define MEGASAS_IOC_FIRMWARE   _IOWR('M', 1, struct megasas_iocpacket)
+#define MEGASAS_IOC_FIRMWARE32 _IOWR('M', 1, struct compat_megasas_iocpacket)
 #define MEGASAS_IOC_GET_AEN    _IOW('M', 3, struct megasas_aen)
 
 struct megasas_mgmt_info {
index 0878f95b54499fc0c26a807624c3cb1345821b0a..e0230249fa0fb55cb9cda06f399b2c8ee0318e9f 100644 (file)
 * General Public License for more details.
 *
 ******************************************************************************/
-#define QLA1280_VERSION      "3.25"
+#define QLA1280_VERSION      "3.26"
 /*****************************************************************************
     Revision History:
+    Rev  3.26, January 16, 2006 Jes Sorensen
+       - Ditch all < 2.6 support
     Rev  3.25.1, February 10, 2005 Christoph Hellwig
        - use pci_map_single to map non-S/G requests
        - remove qla1280_proc_info
 #include <asm/types.h>
 #include <asm/system.h>
 
-#if LINUX_VERSION_CODE >= 0x020545
 #include <scsi/scsi.h>
 #include <scsi/scsi_cmnd.h>
 #include <scsi/scsi_device.h>
 #include <scsi/scsi_host.h>
 #include <scsi/scsi_tcq.h>
-#else
-#include <linux/blk.h>
-#include "scsi.h"
-#include <scsi/scsi_host.h>
-#include "sd.h"
-#endif
 
 #if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_SGI_SN2)
 #include <asm/sn/io.h>
 #endif
 
-#if LINUX_VERSION_CODE < 0x020407
-#error "Kernels older than 2.4.7 are no longer supported"
+#if LINUX_VERSION_CODE < 0x020600
+#error "Kernels older than 2.6.0 are no longer supported"
 #endif
 
 
 
 #define NVRAM_DELAY()                  udelay(500)     /* 500 microseconds */
 
-#if LINUX_VERSION_CODE < 0x020500
-#define HOST_LOCK                      &io_request_lock
-#define irqreturn_t                    void
-#define IRQ_RETVAL(foo)
-#define MSG_ORDERED_TAG                        1
-
-#define DMA_BIDIRECTIONAL      SCSI_DATA_UNKNOWN
-#define DMA_TO_DEVICE          SCSI_DATA_WRITE
-#define DMA_FROM_DEVICE                SCSI_DATA_READ
-#define DMA_NONE               SCSI_DATA_NONE
-
-#ifndef HAVE_SECTOR_T
-typedef unsigned int sector_t;
-#endif
-
-static inline void
-scsi_adjust_queue_depth(struct scsi_device *device, int tag, int depth)
-{
-       if (tag) {
-               device->tagged_queue = tag;
-               device->current_tag = 0;
-       }
-       device->queue_depth = depth;
-}
-static inline struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *t, size_t s)
-{
-       return scsi_register(t, s);
-}
-static inline void scsi_host_put(struct Scsi_Host *h)
-{
-       scsi_unregister(h);
-}
-#else
-#define HOST_LOCK                      ha->host->host_lock
-#endif
-#if LINUX_VERSION_CODE < 0x020600
-#define DEV_SIMPLE_TAGS(device)                device->tagged_queue
-/*
- * Hack around that qla1280_remove_one is called from
- * qla1280_release in 2.4
- */
-#undef __devexit
-#define __devexit
-#else
-#define DEV_SIMPLE_TAGS(device)                device->simple_tags
-#endif
 #if defined(__ia64__) && !defined(ia64_platform_is)
 #define ia64_platform_is(foo)          (!strcmp(foo, platform_name))
 #endif
@@ -506,9 +455,6 @@ static void qla1280_remove_one(struct pci_dev *);
  *  QLogic Driver Support Function Prototypes.
  */
 static void qla1280_done(struct scsi_qla_host *);
-#if LINUX_VERSION_CODE < 0x020545
-static void qla1280_get_target_options(struct scsi_cmnd *, struct scsi_qla_host *);
-#endif
 static int qla1280_get_token(char *);
 static int qla1280_setup(char *s) __init;
 
@@ -610,11 +556,7 @@ __setup("qla1280=", qla1280_setup);
 #define        CMD_SNSLEN(Cmnd)        sizeof(Cmnd->sense_buffer)
 #define        CMD_RESULT(Cmnd)        Cmnd->result
 #define        CMD_HANDLE(Cmnd)        Cmnd->host_scribble
-#if LINUX_VERSION_CODE < 0x020545
-#define CMD_REQUEST(Cmnd)      Cmnd->request.cmd
-#else
 #define CMD_REQUEST(Cmnd)      Cmnd->request->cmd
-#endif
 
 #define CMD_HOST(Cmnd)         Cmnd->device->host
 #define SCSI_BUS_32(Cmnd)      Cmnd->device->channel
@@ -1064,10 +1006,10 @@ qla1280_error_action(struct scsi_cmnd *cmd, enum action action)
        add_timer(&timer);
 
        /* wait for the action to complete (or the timer to expire) */
-       spin_unlock_irq(HOST_LOCK);
+       spin_unlock_irq(ha->host->host_lock);
        wait_for_completion(&wait);
        del_timer_sync(&timer);
-       spin_lock_irq(HOST_LOCK);
+       spin_lock_irq(ha->host->host_lock);
        sp->wait = NULL;
 
        /* the only action we might get a fail for is abort */
@@ -1173,96 +1115,6 @@ qla1280_biosparam(struct scsi_device *sdev, struct block_device *bdev,
        return 0;
 }
 
-#if LINUX_VERSION_CODE < 0x020600
-static int
-qla1280_detect(struct scsi_host_template *template)
-{
-       struct pci_device_id *id = &qla1280_pci_tbl[0];
-       struct pci_dev *pdev = NULL;
-       int num_hosts = 0;
-
-       if (sizeof(struct srb) > sizeof(Scsi_Pointer)) {
-               printk(KERN_WARNING
-                      "qla1280: struct srb too big, aborting\n");
-               return 0;
-       }
-
-       if ((DMA_BIDIRECTIONAL != PCI_DMA_BIDIRECTIONAL) ||
-           (DMA_TO_DEVICE != PCI_DMA_TODEVICE) ||
-           (DMA_FROM_DEVICE != PCI_DMA_FROMDEVICE) ||
-           (DMA_NONE != PCI_DMA_NONE)) {
-               printk(KERN_WARNING
-                      "qla1280: dma direction bits don't match\n");
-               return 0;
-       }
-
-#ifdef MODULE
-       /*
-        * If we are called as a module, the qla1280 pointer may not be null
-        * and it would point to our bootup string, just like on the lilo
-        * command line.  IF not NULL, then process this config string with
-        * qla1280_setup
-        *
-        * Boot time Options
-        * To add options at boot time add a line to your lilo.conf file like:
-        * append="qla1280=verbose,max_tags:{{255,255,255,255},{255,255,255,255}}"
-        * which will result in the first four devices on the first two
-        * controllers being set to a tagged queue depth of 32.
-        */
-       if (qla1280)
-               qla1280_setup(qla1280);
-#endif
-
-       /* First Initialize QLA12160 on PCI Bus 1 Dev 2 */
-       while ((pdev = pci_find_device(id->vendor, id->device, pdev))) {
-               if (pdev->bus->number == 1 && PCI_SLOT(pdev->devfn) == 2) {
-                       if (!qla1280_probe_one(pdev, id))
-                               num_hosts++;
-               }
-       }
-
-       pdev = NULL;
-       /* Try and find each different type of adapter we support */
-       for (id = &qla1280_pci_tbl[0]; id->device; id++) {
-               while ((pdev = pci_find_device(id->vendor, id->device, pdev))) {
-                       /*
-                        * skip QLA12160 already initialized on
-                        * PCI Bus 1 Dev 2 since we already initialized
-                        * and presented it
-                        */
-                       if (id->device == PCI_DEVICE_ID_QLOGIC_ISP12160 &&
-                           pdev->bus->number == 1 &&
-                           PCI_SLOT(pdev->devfn) == 2)
-                               continue;
-
-                       if (!qla1280_probe_one(pdev, id))
-                               num_hosts++;
-               }
-       }
-
-       return num_hosts;
-}
-
-/*
- * This looks a bit ugly as we could just pass down host to
- * qla1280_remove_one, but I want to keep qla1280_release purely a wrapper
- * around pci_driver::remove as used from 2.6 onwards.
- */
-static int
-qla1280_release(struct Scsi_Host *host)
-{
-       struct scsi_qla_host *ha = (struct scsi_qla_host *)host->hostdata;
-
-       qla1280_remove_one(ha->pdev);
-       return 0;
-}
-
-static int
-qla1280_biosparam_old(Disk * disk, kdev_t dev, int geom[])
-{
-       return qla1280_biosparam(disk->device, NULL, disk->capacity, geom);
-}
-#endif
  
 /* disable risc and host interrupts */
 static inline void
@@ -1295,7 +1147,7 @@ qla1280_intr_handler(int irq, void *dev_id, struct pt_regs *regs)
        ENTER_INTR ("qla1280_intr_handler");
        ha = (struct scsi_qla_host *)dev_id;
 
-       spin_lock(HOST_LOCK);
+       spin_lock(ha->host->host_lock);
 
        ha->isr_count++;
        reg = ha->iobase;
@@ -1311,7 +1163,7 @@ qla1280_intr_handler(int irq, void *dev_id, struct pt_regs *regs)
        if (!list_empty(&ha->done_q))
                qla1280_done(ha);
 
-       spin_unlock(HOST_LOCK);
+       spin_unlock(ha->host->host_lock);
 
        qla1280_enable_intrs(ha);
 
@@ -1411,11 +1263,9 @@ qla1280_slave_configure(struct scsi_device *device)
                scsi_adjust_queue_depth(device, 0, default_depth);
        }
 
-#if LINUX_VERSION_CODE > 0x020500
        nv->bus[bus].target[target].parameter.enable_sync = device->sdtr;
        nv->bus[bus].target[target].parameter.enable_wide = device->wdtr;
        nv->bus[bus].target[target].ppr_1x160.flags.enable_ppr = device->ppr;
-#endif
 
        if (driver_setup.no_sync ||
            (driver_setup.sync_mask &&
@@ -1432,38 +1282,14 @@ qla1280_slave_configure(struct scsi_device *device)
                        nv->bus[bus].target[target].ppr_1x160.flags.enable_ppr = 0;
        }
 
-       spin_lock_irqsave(HOST_LOCK, flags);
+       spin_lock_irqsave(ha->host->host_lock, flags);
        if (nv->bus[bus].target[target].parameter.enable_sync)
                status = qla1280_set_target_parameters(ha, bus, target);
        qla1280_get_target_parameters(ha, device);
-       spin_unlock_irqrestore(HOST_LOCK, flags);
+       spin_unlock_irqrestore(ha->host->host_lock, flags);
        return status;
 }
 
-#if LINUX_VERSION_CODE < 0x020545
-/**************************************************************************
- *   qla1280_select_queue_depth
- *
- *   Sets the queue depth for each SCSI device hanging off the input
- *   host adapter.  We use a queue depth of 2 for devices that do not
- *   support tagged queueing.
- **************************************************************************/
-static void
-qla1280_select_queue_depth(struct Scsi_Host *host, struct scsi_device *sdev_q)
-{
-       struct scsi_qla_host *ha = (struct scsi_qla_host *)host->hostdata;
-       struct scsi_device *sdev;
-
-       ENTER("qla1280_select_queue_depth");
-       for (sdev = sdev_q; sdev; sdev = sdev->next)
-               if (sdev->host == host)
-                       qla1280_slave_configure(sdev);
-
-       if (sdev_q)
-               qla1280_check_for_dead_scsi_bus(ha, sdev_q->channel);
-       LEAVE("qla1280_select_queue_depth");
-}
-#endif
 
 /*
  * qla1280_done
@@ -1523,10 +1349,6 @@ qla1280_done(struct scsi_qla_host *ha)
                CMD_HANDLE(sp->cmd) = (unsigned char *)INVALID_HANDLE;
                ha->actthreads--;
 
-#if LINUX_VERSION_CODE < 0x020500
-               if (cmd->cmnd[0] == INQUIRY)
-                       qla1280_get_target_options(cmd, ha);
-#endif
                (*(cmd)->scsi_done)(cmd);
 
                if(sp->wait != NULL)
@@ -1655,9 +1477,7 @@ qla1280_initialize_adapter(struct scsi_qla_host *ha)
        struct device_reg __iomem *reg;
        int status;
        int bus;
-#if LINUX_VERSION_CODE > 0x020500
        unsigned long flags;
-#endif
 
        ENTER("qla1280_initialize_adapter");
 
@@ -1695,15 +1515,12 @@ qla1280_initialize_adapter(struct scsi_qla_host *ha)
                        "NVRAM\n");
        }
 
-#if LINUX_VERSION_CODE >= 0x020500
        /*
         * It's necessary to grab the spin here as qla1280_mailbox_command
         * needs to be able to drop the lock unconditionally to wait
         * for completion.
-        * In 2.4 ->detect is called with the io_request_lock held.
         */
-       spin_lock_irqsave(HOST_LOCK, flags);
-#endif
+       spin_lock_irqsave(ha->host->host_lock, flags);
 
        status = qla1280_load_firmware(ha);
        if (status) {
@@ -1735,9 +1552,8 @@ qla1280_initialize_adapter(struct scsi_qla_host *ha)
 
        ha->flags.online = 1;
  out:
-#if LINUX_VERSION_CODE >= 0x020500
-       spin_unlock_irqrestore(HOST_LOCK, flags);
-#endif
+       spin_unlock_irqrestore(ha->host->host_lock, flags);
+
        if (status)
                dprintk(2, "qla1280_initialize_adapter: **** FAILED ****\n");
 
@@ -2650,14 +2466,14 @@ qla1280_mailbox_command(struct scsi_qla_host *ha, uint8_t mr, uint16_t *mb)
        timer.function = qla1280_mailbox_timeout;
        add_timer(&timer);
 
-       spin_unlock_irq(HOST_LOCK);
+       spin_unlock_irq(ha->host->host_lock);
        WRT_REG_WORD(&reg->host_cmd, HC_SET_HOST_INT);
        data = qla1280_debounce_register(&reg->istatus);
 
        wait_for_completion(&wait);
        del_timer_sync(&timer);
 
-       spin_lock_irq(HOST_LOCK);
+       spin_lock_irq(ha->host->host_lock);
 
        ha->mailbox_wait = NULL;
 
@@ -2770,9 +2586,9 @@ qla1280_bus_reset(struct scsi_qla_host *ha, int bus)
                        ha->bus_settings[bus].scsi_bus_dead = 1;
                ha->bus_settings[bus].failed_reset_count++;
        } else {
-               spin_unlock_irq(HOST_LOCK);
+               spin_unlock_irq(ha->host->host_lock);
                ssleep(reset_delay);
-               spin_lock_irq(HOST_LOCK);
+               spin_lock_irq(ha->host->host_lock);
 
                ha->bus_settings[bus].scsi_bus_dead = 0;
                ha->bus_settings[bus].failed_reset_count = 0;
@@ -3078,7 +2894,7 @@ qla1280_64bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
                (SCSI_TCN_32(cmd) | BIT_7) : SCSI_TCN_32(cmd);
 
        /* Enable simple tag queuing if device supports it. */
-       if (DEV_SIMPLE_TAGS(cmd->device))
+       if (cmd->device->simple_tags)
                pkt->control_flags |= cpu_to_le16(BIT_3);
 
        /* Load SCSI command packet. */
@@ -3377,7 +3193,7 @@ qla1280_32bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
                (SCSI_TCN_32(cmd) | BIT_7) : SCSI_TCN_32(cmd);
 
        /* Enable simple tag queuing if device supports it. */
-       if (DEV_SIMPLE_TAGS(cmd->device))
+       if (cmd->device->simple_tags)
                pkt->control_flags |= cpu_to_le16(BIT_3);
 
        /* Load SCSI command packet. */
@@ -3889,50 +3705,6 @@ qla1280_rst_aen(struct scsi_qla_host *ha)
 }
 
 
-#if LINUX_VERSION_CODE < 0x020500
-/*
- *
- */
-static void
-qla1280_get_target_options(struct scsi_cmnd *cmd, struct scsi_qla_host *ha)
-{
-       unsigned char *result;
-       struct nvram *n;
-       int bus, target, lun;
-
-       bus = SCSI_BUS_32(cmd);
-       target = SCSI_TCN_32(cmd);
-       lun = SCSI_LUN_32(cmd);
-
-       /*
-        * Make sure to not touch anything if someone is using the
-        * sg interface.
-        */
-       if (cmd->use_sg || (CMD_RESULT(cmd) >> 16) != DID_OK || lun)
-               return;
-
-       result = cmd->request_buffer;
-       n = &ha->nvram;
-
-       n->bus[bus].target[target].parameter.enable_wide = 0;
-       n->bus[bus].target[target].parameter.enable_sync = 0;
-       n->bus[bus].target[target].ppr_1x160.flags.enable_ppr = 0;
-
-        if (result[7] & 0x60)
-               n->bus[bus].target[target].parameter.enable_wide = 1;
-        if (result[7] & 0x10)
-               n->bus[bus].target[target].parameter.enable_sync = 1;
-       if ((result[2] >= 3) && (result[4] + 5 > 56) &&
-           (result[56] & 0x4))
-               n->bus[bus].target[target].ppr_1x160.flags.enable_ppr = 1;
-
-       dprintk(2, "get_target_options(): wide %i, sync %i, ppr %i\n",
-               n->bus[bus].target[target].parameter.enable_wide,
-               n->bus[bus].target[target].parameter.enable_sync,
-               n->bus[bus].target[target].ppr_1x160.flags.enable_ppr);
-}
-#endif
-
 /*
  *  qla1280_status_entry
  *      Processes received ISP status entry.
@@ -4271,7 +4043,7 @@ qla1280_get_target_parameters(struct scsi_qla_host *ha,
        } else
                printk(" Async");
 
-       if (DEV_SIMPLE_TAGS(device))
+       if (device->simple_tags)
                printk(", Tagged queuing: depth %d", device->queue_depth);
        printk("\n");
 }
@@ -4485,7 +4257,7 @@ qla1280_get_token(char *str)
        return ret;
 }
 
-#if LINUX_VERSION_CODE >= 0x020600
+
 static struct scsi_host_template qla1280_driver_template = {
        .module                 = THIS_MODULE,
        .proc_name              = "qla1280",
@@ -4504,27 +4276,7 @@ static struct scsi_host_template qla1280_driver_template = {
        .cmd_per_lun            = 1,
        .use_clustering         = ENABLE_CLUSTERING,
 };
-#else
-static struct scsi_host_template qla1280_driver_template = {
-       .proc_name              = "qla1280",
-       .name                   = "Qlogic ISP 1280/12160",
-       .detect                 = qla1280_detect,
-       .release                = qla1280_release,
-       .info                   = qla1280_info,
-       .queuecommand           = qla1280_queuecommand,
-       .eh_abort_handler       = qla1280_eh_abort,
-       .eh_device_reset_handler= qla1280_eh_device_reset,
-       .eh_bus_reset_handler   = qla1280_eh_bus_reset,
-       .eh_host_reset_handler  = qla1280_eh_adapter_reset,
-       .bios_param             = qla1280_biosparam_old,
-       .can_queue              = 0xfffff,
-       .this_id                = -1,
-       .sg_tablesize           = SG_ALL,
-       .cmd_per_lun            = 1,
-       .use_clustering         = ENABLE_CLUSTERING,
-       .use_new_eh_code        = 1,
-};
-#endif
+
 
 static int __devinit
 qla1280_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
@@ -4615,10 +4367,6 @@ qla1280_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
        host->max_sectors = 1024;
        host->unique_id = host->host_no;
 
-#if LINUX_VERSION_CODE < 0x020545
-       host->select_queue_depths = qla1280_select_queue_depth;
-#endif
-
        error = -ENODEV;
 
 #if MEMORY_MAPPED_IO
@@ -4666,21 +4414,15 @@ qla1280_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
 
        pci_set_drvdata(pdev, host);
 
-#if LINUX_VERSION_CODE >= 0x020600
        error = scsi_add_host(host, &pdev->dev);
        if (error)
                goto error_disable_adapter;
        scsi_scan_host(host);
-#else
-       scsi_set_pci_device(host, pdev);
-#endif
 
        return 0;
 
-#if LINUX_VERSION_CODE >= 0x020600
  error_disable_adapter:
        qla1280_disable_intrs(ha);
-#endif
  error_free_irq:
        free_irq(pdev->irq, ha);
  error_release_region:
@@ -4712,9 +4454,7 @@ qla1280_remove_one(struct pci_dev *pdev)
        struct Scsi_Host *host = pci_get_drvdata(pdev);
        struct scsi_qla_host *ha = (struct scsi_qla_host *)host->hostdata;
 
-#if LINUX_VERSION_CODE >= 0x020600
        scsi_remove_host(host);
-#endif
 
        qla1280_disable_intrs(ha);
 
@@ -4738,7 +4478,6 @@ qla1280_remove_one(struct pci_dev *pdev)
        scsi_host_put(host);
 }
 
-#if LINUX_VERSION_CODE >= 0x020600
 static struct pci_driver qla1280_pci_driver = {
        .name           = "qla1280",
        .id_table       = qla1280_pci_tbl,
@@ -4784,10 +4523,6 @@ qla1280_exit(void)
 module_init(qla1280_init);
 module_exit(qla1280_exit);
 
-#else
-# define driver_template qla1280_driver_template
-# include "scsi_module.c"
-#endif
 
 MODULE_AUTHOR("Qlogic & Jes Sorensen");
 MODULE_DESCRIPTION("Qlogic ISP SCSI (qla1x80/qla1x160) driver");
index 79d8a914f9d0f4ddb213fd1bd9d5b63f68c39138..bad066e5772acfce0079784ef2fada7c73a9a707 100644 (file)
@@ -1680,7 +1680,8 @@ typedef struct fc_port {
        uint8_t mp_byte;                /* multi-path byte (not used) */
        uint8_t cur_path;               /* current path id */
 
-       struct fc_rport *rport;
+       spinlock_t rport_lock;
+       struct fc_rport *rport, *drport;
        u32 supported_classes;
        struct work_struct rport_add_work;
        struct work_struct rport_del_work;
@@ -2270,6 +2271,7 @@ typedef struct scsi_qla_host {
 #define LOOP_RESET_NEEDED      24
 #define BEACON_BLINK_NEEDED    25
 #define REGISTER_FDMI_NEEDED   26
+#define FCPORT_UPDATE_NEEDED   27
 
        uint32_t        device_flags;
 #define DFLG_LOCAL_DEVICES             BIT_0
index 32be4c14cccb50044a77b1c53b5b9be1eb5cc81d..35266bd5d5383ba75919f1e285601577ad3abd54 100644 (file)
@@ -47,9 +47,11 @@ extern int qla2x00_local_device_login(scsi_qla_host_t *, uint16_t);
 extern void qla2x00_restart_queues(scsi_qla_host_t *, uint8_t);
 
 extern void qla2x00_rescan_fcports(scsi_qla_host_t *);
+extern void qla2x00_update_fcports(scsi_qla_host_t *);
 
 extern int qla2x00_abort_isp(scsi_qla_host_t *);
 
+extern void qla2x00_update_fcport(scsi_qla_host_t *, fc_port_t *);
 extern void qla2x00_reg_remote_port(scsi_qla_host_t *, fc_port_t *);
 
 /*
@@ -70,8 +72,8 @@ extern char *qla2x00_get_fw_version_str(struct scsi_qla_host *, char *);
 
 extern void qla2x00_cmd_timeout(srb_t *);
 
-extern void qla2x00_mark_device_lost(scsi_qla_host_t *, fc_port_t *, int);
-extern void qla2x00_mark_all_devices_lost(scsi_qla_host_t *);
+extern void qla2x00_mark_device_lost(scsi_qla_host_t *, fc_port_t *, int, int);
+extern void qla2x00_mark_all_devices_lost(scsi_qla_host_t *, int);
 
 extern void qla2x00_blink_led(scsi_qla_host_t *);
 
index a91fea69ad63597b938b9afde16260ebb358d476..e67bb099781818339b6a1ef6342694c54825860c 100644 (file)
@@ -32,7 +32,6 @@ static int qla2x00_fw_ready(scsi_qla_host_t *);
 static int qla2x00_configure_hba(scsi_qla_host_t *);
 static int qla2x00_configure_loop(scsi_qla_host_t *);
 static int qla2x00_configure_local_loop(scsi_qla_host_t *);
-static void qla2x00_update_fcport(scsi_qla_host_t *, fc_port_t *);
 static int qla2x00_configure_fabric(scsi_qla_host_t *);
 static int qla2x00_find_all_fabric_devs(scsi_qla_host_t *, struct list_head *);
 static int qla2x00_device_resync(scsi_qla_host_t *);
@@ -1688,10 +1687,16 @@ static void
 qla2x00_rport_del(void *data)
 {
        fc_port_t *fcport = data;
+       struct fc_rport *rport;
+       unsigned long flags;
+
+       spin_lock_irqsave(&fcport->rport_lock, flags);
+       rport = fcport->drport;
+       fcport->drport = NULL;
+       spin_unlock_irqrestore(&fcport->rport_lock, flags);
+       if (rport)
+               fc_remote_port_delete(rport);
 
-       if (fcport->rport)
-               fc_remote_port_delete(fcport->rport);
-       fcport->rport = NULL;
 }
 
 /**
@@ -1719,6 +1724,7 @@ qla2x00_alloc_fcport(scsi_qla_host_t *ha, gfp_t flags)
        atomic_set(&fcport->state, FCS_UNCONFIGURED);
        fcport->flags = FCF_RLC_SUPPORT;
        fcport->supported_classes = FC_COS_UNSPECIFIED;
+       spin_lock_init(&fcport->rport_lock);
        INIT_WORK(&fcport->rport_add_work, qla2x00_rport_add, fcport);
        INIT_WORK(&fcport->rport_del_work, qla2x00_rport_del, fcport);
 
@@ -2008,7 +2014,7 @@ qla2x00_probe_for_all_luns(scsi_qla_host_t *ha)
 {
        fc_port_t       *fcport;
 
-       qla2x00_mark_all_devices_lost(ha);
+       qla2x00_mark_all_devices_lost(ha, 0);
        list_for_each_entry(fcport, &ha->fcports, list) {
                if (fcport->port_type != FCT_TARGET)
                        continue;
@@ -2032,13 +2038,9 @@ qla2x00_probe_for_all_luns(scsi_qla_host_t *ha)
  * Context:
  *     Kernel context.
  */
-static void
+void
 qla2x00_update_fcport(scsi_qla_host_t *ha, fc_port_t *fcport)
 {
-       uint16_t        index;
-       unsigned long flags;
-       srb_t *sp;
-
        fcport->ha = ha;
        fcport->login_retry = 0;
        fcport->port_login_retry_count = ha->port_down_retry_count *
@@ -2047,28 +2049,6 @@ qla2x00_update_fcport(scsi_qla_host_t *ha, fc_port_t *fcport)
            PORT_RETRY_TIME);
        fcport->flags &= ~FCF_LOGIN_NEEDED;
 
-       /*
-        * Check for outstanding cmd on tape Bypass LUN discovery if active
-        * command on tape.
-        */
-       if (fcport->flags & FCF_TAPE_PRESENT) {
-               spin_lock_irqsave(&ha->hardware_lock, flags);
-               for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
-                       fc_port_t *sfcp;
-
-                       if ((sp = ha->outstanding_cmds[index]) != 0) {
-                               sfcp = sp->fcport;
-                               if (sfcp == fcport) {
-                                       atomic_set(&fcport->state, FCS_ONLINE);
-                                       spin_unlock_irqrestore(
-                                           &ha->hardware_lock, flags);
-                                       return;
-                               }
-                       }
-               }
-               spin_unlock_irqrestore(&ha->hardware_lock, flags);
-       }
-
        if (fcport->port_type == FCT_INITIATOR ||
            fcport->port_type == FCT_BROADCAST)
                fcport->device_type = TYPE_PROCESSOR;
@@ -2084,24 +2064,29 @@ qla2x00_reg_remote_port(scsi_qla_host_t *ha, fc_port_t *fcport)
 {
        struct fc_rport_identifiers rport_ids;
        struct fc_rport *rport;
+       unsigned long flags;
 
-       if (fcport->rport) {
-               fc_remote_port_delete(fcport->rport);
-               fcport->rport = NULL;
-       }
+       if (fcport->drport)
+               qla2x00_rport_del(fcport);
+       if (fcport->rport)
+               return;
 
        rport_ids.node_name = wwn_to_u64(fcport->node_name);
        rport_ids.port_name = wwn_to_u64(fcport->port_name);
        rport_ids.port_id = fcport->d_id.b.domain << 16 |
            fcport->d_id.b.area << 8 | fcport->d_id.b.al_pa;
        rport_ids.roles = FC_RPORT_ROLE_UNKNOWN;
-       fcport->rport = rport = fc_remote_port_add(ha->host, 0, &rport_ids);
+       rport = fc_remote_port_add(ha->host, 0, &rport_ids);
        if (!rport) {
                qla_printk(KERN_WARNING, ha,
                    "Unable to allocate fc remote port!\n");
                return;
        }
+       spin_lock_irqsave(&fcport->rport_lock, flags);
+       fcport->rport = rport;
        *((fc_port_t **)rport->dd_data) = fcport;
+       spin_unlock_irqrestore(&fcport->rport_lock, flags);
+
        rport->supported_classes = fcport->supported_classes;
 
        rport_ids.roles = FC_RPORT_ROLE_UNKNOWN;
@@ -2217,12 +2202,11 @@ qla2x00_configure_fabric(scsi_qla_host_t *ha)
 
                        if (atomic_read(&fcport->state) == FCS_DEVICE_LOST) {
                                qla2x00_mark_device_lost(ha, fcport,
-                                   ql2xplogiabsentdevice);
+                                   ql2xplogiabsentdevice, 0);
                                if (fcport->loop_id != FC_NO_LOOP_ID &&
                                    (fcport->flags & FCF_TAPE_PRESENT) == 0 &&
                                    fcport->port_type != FCT_INITIATOR &&
                                    fcport->port_type != FCT_BROADCAST) {
-
                                        ha->isp_ops.fabric_logout(ha,
                                            fcport->loop_id,
                                            fcport->d_id.b.domain,
@@ -2694,7 +2678,8 @@ qla2x00_device_resync(scsi_qla_host_t *ha)
                        if (atomic_read(&fcport->state) == FCS_ONLINE) {
                                if (format != 3 ||
                                    fcport->port_type != FCT_INITIATOR) {
-                                       qla2x00_mark_device_lost(ha, fcport, 0);
+                                       qla2x00_mark_device_lost(ha, fcport,
+                                           0, 0);
                                }
                        }
                        fcport->flags &= ~FCF_FARP_DONE;
@@ -2741,8 +2726,7 @@ qla2x00_fabric_dev_login(scsi_qla_host_t *ha, fc_port_t *fcport,
                        ha->isp_ops.fabric_logout(ha, fcport->loop_id,
                            fcport->d_id.b.domain, fcport->d_id.b.area,
                            fcport->d_id.b.al_pa);
-                       qla2x00_mark_device_lost(ha, fcport, 1);
-
+                       qla2x00_mark_device_lost(ha, fcport, 1, 0);
                } else {
                        qla2x00_update_fcport(ha, fcport);
                }
@@ -2855,7 +2839,7 @@ qla2x00_fabric_login(scsi_qla_host_t *ha, fc_port_t *fcport,
                        ha->isp_ops.fabric_logout(ha, fcport->loop_id,
                            fcport->d_id.b.domain, fcport->d_id.b.area,
                            fcport->d_id.b.al_pa);
-                       qla2x00_mark_device_lost(ha, fcport, 1);
+                       qla2x00_mark_device_lost(ha, fcport, 1, 0);
 
                        rval = 1;
                        break;
@@ -2990,6 +2974,17 @@ qla2x00_rescan_fcports(scsi_qla_host_t *ha)
        qla2x00_probe_for_all_luns(ha);
 }
 
+void
+qla2x00_update_fcports(scsi_qla_host_t *ha)
+{
+       fc_port_t *fcport;
+
+       /* Complete any deferred removal of rport references. */
+       list_for_each_entry(fcport, &ha->fcports, list)
+               if (fcport->drport)
+                       qla2x00_rport_del(fcport);
+}
+
 /*
 *  qla2x00_abort_isp
 *      Resets ISP and aborts all outstanding commands.
@@ -3019,7 +3014,7 @@ qla2x00_abort_isp(scsi_qla_host_t *ha)
                atomic_set(&ha->loop_down_timer, LOOP_DOWN_TIME);
                if (atomic_read(&ha->loop_state) != LOOP_DOWN) {
                        atomic_set(&ha->loop_state, LOOP_DOWN);
-                       qla2x00_mark_all_devices_lost(ha);
+                       qla2x00_mark_all_devices_lost(ha, 0);
                } else {
                        if (!atomic_read(&ha->loop_down_timer))
                                atomic_set(&ha->loop_down_timer,
index f63af081d4ff8aaf219e2255e06e7a0c334f7c09..71a46fcee8cc47d3b7e0cba5cf0aad80786f8d2b 100644 (file)
@@ -389,7 +389,7 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb)
                if (atomic_read(&ha->loop_state) != LOOP_DOWN) {
                        atomic_set(&ha->loop_state, LOOP_DOWN);
                        atomic_set(&ha->loop_down_timer, LOOP_DOWN_TIME);
-                       qla2x00_mark_all_devices_lost(ha);
+                       qla2x00_mark_all_devices_lost(ha, 1);
                }
 
                set_bit(REGISTER_FC4_NEEDED, &ha->dpc_flags);
@@ -432,7 +432,7 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb)
                        atomic_set(&ha->loop_state, LOOP_DOWN);
                        atomic_set(&ha->loop_down_timer, LOOP_DOWN_TIME);
                        ha->device_flags |= DFLG_NO_CABLE;
-                       qla2x00_mark_all_devices_lost(ha);
+                       qla2x00_mark_all_devices_lost(ha, 1);
                }
 
                ha->flags.management_server_logged_in = 0;
@@ -453,7 +453,7 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb)
                if (atomic_read(&ha->loop_state) != LOOP_DOWN) {
                        atomic_set(&ha->loop_state, LOOP_DOWN);
                        atomic_set(&ha->loop_down_timer, LOOP_DOWN_TIME);
-                       qla2x00_mark_all_devices_lost(ha);
+                       qla2x00_mark_all_devices_lost(ha, 1);
                }
 
                set_bit(RESET_MARKER_NEEDED, &ha->dpc_flags);
@@ -482,7 +482,7 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb)
                        if (!atomic_read(&ha->loop_down_timer))
                                atomic_set(&ha->loop_down_timer,
                                    LOOP_DOWN_TIME);
-                       qla2x00_mark_all_devices_lost(ha);
+                       qla2x00_mark_all_devices_lost(ha, 1);
                }
 
                if (!(test_bit(ABORT_ISP_ACTIVE, &ha->dpc_flags))) {
@@ -506,7 +506,7 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb)
                        if (!atomic_read(&ha->loop_down_timer))
                                atomic_set(&ha->loop_down_timer,
                                    LOOP_DOWN_TIME);
-                       qla2x00_mark_all_devices_lost(ha);
+                       qla2x00_mark_all_devices_lost(ha, 1);
                }
 
                set_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags);
@@ -580,7 +580,7 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb)
                 */
                atomic_set(&ha->loop_state, LOOP_UP);
 
-               qla2x00_mark_all_devices_lost(ha);
+               qla2x00_mark_all_devices_lost(ha, 1);
 
                ha->flags.rscn_queue_overflow = 1;
 
@@ -1091,7 +1091,7 @@ qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt)
 
                cp->result = DID_BUS_BUSY << 16;
                if (atomic_read(&fcport->state) == FCS_ONLINE) {
-                       qla2x00_mark_device_lost(ha, fcport, 1);
+                       qla2x00_mark_device_lost(ha, fcport, 1, 1);
                }
                break;
 
@@ -1135,7 +1135,7 @@ qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt)
 
                /* Check to see if logout occurred. */
                if ((le16_to_cpu(sts->status_flags) & SF_LOGOUT_SENT))
-                       qla2x00_mark_device_lost(ha, fcport, 1);
+                       qla2x00_mark_device_lost(ha, fcport, 1, 1);
                break;
 
        case CS_QUEUE_FULL:
index 4916847d84ec9321a58629c0eef15543e73d2873..5866a7c706a82d627e71f597fe89970eb493d10f 100644 (file)
@@ -756,7 +756,7 @@ qla2xxx_eh_device_reset(struct scsi_cmnd *cmd)
                if (ret == SUCCESS) {
                        if (fcport->flags & FC_FABRIC_DEVICE) {
                                ha->isp_ops.fabric_logout(ha, fcport->loop_id);
-                               qla2x00_mark_device_lost(ha, fcport);
+                               qla2x00_mark_device_lost(ha, fcport, 0, 0);
                        }
                }
 #endif
@@ -1642,6 +1642,31 @@ qla2x00_free_device(scsi_qla_host_t *ha)
        pci_disable_device(ha->pdev);
 }
 
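+/*
+ * Detach the fc_rport from the fcport.  With @defer set, the actual
+ * fc_remote_port_delete() is left to the DPC thread (FCPORT_UPDATE_NEEDED);
+ * otherwise the rport is deleted immediately.
+ */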
+static inline void
+qla2x00_schedule_rport_del(struct scsi_qla_host *ha, fc_port_t *fcport,
+    int defer)
+{
+       unsigned long flags;
+       struct fc_rport *rport;
+
+       if (!fcport->rport)
+               return;
+
+       rport = fcport->rport;
+       if (defer) {
+               spin_lock_irqsave(&fcport->rport_lock, flags);
+               fcport->drport = rport;
+               fcport->rport = NULL;
+               spin_unlock_irqrestore(&fcport->rport_lock, flags);
+               set_bit(FCPORT_UPDATE_NEEDED, &ha->dpc_flags);
+       } else {
+               spin_lock_irqsave(&fcport->rport_lock, flags);
+               fcport->rport = NULL;
+               spin_unlock_irqrestore(&fcport->rport_lock, flags);
+               fc_remote_port_delete(rport);
+       }
+}
+
 /*
  * qla2x00_mark_device_lost Updates fcport state when device goes offline.
  *
@@ -1652,10 +1677,10 @@ qla2x00_free_device(scsi_qla_host_t *ha)
  * Context:
  */
 void qla2x00_mark_device_lost(scsi_qla_host_t *ha, fc_port_t *fcport,
-    int do_login)
+    int do_login, int defer)
 {
-       if (atomic_read(&fcport->state) == FCS_ONLINE && fcport->rport)
-               schedule_work(&fcport->rport_del_work);
+       if (atomic_read(&fcport->state) == FCS_ONLINE)
+               qla2x00_schedule_rport_del(ha, fcport, defer);
 
        /*
         * We may need to retry the login, so don't change the state of the
@@ -1702,7 +1727,7 @@ void qla2x00_mark_device_lost(scsi_qla_host_t *ha, fc_port_t *fcport,
  * Context:
  */
 void
-qla2x00_mark_all_devices_lost(scsi_qla_host_t *ha)
+qla2x00_mark_all_devices_lost(scsi_qla_host_t *ha, int defer)
 {
        fc_port_t *fcport;
 
@@ -1716,10 +1741,13 @@ qla2x00_mark_all_devices_lost(scsi_qla_host_t *ha)
                 */
                if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD)
                        continue;
-               if (atomic_read(&fcport->state) == FCS_ONLINE && fcport->rport)
-                       schedule_work(&fcport->rport_del_work);
+               if (atomic_read(&fcport->state) == FCS_ONLINE)
+                       qla2x00_schedule_rport_del(ha, fcport, defer);
                atomic_set(&fcport->state, FCS_DEVICE_LOST);
        }
+
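+       /* Wake the DPC thread so it can complete the deferred rport removals */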
+       if (defer && ha->dpc_wait && !ha->dpc_active)
+               up(ha->dpc_wait);
 }
 
 /*
@@ -2161,6 +2189,9 @@ qla2x00_do_dpc(void *data)
                            ha->host_no));
                }
 
+               if (test_and_clear_bit(FCPORT_UPDATE_NEEDED, &ha->dpc_flags))
+                       qla2x00_update_fcports(ha);
+
                if (test_and_clear_bit(LOOP_RESET_NEEDED, &ha->dpc_flags)) {
                        DEBUG(printk("scsi(%ld): dpc: sched loop_reset()\n",
                            ha->host_no));
@@ -2219,13 +2250,8 @@ qla2x00_do_dpc(void *data)
                                                DEBUG(printk("scsi(%ld): port login OK: logged in ID 0x%x\n",
                                                    ha->host_no, fcport->loop_id));
 
-                                               fcport->port_login_retry_count =
-                                                   ha->port_down_retry_count * PORT_RETRY_TIME;
-                                               atomic_set(&fcport->state, FCS_ONLINE);
-                                               atomic_set(&fcport->port_down_timer,
-                                                   ha->port_down_retry_count * PORT_RETRY_TIME);
-
-                                               fcport->login_retry = 0;
+                                               qla2x00_update_fcport(ha,
+                                                   fcport);
                                        } else if (status == 1) {
                                                set_bit(RELOGIN_NEEDED, &ha->dpc_flags);
                                                /* retry the login again */
@@ -2469,6 +2495,7 @@ qla2x00_timer(scsi_qla_host_t *ha)
        if ((test_bit(ISP_ABORT_NEEDED, &ha->dpc_flags) ||
            test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags) ||
            test_bit(LOOP_RESET_NEEDED, &ha->dpc_flags) ||
+           test_bit(FCPORT_UPDATE_NEEDED, &ha->dpc_flags) ||
            start_dpc ||
            test_bit(LOGIN_RETRY_NEEDED, &ha->dpc_flags) ||
            test_bit(RESET_MARKER_NEEDED, &ha->dpc_flags) ||
index 3d1ea09a06a1726825ad214f9e5236d38af28274..b0b0a69b3563c67a896f83517ae3b609a30aa011 100644 (file)
@@ -66,6 +66,7 @@ enum {
        board_2037x             = 0,    /* FastTrak S150 TX2plus */
        board_20319             = 1,    /* FastTrak S150 TX4 */
        board_20619             = 2,    /* FastTrak TX4000 */
+       board_20771             = 3,    /* FastTrak TX2300 */
 
        PDC_HAS_PATA            = (1 << 1), /* PDC20375 has PATA */
 
@@ -190,6 +191,16 @@ static const struct ata_port_info pdc_port_info[] = {
                .udma_mask      = 0x7f, /* udma0-6 ; FIXME */
                .port_ops       = &pdc_pata_ops,
        },
+
+       /* board_20771 */
+       {
+               .sht            = &pdc_ata_sht,
+               .host_flags     = PDC_COMMON_FLAGS | ATA_FLAG_SATA,
+               .pio_mask       = 0x1f, /* pio0-4 */
+               .mwdma_mask     = 0x07, /* mwdma0-2 */
+               .udma_mask      = 0x7f, /* udma0-6 ; FIXME */
+               .port_ops       = &pdc_sata_ops,
+       },
 };
 
 static const struct pci_device_id pdc_ata_pci_tbl[] = {
@@ -226,6 +237,8 @@ static const struct pci_device_id pdc_ata_pci_tbl[] = {
        { PCI_VENDOR_ID_PROMISE, 0x6629, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
          board_20619 },
 
+       { PCI_VENDOR_ID_PROMISE, 0x3570, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+         board_20771 },
        { }     /* terminate list */
 };
 
@@ -706,6 +719,9 @@ static int pdc_ata_init_one (struct pci_dev *pdev, const struct pci_device_id *e
        case board_2037x:
                probe_ent->n_ports = 2;
                break;
+       case board_20771:
+               probe_ent->n_ports = 2;
+               break;
        case board_20619:
                probe_ent->n_ports = 4;
 
index 668373590aa4111c6b873abb5163a96cf509e2b0..d8472563fde860cc0789f982bc896ab9104ba38e 100644 (file)
@@ -470,6 +470,7 @@ static const struct pci_device_id k2_sata_pci_tbl[] = {
        { 0x1166, 0x0241, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4 },
        { 0x1166, 0x0242, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 8 },
        { 0x1166, 0x024a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4 },
+       { 0x1166, 0x024b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4 },
        { }
 };
 
index a2333d2c7af0be4712651b2780b7cc10566dab3b..5cc97b721661471b38a57346a6bebb759fc9ec10 100644 (file)
@@ -1350,7 +1350,7 @@ static void scsi_eh_lock_door(struct scsi_device *sdev)
        cmnd[4] = SCSI_REMOVAL_PREVENT;
        cmnd[5] = 0;
 
-       scsi_execute_async(sdev, cmnd, DMA_NONE, NULL, 0, 0, 10 * HZ,
+       scsi_execute_async(sdev, cmnd, 6, DMA_NONE, NULL, 0, 0, 10 * HZ,
                           5, NULL, NULL, GFP_KERNEL);
 }
 
index 3574ba935af8ea451741de17ae26b4e77087fe68..4a602853a98e72f89b94757c4b9e56b87587ad53 100644 (file)
@@ -436,6 +436,7 @@ free_bios:
  * scsi_execute_async - insert request
  * @sdev:      scsi device
  * @cmd:       scsi command
+ * @cmd_len:   length of scsi cdb
  * @data_direction: data direction
  * @buffer:    data buffer (this can be a kernel buffer or scatterlist)
  * @bufflen:   len of buffer
@@ -445,7 +446,7 @@ free_bios:
  * @flags:     or into request flags
  **/
 int scsi_execute_async(struct scsi_device *sdev, const unsigned char *cmd,
-                      int data_direction, void *buffer, unsigned bufflen,
+                      int cmd_len, int data_direction, void *buffer, unsigned bufflen,
                       int use_sg, int timeout, int retries, void *privdata,
                       void (*done)(void *, char *, int, int), gfp_t gfp)
 {
@@ -472,7 +473,7 @@ int scsi_execute_async(struct scsi_device *sdev, const unsigned char *cmd,
        if (err)
                goto free_req;
 
-       req->cmd_len = COMMAND_SIZE(cmd[0]);
+       req->cmd_len = cmd_len;
        memcpy(req->cmd, cmd, req->cmd_len);
        req->sense = sioc->sense;
        req->sense_len = 0;
index a3e0b7bc2d7bd1a71b31b0f4be0e091ae5b20b9b..210dab5879fa354bb99031f3e0981ca40e14c446 100644 (file)
@@ -377,7 +377,7 @@ static void sas_phy_release(struct device *dev)
 /**
  * sas_phy_alloc  --  allocates and initializes a SAS PHY structure
  * @parent:    Parent device
- * @number:    Port number
+ * @number:    Phy index
  *
  * Allocates an SAS PHY structure.  It will be added in the device tree
  * below the device specified by @parent, which has to be either a Scsi_Host
@@ -595,8 +595,8 @@ struct sas_rphy *sas_rphy_alloc(struct sas_phy *parent)
        device_initialize(&rphy->dev);
        rphy->dev.parent = get_device(&parent->dev);
        rphy->dev.release = sas_rphy_release;
-       sprintf(rphy->dev.bus_id, "rphy-%d:%d",
-               shost->host_no, parent->number);
+       sprintf(rphy->dev.bus_id, "rphy-%d:%d-%d",
+               shost->host_no, parent->port_identifier, parent->number);
        transport_setup_device(&rphy->dev);
 
        return rphy;
index 78aad9582bcfbef87399ac33e02fa60b39712cdc..7d0700091f3d6417e8c37ab6919b6a0de32042a9 100644 (file)
@@ -741,7 +741,7 @@ sg_common_write(Sg_fd * sfp, Sg_request * srp,
        hp->duration = jiffies_to_msecs(jiffies);
 /* Now send everything off to mid-level. The next time we hear about this
    packet is when sg_cmd_done() is called (i.e. a callback). */
-       if (scsi_execute_async(sdp->device, cmnd, data_dir, srp->data.buffer,
+       if (scsi_execute_async(sdp->device, cmnd, hp->cmd_len, data_dir, srp->data.buffer,
                                hp->dxfer_len, srp->data.k_use_sg, timeout,
                                SG_DEFAULT_RETRIES, srp, sg_cmd_done,
                                GFP_ATOMIC)) {
index 13b1d3aac26521cf5b5e1518aaf9d062b5720dbb..7f96f33c1bb1c815b09f88c5458b556ce6c2fc81 100644 (file)
@@ -508,7 +508,7 @@ st_do_scsi(struct st_request * SRpnt, struct scsi_tape * STp, unsigned char *cmd
        STp->buffer->cmdstat.have_sense = 0;
        STp->buffer->syscall_result = 0;
 
-       if (scsi_execute_async(STp->device, cmd, direction,
+       if (scsi_execute_async(STp->device, cmd, COMMAND_SIZE(cmd[0]), direction,
                        &((STp->buffer)->sg[0]), bytes, (STp->buffer)->sg_segs,
                               timeout, retries, SRpnt, st_sleep_done, GFP_KERNEL)) {
                /* could not allocate the buffer or request was too large */
index 221999bcf8fe3809b943e4f11c53ba4529810f52..7aef7518b0d1cb4291d38ad3f4e860d67dbbd9a0 100644 (file)
@@ -366,7 +366,7 @@ static struct uart_port serial21285_port = {
        .irq            = NO_IRQ,
        .fifosize       = 16,
        .ops            = &serial21285_ops,
-       .flags          = ASYNC_BOOT_AUTOCONF,
+       .flags          = UPF_BOOT_AUTOCONF,
 };
 
 static void serial21285_setup_ports(void)
index d9ce8c54941651ef0fcd01c56a52b29f6d3c1a5a..179c1f065e60a5662498c006f28c48b83828baa1 100644 (file)
@@ -31,7 +31,6 @@
 #include <linux/init.h>
 #include <linux/console.h>
 #include <linux/sysrq.h>
-#include <linux/mca.h>
 #include <linux/delay.h>
 #include <linux/platform_device.h>
 #include <linux/tty.h>
@@ -2026,12 +2025,6 @@ static void serial8250_config_port(struct uart_port *port, int flags)
        int probeflags = PROBE_ANY;
        int ret;
 
-       /*
-        * Don't probe for MCA ports on non-MCA machines.
-        */
-       if (up->port.flags & UPF_BOOT_ONLYMCA && !MCA_bus)
-               return;
-
        /*
         * Find the region that we can probe for.  This in turn
         * tells us whether we can probe for the type of port.
@@ -2164,7 +2157,7 @@ serial8250_register_ports(struct uart_driver *drv, struct device *dev)
 /*
  *     Wait for transmitter & holding register to empty
  */
-static inline void wait_for_xmitr(struct uart_8250_port *up)
+static inline void wait_for_xmitr(struct uart_8250_port *up, int bits)
 {
        unsigned int status, tmout = 10000;
 
@@ -2178,7 +2171,7 @@ static inline void wait_for_xmitr(struct uart_8250_port *up)
                if (--tmout == 0)
                        break;
                udelay(1);
-       } while ((status & BOTH_EMPTY) != BOTH_EMPTY);
+       } while ((status & bits) != bits);
 
        /* Wait up to 1s for flow control if necessary */
        if (up->port.flags & UPF_CONS_FLOW) {
@@ -2218,7 +2211,7 @@ serial8250_console_write(struct console *co, const char *s, unsigned int count)
         *      Now, do each character
         */
        for (i = 0; i < count; i++, s++) {
-               wait_for_xmitr(up);
+               wait_for_xmitr(up, UART_LSR_THRE);
 
                /*
                 *      Send the character out.
@@ -2226,7 +2219,7 @@ serial8250_console_write(struct console *co, const char *s, unsigned int count)
                 */
                serial_out(up, UART_TX, *s);
                if (*s == 10) {
-                       wait_for_xmitr(up);
+                       wait_for_xmitr(up, UART_LSR_THRE);
                        serial_out(up, UART_TX, 13);
                }
        }
@@ -2235,8 +2228,8 @@ serial8250_console_write(struct console *co, const char *s, unsigned int count)
         *      Finally, wait for transmitter to become empty
         *      and restore the IER
         */
-       wait_for_xmitr(up);
-       serial_out(up, UART_IER, ier);
+       wait_for_xmitr(up, BOTH_EMPTY);
+       serial_out(up, UART_IER, ier | UART_IER_THRI);
 }
 
 static int serial8250_console_setup(struct console *co, char *options)
@@ -2595,15 +2588,11 @@ static int __init serial8250_init(void)
        if (ret)
                goto out;
 
-       ret = platform_driver_register(&serial8250_isa_driver);
-       if (ret)
-               goto unreg_uart_drv;
-
        serial8250_isa_devs = platform_device_alloc("serial8250",
                                                    PLAT8250_DEV_LEGACY);
        if (!serial8250_isa_devs) {
                ret = -ENOMEM;
-               goto unreg_plat_drv;
+               goto unreg_uart_drv;
        }
 
        ret = platform_device_add(serial8250_isa_devs);
@@ -2612,12 +2601,13 @@ static int __init serial8250_init(void)
 
        serial8250_register_ports(&serial8250_reg, &serial8250_isa_devs->dev);
 
-       goto out;
+       ret = platform_driver_register(&serial8250_isa_driver);
+       if (ret == 0)
+               goto out;
 
+       platform_device_del(serial8250_isa_devs);
  put_dev:
        platform_device_put(serial8250_isa_devs);
- unreg_plat_drv:
-       platform_driver_unregister(&serial8250_isa_driver);
  unreg_uart_drv:
        uart_unregister_driver(&serial8250_reg);
  out:
index 589fb076654a4be48ce4c3d727aafa65c1c12829..2a912153321e47345d9e11ea1698467993d41817 100644 (file)
@@ -940,6 +940,7 @@ enum pci_board_num_t {
        pbn_b2_bt_2_921600,
        pbn_b2_bt_4_921600,
 
+       pbn_b3_2_115200,
        pbn_b3_4_115200,
        pbn_b3_8_115200,
 
@@ -1311,6 +1312,12 @@ static struct pciserial_board pci_boards[] __devinitdata = {
                .uart_offset    = 8,
        },
 
+       [pbn_b3_2_115200] = {
+               .flags          = FL_BASE3,
+               .num_ports      = 2,
+               .base_baud      = 115200,
+               .uart_offset    = 8,
+       },
        [pbn_b3_4_115200] = {
                .flags          = FL_BASE3,
                .num_ports      = 4,
@@ -2272,6 +2279,9 @@ static struct pci_device_id serial_pci_tbl[] = {
                PCI_ANY_ID, PCI_ANY_ID, 0, 0,
                pbn_nec_nile4 },
 
+       {       PCI_VENDOR_ID_DCI, PCI_DEVICE_ID_DCI_PCCOM2,
+               PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+               pbn_b3_2_115200 },
        {       PCI_VENDOR_ID_DCI, PCI_DEVICE_ID_DCI_PCCOM4,
                PCI_ANY_ID, PCI_ANY_ID, 0, 0,
                pbn_b3_4_115200 },
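Hedged note on how the two additions in this file pair up: the final field of a serial_pci_tbl entry is the pci_device_id driver_data, which the 8250_pci probe path of this era uses roughly as an index into pci_boards[], so the new PCCOM2 ID resolves to the new two-port, 115200-baud description. A sketch of the lookup, not the verbatim probe code:

static struct pciserial_board *sketch_board_lookup(const struct pci_device_id *ent)
{
        /* ent is the serial_pci_tbl entry matched against the device */
        return &pci_boards[ent->driver_data];
}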
index 5e7199f7b59cda1e9a45da991c2ebc92e0ce4fe8..0d38f0f2ae2975d41f08fa059ab796385be2e71e 100644 (file)
@@ -23,7 +23,7 @@ config SERIAL_8250
          work.)
 
          To compile this driver as a module, choose M here: the
-         module will be called serial.
+         module will be called 8250.
          [WARNING: Do not compile this driver as a module if you are using
          non-standard serial ports, since the configuration information will
          be lost when the driver is unloaded.  This limitation may be lifted
@@ -301,7 +301,7 @@ config SERIAL_AT91_TTYAT
        depends on SERIAL_AT91=y
        help
          Say Y here if you wish to have the five internal AT91RM9200 UARTs
-         appear as /dev/ttyAT0-4 (major 240, minor 0-4) instead of the
+         appear as /dev/ttyAT0-4 (major 204, minor 154-158) instead of the
          normal /dev/ttyS0-4 (major 4, minor 64-68). This is necessary if
          you also want other UARTs, such as external 8250/16C550 compatible
          UARTs.
index 3490022e9fdc7e923d265eb06e635f4dc32e5026..429de2723a1c4d95597038625db48e52a966cae3 100644 (file)
@@ -566,7 +566,7 @@ static struct uart_amba_port amba_ports[UART_NR] = {
                        .uartclk        = 14745600,
                        .fifosize       = 16,
                        .ops            = &amba_pl010_pops,
-                       .flags          = ASYNC_BOOT_AUTOCONF,
+                       .flags          = UPF_BOOT_AUTOCONF,
                        .line           = 0,
                },
                .dtr_mask       = 1 << 5,
@@ -581,7 +581,7 @@ static struct uart_amba_port amba_ports[UART_NR] = {
                        .uartclk        = 14745600,
                        .fifosize       = 16,
                        .ops            = &amba_pl010_pops,
-                       .flags          = ASYNC_BOOT_AUTOCONF,
+                       .flags          = UPF_BOOT_AUTOCONF,
                        .line           = 1,
                },
                .dtr_mask       = 1 << 7,
index 0e206063d68580925d3794533ec5703986653c07..2113feb75c39890f6687d64ec80c88b3b56346f9 100644 (file)
@@ -222,8 +222,6 @@ static void at91_rx_chars(struct uart_port *port, struct pt_regs *regs)
        while (status & (AT91_US_RXRDY)) {
                ch = UART_GET_CHAR(port);
 
-               if (tty->flip.count >= TTY_FLIPBUF_SIZE)
-                       goto ignore_char;
                port->icount.rx++;
 
                flg = TTY_NORMAL;
index 8ef999481f9386fe351e77e0a7cd9b98d6250d73..ce7b2e4ecd17c33a6fa12e2451f0e6d5ba276c1a 100644 (file)
@@ -410,7 +410,7 @@ static struct uart_port clps711x_ports[UART_NR] = {
                .fifosize       = 16,
                .ops            = &clps711x_pops,
                .line           = 0,
-               .flags          = ASYNC_BOOT_AUTOCONF,
+               .flags          = UPF_BOOT_AUTOCONF,
        },
        {
                .iobase         = SYSCON2,
@@ -419,7 +419,7 @@ static struct uart_port clps711x_ports[UART_NR] = {
                .fifosize       = 16,
                .ops            = &clps711x_pops,
                .line           = 1,
-               .flags          = ASYNC_BOOT_AUTOCONF,
+               .flags          = UPF_BOOT_AUTOCONF,
        }
 };
 
index 587cc6a9511461732d355f7c32b4dc33117d52b3..858048efe1edbd4da7a5d2d49b5fb587a333ba28 100644 (file)
@@ -402,10 +402,10 @@ static int imx_startup(struct uart_port *port)
                             DRIVER_NAME, sport);
        if (retval) goto error_out2;
 
-       retval = request_irq(sport->rtsirq, imx_rtsint, 0,
+       retval = request_irq(sport->rtsirq, imx_rtsint,
+                            SA_TRIGGER_FALLING | SA_TRIGGER_RISING,
                             DRIVER_NAME, sport);
        if (retval) goto error_out3;
-       set_irq_type(sport->rtsirq, IRQT_BOTHEDGE);
 
        /*
         * Finally, clear and enable interrupts
@@ -674,7 +674,7 @@ static struct imx_port imx_ports[] = {
                .irq            = UART1_MINT_RX,
                .uartclk        = 16000000,
                .fifosize       = 8,
-               .flags          = ASYNC_BOOT_AUTOCONF,
+               .flags          = UPF_BOOT_AUTOCONF,
                .ops            = &imx_pops,
                .line           = 0,
        },
@@ -690,7 +690,7 @@ static struct imx_port imx_ports[] = {
                .irq            = UART2_MINT_RX,
                .uartclk        = 16000000,
                .fifosize       = 8,
-               .flags          = ASYNC_BOOT_AUTOCONF,
+               .flags          = UPF_BOOT_AUTOCONF,
                .ops            = &imx_pops,
                .line           = 1,
        },
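For reference, the call this hunk arrives at in imx_startup() is shown below (taken from the lines above); the SA_TRIGGER_* flags are the 2.6.15-era way of asking for both-edge triggering in request_irq() itself, replacing the separate set_irq_type(..., IRQT_BOTHEDGE) call that was removed:

        retval = request_irq(sport->rtsirq, imx_rtsint,
                             SA_TRIGGER_FALLING | SA_TRIGGER_RISING,
                             DRIVER_NAME, sport);
        if (retval)
                goto error_out3;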
index eb4883efb7c65953534797dc913752d5dbcefddf..0a2dd6c5b95fdecc116e78a4090552e31be90eaa 100644 (file)
@@ -1060,7 +1060,7 @@ static int s3c24xx_serial_init_port(struct s3c24xx_uart_port *ourport,
        dbg("resource %p (%lx..%lx)\n", res, res->start, res->end);
 
        port->mapbase   = res->start;
-       port->membase   = S3C24XX_VA_UART + (res->start - S3C2410_PA_UART);
+       port->membase   = S3C24XX_VA_UART + (res->start - S3C24XX_PA_UART);
        port->irq       = platform_get_irq(platdev, 0);
 
        ourport->clk    = clk_get(&platdev->dev, "uart");
index 1bd93168f504117440acf790f2e0bfbe384d6c5c..ff7b60b4de37171d116bf34d2b9a5abf36cf5926 100644 (file)
@@ -665,21 +665,21 @@ void __init sa1100_register_uart(int idx, int port)
                sa1100_ports[idx].port.membase = (void __iomem *)&Ser1UTCR0;
                sa1100_ports[idx].port.mapbase = _Ser1UTCR0;
                sa1100_ports[idx].port.irq     = IRQ_Ser1UART;
-               sa1100_ports[idx].port.flags   = ASYNC_BOOT_AUTOCONF;
+               sa1100_ports[idx].port.flags   = UPF_BOOT_AUTOCONF;
                break;
 
        case 2:
                sa1100_ports[idx].port.membase = (void __iomem *)&Ser2UTCR0;
                sa1100_ports[idx].port.mapbase = _Ser2UTCR0;
                sa1100_ports[idx].port.irq     = IRQ_Ser2ICP;
-               sa1100_ports[idx].port.flags   = ASYNC_BOOT_AUTOCONF;
+               sa1100_ports[idx].port.flags   = UPF_BOOT_AUTOCONF;
                break;
 
        case 3:
                sa1100_ports[idx].port.membase = (void __iomem *)&Ser3UTCR0;
                sa1100_ports[idx].port.mapbase = _Ser3UTCR0;
                sa1100_ports[idx].port.irq     = IRQ_Ser3UART;
-               sa1100_ports[idx].port.flags   = ASYNC_BOOT_AUTOCONF;
+               sa1100_ports[idx].port.flags   = UPF_BOOT_AUTOCONF;
                break;
 
        default:
index 943770470b9db316af2ed25cce1fca9296df43a2..0717abfdae06f1eed3d132cdbd7e7a3cced60858 100644 (file)
@@ -332,7 +332,7 @@ uart_get_baud_rate(struct uart_port *port, struct termios *termios,
                   struct termios *old, unsigned int min, unsigned int max)
 {
        unsigned int try, baud, altbaud = 38400;
-       unsigned int flags = port->flags & UPF_SPD_MASK;
+       upf_t flags = port->flags & UPF_SPD_MASK;
 
        if (flags == UPF_SPD_HI)
                altbaud = 57600;
@@ -615,8 +615,9 @@ static int uart_set_info(struct uart_state *state,
        struct serial_struct new_serial;
        struct uart_port *port = state->port;
        unsigned long new_port;
-       unsigned int change_irq, change_port, old_flags, closing_wait;
+       unsigned int change_irq, change_port, closing_wait;
        unsigned int old_custom_divisor, close_delay;
+       upf_t old_flags, new_flags;
        int retval = 0;
 
        if (copy_from_user(&new_serial, newinfo, sizeof(new_serial)))
@@ -655,6 +656,7 @@ static int uart_set_info(struct uart_state *state,
                      new_serial.type != port->type;
 
        old_flags = port->flags;
+       new_flags = new_serial.flags;
        old_custom_divisor = port->custom_divisor;
 
        if (!capable(CAP_SYS_ADMIN)) {
@@ -664,10 +666,10 @@ static int uart_set_info(struct uart_state *state,
                    (close_delay != state->close_delay) ||
                    (closing_wait != state->closing_wait) ||
                    (new_serial.xmit_fifo_size != port->fifosize) ||
-                   (((new_serial.flags ^ old_flags) & ~UPF_USR_MASK) != 0))
+                   (((new_flags ^ old_flags) & ~UPF_USR_MASK) != 0))
                        goto exit;
                port->flags = ((port->flags & ~UPF_USR_MASK) |
-                              (new_serial.flags & UPF_USR_MASK));
+                              (new_flags & UPF_USR_MASK));
                port->custom_divisor = new_serial.custom_divisor;
                goto check_and_exit;
        }
@@ -764,7 +766,7 @@ static int uart_set_info(struct uart_state *state,
        port->irq              = new_serial.irq;
        port->uartclk          = new_serial.baud_base * 16;
        port->flags            = (port->flags & ~UPF_CHANGE_MASK) |
-                                (new_serial.flags & UPF_CHANGE_MASK);
+                                (new_flags & UPF_CHANGE_MASK);
        port->custom_divisor   = new_serial.custom_divisor;
        state->close_delay     = close_delay;
        state->closing_wait    = closing_wait;
@@ -1870,7 +1872,7 @@ int uart_suspend_port(struct uart_driver *drv, struct uart_port *port)
        mutex_lock(&state->mutex);
 
        if (state->info && state->info->flags & UIF_INITIALIZED) {
-               struct uart_ops *ops = port->ops;
+               const struct uart_ops *ops = port->ops;
 
                spin_lock_irq(&port->lock);
                ops->stop_tx(port);
@@ -1932,7 +1934,7 @@ int uart_resume_port(struct uart_driver *drv, struct uart_port *port)
        }
 
        if (state->info && state->info->flags & UIF_INITIALIZED) {
-               struct uart_ops *ops = port->ops;
+               const struct uart_ops *ops = port->ops;
                int ret;
 
                ops->set_mctrl(port, 0);
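A short note on the serial_core hunks above: serial_struct.flags arrives from userspace as a plain integer, so uart_set_info() now copies it into an upf_t local (new_flags) before any masking, keeping the flag arithmetic in the uart_port flag namespace. For non-CAP_SYS_ADMIN callers the merge boils down to the following sketch, using the masks from the hunks:

        /* keep the privileged bits of port->flags, take only the
         * user-settable bits (UPF_USR_MASK) from what userspace passed in */
        upf_t old_flags = port->flags;
        upf_t new_flags = new_serial.flags;

        port->flags = (old_flags & ~UPF_USR_MASK) | (new_flags & UPF_USR_MASK);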
index d4a1f0e798c1bc461f737ec343e92d196608183f..d0490f67f597fa1be96f0271be6b99339216339b 100644 (file)
@@ -506,7 +506,7 @@ static struct uart_port_lh7a40x lh7a40x_ports[DEV_NR] = {
                        .uartclk        = 14745600/2,
                        .fifosize       = 16,
                        .ops            = &lh7a40x_uart_ops,
-                       .flags          = ASYNC_BOOT_AUTOCONF,
+                       .flags          = UPF_BOOT_AUTOCONF,
                        .line           = 0,
                },
        },
@@ -519,7 +519,7 @@ static struct uart_port_lh7a40x lh7a40x_ports[DEV_NR] = {
                        .uartclk        = 14745600/2,
                        .fifosize       = 16,
                        .ops            = &lh7a40x_uart_ops,
-                       .flags          = ASYNC_BOOT_AUTOCONF,
+                       .flags          = UPF_BOOT_AUTOCONF,
                        .line           = 1,
                },
        },
@@ -532,7 +532,7 @@ static struct uart_port_lh7a40x lh7a40x_ports[DEV_NR] = {
                        .uartclk        = 14745600/2,
                        .fifosize       = 16,
                        .ops            = &lh7a40x_uart_ops,
-                       .flags          = ASYNC_BOOT_AUTOCONF,
+                       .flags          = UPF_BOOT_AUTOCONF,
                        .line           = 2,
                },
        },
index a9e070759628558c970487fa48209d40139aa4ef..0111206327cac4505c2469cd326b7b09ba490fbf 100644 (file)
@@ -1113,10 +1113,10 @@ static struct sci_port sci_ports[SCI_NPORTS] = {
                .port   = {
                        .membase        = (void *)0xfffffe80,
                        .mapbase        = 0xfffffe80,
-                       .iotype         = SERIAL_IO_MEM,
+                       .iotype         = UPIO_MEM,
                        .irq            = 25,
                        .ops            = &sci_uart_ops,
-                       .flags          = ASYNC_BOOT_AUTOCONF,
+                       .flags          = UPF_BOOT_AUTOCONF,
                        .line           = 0,
                },
                .type           = PORT_SCI,
@@ -1128,10 +1128,10 @@ static struct sci_port sci_ports[SCI_NPORTS] = {
                .port   = {
                        .membase        = (void *)SCIF0,
                        .mapbase        = SCIF0,
-                       .iotype         = SERIAL_IO_MEM,
+                       .iotype         = UPIO_MEM,
                        .irq            = 55,
                        .ops            = &sci_uart_ops,
-                       .flags          = ASYNC_BOOT_AUTOCONF,
+                       .flags          = UPF_BOOT_AUTOCONF,
                        .line           = 0,
                },
                .type           = PORT_SCIF,
@@ -1142,10 +1142,10 @@ static struct sci_port sci_ports[SCI_NPORTS] = {
                .port   = {
                        .membase        = (void *)SCIF2,
                        .mapbase        = SCIF2,
-                       .iotype         = SERIAL_IO_MEM,
+                       .iotype         = UPIO_MEM,
                        .irq            = 59,
                        .ops            = &sci_uart_ops,
-                       .flags          = ASYNC_BOOT_AUTOCONF,
+                       .flags          = UPF_BOOT_AUTOCONF,
                        .line           = 1,
                },
                .type           = PORT_SCIF,
@@ -1157,10 +1157,10 @@ static struct sci_port sci_ports[SCI_NPORTS] = {
                .port   = {
                        .membase        = (void *)0xfffffe80,
                        .mapbase        = 0xfffffe80,
-                       .iotype         = SERIAL_IO_MEM,
+                       .iotype         = UPIO_MEM,
                        .irq            = 25,
                        .ops            = &sci_uart_ops,
-                       .flags          = ASYNC_BOOT_AUTOCONF,
+                       .flags          = UPF_BOOT_AUTOCONF,
                        .line           = 0,
                },
                .type           = PORT_SCI,
@@ -1171,10 +1171,10 @@ static struct sci_port sci_ports[SCI_NPORTS] = {
                .port   = {
                        .membase        = (void *)0xa4000150,
                        .mapbase        = 0xa4000150,
-                       .iotype         = SERIAL_IO_MEM,
+                       .iotype         = UPIO_MEM,
                        .irq            = 59,
                        .ops            = &sci_uart_ops,
-                       .flags          = ASYNC_BOOT_AUTOCONF,
+                       .flags          = UPF_BOOT_AUTOCONF,
                        .line           = 1,
                },
                .type           = PORT_SCIF,
@@ -1185,10 +1185,10 @@ static struct sci_port sci_ports[SCI_NPORTS] = {
                .port   = {
                        .membase        = (void *)0xa4000140,
                        .mapbase        = 0xa4000140,
-                       .iotype         = SERIAL_IO_MEM,
+                       .iotype         = UPIO_MEM,
                        .irq            = 55,
                        .ops            = &sci_uart_ops,
-                       .flags          = ASYNC_BOOT_AUTOCONF,
+                       .flags          = UPF_BOOT_AUTOCONF,
                        .line           = 2,
                },
                .type           = PORT_IRDA,
@@ -1200,10 +1200,10 @@ static struct sci_port sci_ports[SCI_NPORTS] = {
                .port   = {
                        .membase        = (void *)0xA4430000,
                        .mapbase        = 0xA4430000,
-                       .iotype         = SERIAL_IO_MEM,
+                       .iotype         = UPIO_MEM,
                        .irq            = 25,
                        .ops            = &sci_uart_ops,
-                       .flags          = ASYNC_BOOT_AUTOCONF,
+                       .flags          = UPF_BOOT_AUTOCONF,
                        .line           = 0,
                },
                .type           = PORT_SCIF,
@@ -1215,10 +1215,10 @@ static struct sci_port sci_ports[SCI_NPORTS] = {
                .port   = {
                        .membase        = (void *)0xffe00000,
                        .mapbase        = 0xffe00000,
-                       .iotype         = SERIAL_IO_MEM,
+                       .iotype         = UPIO_MEM,
                        .irq            = 25,
                        .ops            = &sci_uart_ops,
-                       .flags          = ASYNC_BOOT_AUTOCONF,
+                       .flags          = UPF_BOOT_AUTOCONF,
                        .line           = 0,
                },
                .type           = PORT_SCIF,
@@ -1230,10 +1230,10 @@ static struct sci_port sci_ports[SCI_NPORTS] = {
                .port   = {
                        .membase        = (void *)0xffe80000,
                        .mapbase        = 0xffe80000,
-                       .iotype         = SERIAL_IO_MEM,
+                       .iotype         = UPIO_MEM,
                        .irq            = 43,
                        .ops            = &sci_uart_ops,
-                       .flags          = ASYNC_BOOT_AUTOCONF,
+                       .flags          = UPF_BOOT_AUTOCONF,
                        .line           = 0,
                },
                .type           = PORT_SCIF,
@@ -1245,10 +1245,10 @@ static struct sci_port sci_ports[SCI_NPORTS] = {
                .port   = {
                        .membase        = (void *)0xffe00000,
                        .mapbase        = 0xffe00000,
-                       .iotype         = SERIAL_IO_MEM,
+                       .iotype         = UPIO_MEM,
                        .irq            = 25,
                        .ops            = &sci_uart_ops,
-                       .flags          = ASYNC_BOOT_AUTOCONF,
+                       .flags          = UPF_BOOT_AUTOCONF,
                        .line           = 0,
                },
                .type           = PORT_SCI,
@@ -1259,10 +1259,10 @@ static struct sci_port sci_ports[SCI_NPORTS] = {
                .port   = {
                        .membase        = (void *)0xffe80000,
                        .mapbase        = 0xffe80000,
-                       .iotype         = SERIAL_IO_MEM,
+                       .iotype         = UPIO_MEM,
                        .irq            = 43,
                        .ops            = &sci_uart_ops,
-                       .flags          = ASYNC_BOOT_AUTOCONF,
+                       .flags          = UPF_BOOT_AUTOCONF,
                        .line           = 1,
                },
                .type           = PORT_SCIF,
@@ -1274,10 +1274,10 @@ static struct sci_port sci_ports[SCI_NPORTS] = {
                .port   = {
                        .membase        = (void *)0xfe600000,
                        .mapbase        = 0xfe600000,
-                       .iotype         = SERIAL_IO_MEM,
+                       .iotype         = UPIO_MEM,
                        .irq            = 55,
                        .ops            = &sci_uart_ops,
-                       .flags          = ASYNC_BOOT_AUTOCONF,
+                       .flags          = UPF_BOOT_AUTOCONF,
                        .line           = 0,
                },
                .type           = PORT_SCIF,
@@ -1288,10 +1288,10 @@ static struct sci_port sci_ports[SCI_NPORTS] = {
                .port   = {
                        .membase        = (void *)0xfe610000,
                        .mapbase        = 0xfe610000,
-                       .iotype         = SERIAL_IO_MEM,
+                       .iotype         = UPIO_MEM,
                        .irq            = 75,
                        .ops            = &sci_uart_ops,
-                       .flags          = ASYNC_BOOT_AUTOCONF,
+                       .flags          = UPF_BOOT_AUTOCONF,
                        .line           = 1,
                },
                .type           = PORT_SCIF,
@@ -1302,10 +1302,10 @@ static struct sci_port sci_ports[SCI_NPORTS] = {
                .port   = {
                        .membase        = (void *)0xfe620000,
                        .mapbase        = 0xfe620000,
-                       .iotype         = SERIAL_IO_MEM,
+                       .iotype         = UPIO_MEM,
                        .irq            = 79,
                        .ops            = &sci_uart_ops,
-                       .flags          = ASYNC_BOOT_AUTOCONF,
+                       .flags          = UPF_BOOT_AUTOCONF,
                        .line           = 2,
                },
                .type           = PORT_SCIF,
@@ -1317,10 +1317,10 @@ static struct sci_port sci_ports[SCI_NPORTS] = {
                .port   = {
                        .membase        = (void *)0xffe80000,
                        .mapbase        = 0xffe80000,
-                       .iotype         = SERIAL_IO_MEM,
+                       .iotype         = UPIO_MEM,
                        .irq            = 43,
                        .ops            = &sci_uart_ops,
-                       .flags          = ASYNC_BOOT_AUTOCONF,
+                       .flags          = UPF_BOOT_AUTOCONF,
                        .line           = 0,
                },
                .type           = PORT_SCIF,
@@ -1332,10 +1332,10 @@ static struct sci_port sci_ports[SCI_NPORTS] = {
                .port   = {
                        .membase        = (void *)0xffe00000,
                        .mapbase        = 0xffe00000,
-                       .iotype         = SERIAL_IO_MEM,
+                       .iotype         = UPIO_MEM,
                        .irq            = 26,
                        .ops            = &sci_uart_ops,
-                       .flags          = ASYNC_BOOT_AUTOCONF,
+                       .flags          = UPF_BOOT_AUTOCONF,
                        .line           = 0,
                },
                .type           = PORT_SCIF,
@@ -1346,10 +1346,10 @@ static struct sci_port sci_ports[SCI_NPORTS] = {
                .port   = {
                        .membase        = (void *)0xffe80000,
                        .mapbase        = 0xffe80000,
-                       .iotype         = SERIAL_IO_MEM,
+                       .iotype         = UPIO_MEM,
                        .irq            = 43,
                        .ops            = &sci_uart_ops,
-                       .flags          = ASYNC_BOOT_AUTOCONF,
+                       .flags          = UPF_BOOT_AUTOCONF,
                        .line           = 1,
                },
                .type           = PORT_SCIF,
@@ -1359,10 +1359,10 @@ static struct sci_port sci_ports[SCI_NPORTS] = {
 #elif defined(CONFIG_CPU_SUBTYPE_SH5_101) || defined(CONFIG_CPU_SUBTYPE_SH5_103)
        {
                .port   = {
-                       .iotype         = SERIAL_IO_MEM,
+                       .iotype         = UPIO_MEM,
                        .irq            = 42,
                        .ops            = &sci_uart_ops,
-                       .flags          = ASYNC_BOOT_AUTOCONF,
+                       .flags          = UPF_BOOT_AUTOCONF,
                        .line           = 0,
                },
                .type           = PORT_SCIF,
@@ -1374,10 +1374,10 @@ static struct sci_port sci_ports[SCI_NPORTS] = {
                .port   = {
                        .membase        = (void *)0x00ffffb0,
                        .mapbase        = 0x00ffffb0,
-                       .iotype         = SERIAL_IO_MEM,
+                       .iotype         = UPIO_MEM,
                        .irq            = 54,
                        .ops            = &sci_uart_ops,
-                       .flags          = ASYNC_BOOT_AUTOCONF,
+                       .flags          = UPF_BOOT_AUTOCONF,
                        .line           = 0,
                },
                .type           = PORT_SCI,
@@ -1388,10 +1388,10 @@ static struct sci_port sci_ports[SCI_NPORTS] = {
                .port   = {
                        .membase        = (void *)0x00ffffb8,
                        .mapbase        = 0x00ffffb8,
-                       .iotype         = SERIAL_IO_MEM,
+                       .iotype         = UPIO_MEM,
                        .irq            = 58,
                        .ops            = &sci_uart_ops,
-                       .flags          = ASYNC_BOOT_AUTOCONF,
+                       .flags          = UPF_BOOT_AUTOCONF,
                        .line           = 1,
                },
                .type           = PORT_SCI,
@@ -1402,10 +1402,10 @@ static struct sci_port sci_ports[SCI_NPORTS] = {
                .port   = {
                        .membase        = (void *)0x00ffffc0,
                        .mapbase        = 0x00ffffc0,
-                       .iotype         = SERIAL_IO_MEM,
+                       .iotype         = UPIO_MEM,
                        .irq            = 62,
                        .ops            = &sci_uart_ops,
-                       .flags          = ASYNC_BOOT_AUTOCONF,
+                       .flags          = UPF_BOOT_AUTOCONF,
                        .line           = 2,
                },
                .type           = PORT_SCI,
@@ -1417,10 +1417,10 @@ static struct sci_port sci_ports[SCI_NPORTS] = {
                .port   = {
                        .membase        = (void *)0x00ffff78,
                        .mapbase        = 0x00ffff78,
-                       .iotype         = SERIAL_IO_MEM,
+                       .iotype         = UPIO_MEM,
                        .irq            = 90,
                        .ops            = &sci_uart_ops,
-                       .flags          = ASYNC_BOOT_AUTOCONF,
+                       .flags          = UPF_BOOT_AUTOCONF,
                        .line           = 0,
                },
                .type           = PORT_SCI,
@@ -1431,10 +1431,10 @@ static struct sci_port sci_ports[SCI_NPORTS] = {
                .port   = {
                        .membase        = (void *)0x00ffff80,
                        .mapbase        = 0x00ffff80,
-                       .iotype         = SERIAL_IO_MEM,
+                       .iotype         = UPIO_MEM,
                        .irq            = 94,
                        .ops            = &sci_uart_ops,
-                       .flags          = ASYNC_BOOT_AUTOCONF,
+                       .flags          = UPF_BOOT_AUTOCONF,
                        .line           = 1,
                },
                .type           = PORT_SCI,
@@ -1445,10 +1445,10 @@ static struct sci_port sci_ports[SCI_NPORTS] = {
                .port   = {
                        .membase        = (void *)0x00ffff88,
                        .mapbase        = 0x00ffff88,
-                       .iotype         = SERIAL_IO_MEM,
+                       .iotype         = UPIO_MEM,
                        .irq            = 98,
                        .ops            = &sci_uart_ops,
-                       .flags          = ASYNC_BOOT_AUTOCONF,
+                       .flags          = UPF_BOOT_AUTOCONF,
                        .line           = 2,
                },
                .type           = PORT_SCI,
index 5468e5a767e21bf9bcbf951f21b05dbbdb17b3b7..43e67d6c29d448a14923ec8c4df45973ac252030 100644 (file)
@@ -6,7 +6,7 @@
  * driver for that.
  *
  *
- * Copyright (c) 2004-2005 Silicon Graphics, Inc.  All Rights Reserved.
+ * Copyright (c) 2004-2006 Silicon Graphics, Inc.  All Rights Reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of version 2 of the GNU General Public License
@@ -829,8 +829,8 @@ static int __init sn_sal_module_init(void)
                misc.name = DEVICE_NAME_DYNAMIC;
                retval = misc_register(&misc);
                if (retval != 0) {
-                       printk
-                           ("Failed to register console device using misc_register.\n");
+                       printk(KERN_WARNING "Failed to register console "
+                              "device using misc_register.\n");
                        return -ENODEV;
                }
                sal_console_uart.major = MISC_MAJOR;
@@ -942,88 +942,75 @@ sn_sal_console_write(struct console *co, const char *s, unsigned count)
 {
        unsigned long flags = 0;
        struct sn_cons_port *port = &sal_console_port;
-#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT)
        static int stole_lock = 0;
-#endif
 
        BUG_ON(!port->sc_is_asynch);
 
        /* We can't look at the xmit buffer if we're not registered with serial core
         *  yet.  So only do the fancy recovery after registering
         */
-       if (port->sc_port.info) {
-
-               /* somebody really wants this output, might be an
-                * oops, kdb, panic, etc.  make sure they get it. */
-#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT)
-               if (spin_is_locked(&port->sc_port.lock)) {
-                       int lhead = port->sc_port.info->xmit.head;
-                       int ltail = port->sc_port.info->xmit.tail;
-                       int counter, got_lock = 0;
+       if (!port->sc_port.info) {
+               /* Not yet registered with serial core - simple case */
+               puts_raw_fixed(port->sc_ops->sal_puts_raw, s, count);
+               return;
+       }
 
-                       /*
-                        * We attempt to determine if someone has died with the
-                        * lock. We wait ~20 secs after the head and tail ptrs
-                        * stop moving and assume the lock holder is not functional
-                        * and plow ahead. If the lock is freed within the time out
-                        * period we re-get the lock and go ahead normally. We also
-                        * remember if we have plowed ahead so that we don't have
-                        * to wait out the time out period again - the asumption
-                        * is that we will time out again.
-                        */
+       /* somebody really wants this output, might be an
+        * oops, kdb, panic, etc.  make sure they get it. */
+       if (spin_is_locked(&port->sc_port.lock)) {
+               int lhead = port->sc_port.info->xmit.head;
+               int ltail = port->sc_port.info->xmit.tail;
+               int counter, got_lock = 0;
+
+               /*
+                * We attempt to determine if someone has died with the
+                * lock. We wait ~20 secs after the head and tail ptrs
+                * stop moving and assume the lock holder is not functional
+                * and plow ahead. If the lock is freed within the time out
+                * period we re-get the lock and go ahead normally. We also
+                * remember if we have plowed ahead so that we don't have
+                * to wait out the time out period again - the assumption
+                * is that we will time out again.
+                */
 
-                       for (counter = 0; counter < 150; mdelay(125), counter++) {
-                               if (!spin_is_locked(&port->sc_port.lock)
-                                   || stole_lock) {
-                                       if (!stole_lock) {
-                                               spin_lock_irqsave(&port->
-                                                                 sc_port.lock,
-                                                                 flags);
-                                               got_lock = 1;
-                                       }
-                                       break;
-                               } else {
-                                       /* still locked */
-                                       if ((lhead !=
-                                            port->sc_port.info->xmit.head)
-                                           || (ltail !=
-                                               port->sc_port.info->xmit.
-                                               tail)) {
-                                               lhead =
-                                                   port->sc_port.info->xmit.
-                                                   head;
-                                               ltail =
-                                                   port->sc_port.info->xmit.
-                                                   tail;
-                                               counter = 0;
-                                       }
+               for (counter = 0; counter < 150; mdelay(125), counter++) {
+                       if (!spin_is_locked(&port->sc_port.lock)
+                           || stole_lock) {
+                               if (!stole_lock) {
+                                       spin_lock_irqsave(&port->sc_port.lock,
+                                                         flags);
+                                       got_lock = 1;
                                }
-                       }
-                       /* flush anything in the serial core xmit buffer, raw */
-                       sn_transmit_chars(port, 1);
-                       if (got_lock) {
-                               spin_unlock_irqrestore(&port->sc_port.lock,
-                                                      flags);
-                               stole_lock = 0;
+                               break;
                        } else {
-                               /* fell thru */
-                               stole_lock = 1;
+                               /* still locked */
+                               if ((lhead != port->sc_port.info->xmit.head)
+                                   || (ltail !=
+                                       port->sc_port.info->xmit.tail)) {
+                                       lhead =
+                                               port->sc_port.info->xmit.head;
+                                       ltail =
+                                               port->sc_port.info->xmit.tail;
+                                       counter = 0;
+                               }
                        }
-                       puts_raw_fixed(port->sc_ops->sal_puts_raw, s, count);
-               } else {
-                       stole_lock = 0;
-#endif
-                       spin_lock_irqsave(&port->sc_port.lock, flags);
-                       sn_transmit_chars(port, 1);
+               }
+               /* flush anything in the serial core xmit buffer, raw */
+               sn_transmit_chars(port, 1);
+               if (got_lock) {
                        spin_unlock_irqrestore(&port->sc_port.lock, flags);
-
-                       puts_raw_fixed(port->sc_ops->sal_puts_raw, s, count);
-#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT)
+                       stole_lock = 0;
+               } else {
+                       /* fell thru */
+                       stole_lock = 1;
                }
-#endif
-       }
-       else {
-               /* Not yet registered with serial core - simple case */
+               puts_raw_fixed(port->sc_ops->sal_puts_raw, s, count);
+       } else {
+               stole_lock = 0;
+               spin_lock_irqsave(&port->sc_port.lock, flags);
+               sn_transmit_chars(port, 1);
+               spin_unlock_irqrestore(&port->sc_port.lock, flags);
+
                puts_raw_fixed(port->sc_ops->sal_puts_raw, s, count);
        }
 }
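The restructuring above mostly removes the #ifdef CONFIG_SMP/CONFIG_PREEMPT guards and inverts the outer test so the unregistered case returns early; the lock-stealing logic itself is unchanged. A hedged sketch of the resulting shape (the locked branch, which polls the xmit head/tail for roughly 20 seconds before plowing ahead, is summarised in a comment rather than repeated):

        if (!port->sc_port.info) {
                /* not yet registered with serial core: raw output only */
                puts_raw_fixed(port->sc_ops->sal_puts_raw, s, count);
                return;
        }

        if (spin_is_locked(&port->sc_port.lock)) {
                /* possibly a dead lock holder: wait ~20s of head/tail
                 * inactivity, re-take the lock if it frees up, otherwise set
                 * stole_lock and transmit raw anyway (see the loop above) */
        } else {
                stole_lock = 0;
                spin_lock_irqsave(&port->sc_port.lock, flags);
                sn_transmit_chars(port, 1);  /* flush serial-core xmit buffer */
                spin_unlock_irqrestore(&port->sc_port.lock, flags);
                puts_raw_fixed(port->sc_ops->sal_puts_raw, s, count);
        }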
index 5fc4a62173d9c6f0d3b626f135c0a6f478a3626f..fa4ae94243c21a070bf25d23af022648258172e7 100644 (file)
@@ -34,6 +34,7 @@ sunserial_console_termios(struct console *con)
        char *mode_prop = "ttyX-mode";
        char *cd_prop = "ttyX-ignore-cd";
        char *dtr_prop = "ttyX-rts-dtr-off";
+       char *ssp_console_modes_prop = "ssp-console-modes";
        int baud, bits, stop, cflag;
        char parity;
        int carrier = 0;
@@ -43,14 +44,39 @@ sunserial_console_termios(struct console *con)
        if (!serial_console)
                return;
 
-       if (serial_console == 1) {
+       switch (serial_console) {
+       case PROMDEV_OTTYA:
                mode_prop[3] = 'a';
                cd_prop[3] = 'a';
                dtr_prop[3] = 'a';
-       } else {
+               break;
+
+       case PROMDEV_OTTYB:
                mode_prop[3] = 'b';
                cd_prop[3] = 'b';
                dtr_prop[3] = 'b';
+               break;
+
+       case PROMDEV_ORSC:
+
+               nd = prom_pathtoinode("rsc");
+               if (!nd) {
+                       strcpy(mode, "115200,8,n,1,-");
+                       goto no_options;
+               }
+
+               if (!prom_node_has_property(nd, ssp_console_modes_prop)) {
+                       strcpy(mode, "115200,8,n,1,-");
+                       goto no_options;
+               }
+
+               memset(mode, 0, sizeof(mode));
+               prom_getstring(nd, ssp_console_modes_prop, mode, sizeof(mode));
+               goto no_options;
+
+       default:
+               strcpy(mode, "9600,8,n,1,-");
+               goto no_options;
        }
 
        topnd = prom_getchild(prom_root_node);
@@ -110,6 +136,10 @@ no_options:
                case 9600: cflag |= B9600; break;
                case 19200: cflag |= B19200; break;
                case 38400: cflag |= B38400; break;
+               case 57600: cflag |= B57600; break;
+               case 115200: cflag |= B115200; break;
+               case 230400: cflag |= B230400; break;
+               case 460800: cflag |= B460800; break;
                default: baud = 9600; cflag |= B9600; break;
        }
 
index 7e773ff76c6106721f42b0dce9aa5cd1f43c123c..8bcaebcc0ad7b20e1986df7e74c4968071b14fe9 100644 (file)
@@ -897,9 +897,6 @@ static int sunsab_console_setup(struct console *con, char *options)
 
        sunserial_console_termios(con);
 
-       /* Firmware console speed is limited to 150-->38400 baud so
-        * this hackish cflag thing is OK.
-        */
        switch (con->cflag & CBAUD) {
        case B150: baud = 150; break;
        case B300: baud = 300; break;
@@ -910,6 +907,10 @@ static int sunsab_console_setup(struct console *con, char *options)
        default: case B9600: baud = 9600; break;
        case B19200: baud = 19200; break;
        case B38400: baud = 38400; break;
+       case B57600: baud = 57600; break;
+       case B115200: baud = 115200; break;
+       case B230400: baud = 230400; break;
+       case B460800: baud = 460800; break;
        };
 
        /*
index 9a3665b34d97218dca97b18bbf60740bb45dece7..bc67442c6b4ce92f227cf62c81b7f507c11cb6e5 100644 (file)
@@ -669,7 +669,7 @@ static int sunsu_startup(struct uart_port *port)
         * if it is, then bail out, because there's likely no UART
         * here.
         */
-       if (!(up->port.flags & ASYNC_BUGGY_UART) &&
+       if (!(up->port.flags & UPF_BUGGY_UART) &&
            (serial_inp(up, UART_LSR) == 0xff)) {
                printk("ttyS%d: LSR safety check engaged!\n", up->port.line);
                return -ENODEV;
@@ -707,7 +707,7 @@ static int sunsu_startup(struct uart_port *port)
        up->ier = UART_IER_RLSI | UART_IER_RDI;
        serial_outp(up, UART_IER, up->ier);
 
-       if (up->port.flags & ASYNC_FOURPORT) {
+       if (up->port.flags & UPF_FOURPORT) {
                unsigned int icp;
                /*
                 * Enable interrupts on the AST Fourport board
@@ -740,7 +740,7 @@ static void sunsu_shutdown(struct uart_port *port)
        serial_outp(up, UART_IER, 0);
 
        spin_lock_irqsave(&up->port.lock, flags);
-       if (up->port.flags & ASYNC_FOURPORT) {
+       if (up->port.flags & UPF_FOURPORT) {
                /* reset interrupts on the AST Fourport board */
                inb((up->port.iobase & 0xfe0) | 0x1f);
                up->port.mctrl |= TIOCM_OUT1;
@@ -1132,7 +1132,7 @@ ebus_done:
 
        spin_lock_irqsave(&up->port.lock, flags);
 
-       if (!(up->port.flags & ASYNC_BUGGY_UART)) {
+       if (!(up->port.flags & UPF_BUGGY_UART)) {
                /*
                 * Do a simple existence test first; if we fail this, there's
                 * no point trying anything else.
@@ -1170,7 +1170,7 @@ ebus_done:
         * manufacturer would be stupid enough to design a board
         * that conflicts with COM 1-4 --- we hope!
         */
-       if (!(up->port.flags & ASYNC_SKIP_TEST)) {
+       if (!(up->port.flags & UPF_SKIP_TEST)) {
                serial_outp(up, UART_MCR, UART_MCR_LOOP | 0x0A);
                status1 = serial_inp(up, UART_MSR) & 0xF0;
                serial_outp(up, UART_MCR, save_mcr);
@@ -1371,7 +1371,7 @@ static __inline__ void wait_for_xmitr(struct uart_sunsu_port *up)
        } while ((status & BOTH_EMPTY) != BOTH_EMPTY);
 
        /* Wait up to 1s for flow control if necessary */
-       if (up->port.flags & ASYNC_CONS_FLOW) {
+       if (up->port.flags & UPF_CONS_FLOW) {
                tmout = 1000000;
                while (--tmout &&
                       ((serial_in(up, UART_MSR) & UART_MSR_CTS) == 0))
@@ -1513,7 +1513,7 @@ static int __init sunsu_serial_init(void)
                    up->su_type == SU_PORT_KBD)
                        continue;
 
-               up->port.flags |= ASYNC_BOOT_AUTOCONF;
+               up->port.flags |= UPF_BOOT_AUTOCONF;
                up->port.type = PORT_UNKNOWN;
                up->port.uartclk = (SU_BASE_BAUD * 16);
 
index b2187175d03fcde37c6daa66f240b0cdb36c4a71..6761b68c35e97182046e1774d1755ef810423bb7 100644 (file)
@@ -116,9 +116,10 @@ clcdfb_set_bitfields(struct clcd_fb *fb, struct fb_var_screeninfo *var)
        int ret = 0;
 
        memset(&var->transp, 0, sizeof(var->transp));
-       memset(&var->red, 0, sizeof(var->red));
-       memset(&var->green, 0, sizeof(var->green));
-       memset(&var->blue, 0, sizeof(var->blue));
+
+       var->red.msb_right = 0;
+       var->green.msb_right = 0;
+       var->blue.msb_right = 0;
 
        switch (var->bits_per_pixel) {
        case 1:
@@ -133,34 +134,20 @@ clcdfb_set_bitfields(struct clcd_fb *fb, struct fb_var_screeninfo *var)
                var->blue.offset        = 0;
                break;
        case 16:
-               var->red.length         = 5;
-               var->green.length       = 6;
-               var->blue.length        = 5;
-               if (fb->panel->cntl & CNTL_BGR) {
-                       var->red.offset         = 11;
-                       var->green.offset       = 5;
-                       var->blue.offset        = 0;
-               } else {
-                       var->red.offset         = 0;
-                       var->green.offset       = 5;
-                       var->blue.offset        = 11;
-               }
+               var->red.length = 5;
+               var->blue.length = 5;
+               /*
+                * Green length can be 5 or 6 depending on whether
+                * we're operating in RGB555 or RGB565 mode.
+                */
+               if (var->green.length != 5 && var->green.length != 6)
+                       var->green.length = 6;
                break;
        case 32:
                if (fb->panel->cntl & CNTL_LCDTFT) {
                        var->red.length         = 8;
                        var->green.length       = 8;
                        var->blue.length        = 8;
-
-                       if (fb->panel->cntl & CNTL_BGR) {
-                               var->red.offset         = 16;
-                               var->green.offset       = 8;
-                               var->blue.offset        = 0;
-                       } else {
-                               var->red.offset         = 0;
-                               var->green.offset       = 8;
-                               var->blue.offset        = 16;
-                       }
                        break;
                }
        default:
@@ -168,6 +155,23 @@ clcdfb_set_bitfields(struct clcd_fb *fb, struct fb_var_screeninfo *var)
                break;
        }
 
+       /*
+        * >= 16bpp displays have separate colour component bitfields
+        * encoded in the pixel data.  Calculate their position from
+        * the bitfield length defined above.
+        */
+       if (ret == 0 && var->bits_per_pixel >= 16) {
+               if (fb->panel->cntl & CNTL_BGR) {
+                       var->blue.offset = 0;
+                       var->green.offset = var->blue.offset + var->blue.length;
+                       var->red.offset = var->green.offset + var->green.length;
+               } else {
+                       var->red.offset = 0;
+                       var->green.offset = var->red.offset + var->red.length;
+                       var->blue.offset = var->green.offset + var->green.length;
+               }
+       }
+
        return ret;
 }
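A quick worked example of the offset arithmetic added above, assuming RGB565 (lengths 5/6/5): in the normal order red starts at bit 0, green at bit 5 and blue at bit 11, while CNTL_BGR flips the order so blue starts at bit 0 and red at bit 11. The bgr flag below is shorthand for fb->panel->cntl & CNTL_BGR:

        var->red.length = 5;
        var->green.length = 6;
        var->blue.length = 5;

        if (bgr) {
                var->blue.offset  = 0;            /* bits  4..0  */
                var->green.offset = 5;            /* bits 10..5  */
                var->red.offset   = 11;           /* bits 15..11 */
        } else {
                var->red.offset   = 0;            /* bits  4..0  */
                var->green.offset = 5;            /* bits 10..5  */
                var->blue.offset  = 11;           /* bits 15..11 */
        }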
 
index 55e6e2d60d3a8de3a715e450ffee203b1d6af6d5..a4d7cc51ce0be8733f96d0bcc9f615c69ef7a027 100644 (file)
@@ -199,8 +199,7 @@ struct  fbcmap32 {
 #define FBIOPUTCMAP32  _IOW('F', 3, struct fbcmap32)
 #define FBIOGETCMAP32  _IOW('F', 4, struct fbcmap32)
 
-static int fbiogetputcmap(struct file *file, struct fb_info *info,
-               unsigned int cmd, unsigned long arg)
+static int fbiogetputcmap(struct fb_info *info, unsigned int cmd, unsigned long arg)
 {
        struct fbcmap32 __user *argp = (void __user *)arg;
        struct fbcmap __user *p = compat_alloc_user_space(sizeof(*p));
@@ -236,8 +235,7 @@ struct fbcursor32 {
 #define FBIOSCURSOR32  _IOW('F', 24, struct fbcursor32)
 #define FBIOGCURSOR32  _IOW('F', 25, struct fbcursor32)
 
-static int fbiogscursor(struct file *file, struct fb_info *info,
-               unsigned long arg)
+static int fbiogscursor(struct fb_info *info, unsigned long arg)
 {
        struct fbcursor __user *p = compat_alloc_user_space(sizeof(*p));
        struct fbcursor32 __user *argp =  (void __user *)arg;
@@ -263,8 +261,7 @@ static int fbiogscursor(struct file *file, struct fb_info *info,
        return info->fbops->fb_ioctl(info, FBIOSCURSOR, (unsigned long)p);
 }
 
-long sbusfb_compat_ioctl(struct fb_info *info, unsigned int cmd,
-               unsigned long arg)
+int sbusfb_compat_ioctl(struct fb_info *info, unsigned int cmd, unsigned long arg)
 {
        switch (cmd) {
        case FBIOGTYPE:
index f753939013ed807b763631b25ea8a790447d0ab1..492828c3fe8fcf4c8a3d9992bea57ef51a44a89f 100644 (file)
@@ -20,7 +20,7 @@ extern int sbusfb_mmap_helper(struct sbus_mmap_map *map,
 int sbusfb_ioctl_helper(unsigned long cmd, unsigned long arg,
                        struct fb_info *info,
                        int type, int fb_depth, unsigned long fb_size);
-long sbusfb_compat_ioctl(struct fb_info *info, unsigned int cmd,
+int sbusfb_compat_ioctl(struct fb_info *info, unsigned int cmd,
                unsigned long arg);
 
 #endif /* _SBUSLIB_H */
index 3d023089707efe87df1d17e276ae8598971db1ee..2f4ce43f7b6c57e8fdb6bcd14143b8cc77f63288 100644 (file)
@@ -8,6 +8,7 @@ obj-$(CONFIG_9P_FS) := 9p2000.o
        conv.o \
        vfs_super.o \
        vfs_inode.o \
+       vfs_addr.o \
        vfs_file.o \
        vfs_dir.o \
        vfs_dentry.o \
index c78502ad00ed3d1dcd52da9a28c556aa9c675adc..69cf2905dc90e468ba949e74f5a8ba4681aaf333 100644 (file)
@@ -39,6 +39,7 @@
  */
 
 extern struct file_system_type v9fs_fs_type;
+extern struct address_space_operations v9fs_addr_operations;
 extern struct file_operations v9fs_file_operations;
 extern struct file_operations v9fs_dir_operations;
 extern struct dentry_operations v9fs_dentry_operations;
diff --git a/fs/9p/vfs_addr.c b/fs/9p/vfs_addr.c
new file mode 100644 (file)
index 0000000..8100fb5
--- /dev/null
@@ -0,0 +1,109 @@
+/*
+ *  linux/fs/9p/vfs_addr.c
+ *
+ * This file contains vfs address (mmap) ops for 9P2000.
+ *
+ *  Copyright (C) 2005 by Eric Van Hensbergen <ericvh@gmail.com>
+ *  Copyright (C) 2002 by Ron Minnich <rminnich@lanl.gov>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to:
+ *  Free Software Foundation
+ *  51 Franklin Street, Fifth Floor
+ *  Boston, MA  02111-1301  USA
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/file.h>
+#include <linux/stat.h>
+#include <linux/string.h>
+#include <linux/smp_lock.h>
+#include <linux/inet.h>
+#include <linux/version.h>
+#include <linux/pagemap.h>
+#include <linux/idr.h>
+
+#include "debug.h"
+#include "v9fs.h"
+#include "9p.h"
+#include "v9fs_vfs.h"
+#include "fid.h"
+
+/**
+ * v9fs_vfs_readpage - read an entire page in from 9P
+ *
+ * @file: file being read
+ * @page: structure to page
+ *
+ */
+
+static int v9fs_vfs_readpage(struct file *filp, struct page *page)
+{
+       char *buffer = NULL;
+       int retval = -EIO;
+       loff_t offset = page_offset(page);
+       int count = PAGE_CACHE_SIZE;
+       struct inode *inode = filp->f_dentry->d_inode;
+       struct v9fs_session_info *v9ses = v9fs_inode2v9ses(inode);
+       int rsize = v9ses->maxdata - V9FS_IOHDRSZ;
+       struct v9fs_fid *v9f = filp->private_data;
+       struct v9fs_fcall *fcall = NULL;
+       int fid = v9f->fid;
+       int total = 0;
+       int result = 0;
+
+       buffer = kmap(page);
+       do {
+               if (count < rsize)
+                       rsize = count;
+
+               result = v9fs_t_read(v9ses, fid, offset, rsize, &fcall);
+
+               if (result < 0) {
+                       printk(KERN_ERR "v9fs_t_read returned %d\n",
+                              result);
+
+                       kfree(fcall);
+                       goto UnmapAndUnlock;
+               } else
+                       offset += result;
+
+               memcpy(buffer, fcall->params.rread.data, result);
+
+               count -= result;
+               buffer += result;
+               total += result;
+
+               kfree(fcall);
+
+               if (result < rsize)
+                       break;
+       } while (count);
+
+       memset(buffer, 0, count);
+       flush_dcache_page(page);
+       SetPageUptodate(page);
+       retval = 0;
+
+UnmapAndUnlock:
+       kunmap(page);
+       unlock_page(page);
+       return retval;
+}
+
+struct address_space_operations v9fs_addr_operations = {
+      .readpage = v9fs_vfs_readpage,
+};
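Hedged note on how this new readpage gets used: the v9fs hunks further down set each inode's mapping to v9fs_addr_operations and add generic_file_mmap to v9fs_file_operations, so page faults on an mmap'd 9P file are served through the readpage above. Roughly:

/* in v9fs_get_inode(), as added below */
inode->i_mapping->a_ops = &v9fs_addr_operations;

/* in v9fs_file_operations, as added below; generic_file_mmap relies on
 * the mapping's readpage to fill pages on fault */
struct file_operations v9fs_file_operations = {
        /* existing entries unchanged ... */
        .mmap = generic_file_mmap,
};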
index 6852f0eb96ed3a81641a75370bd2a2ce23df4ac7..c7e14d917215c0ca235db344b77aef8aebd285cf 100644 (file)
@@ -289,6 +289,9 @@ v9fs_file_write(struct file *filp, const char __user * data,
                total += result;
        } while (count);
 
+       if(inode->i_mapping->nrpages)
+               invalidate_inode_pages2(inode->i_mapping);
+
        return total;
 }
 
@@ -299,4 +302,5 @@ struct file_operations v9fs_file_operations = {
        .open = v9fs_file_open,
        .release = v9fs_dir_release,
        .lock = v9fs_file_lock,
+       .mmap = generic_file_mmap,
 };
index a17b28854288f641cffb55abac436f85270b90ae..91f552454c7656470eecd32a6d00513e5b54d81f 100644 (file)
@@ -177,6 +177,7 @@ struct inode *v9fs_get_inode(struct super_block *sb, int mode)
                inode->i_blocks = 0;
                inode->i_rdev = 0;
                inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
+               inode->i_mapping->a_ops = &v9fs_addr_operations;
 
                switch (mode & S_IFMT) {
                case S_IFIFO:
index bbc442b8c86722dbe69651fb593bfe7d3f6c936c..1f3bb501c262bc9b94ad9e0b73c5af8acc511c90 100644 (file)
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -411,6 +411,7 @@ static int __bio_add_page(request_queue_t *q, struct bio *bio, struct page
 
 /**
  *     bio_add_pc_page -       attempt to add page to bio
+ *     @q: the target queue
  *     @bio: destination bio
  *     @page: page to add
  *     @len: vec entry length
index 943ef9b82244ef9b71a3211e24bb1a1899f51409..d335015473a520e518171012d9a6fdb627bcee74 100644 (file)
@@ -1,3 +1,11 @@
+Version 1.40
+------------
+Use fsuid (fsgid) more consistently instead of uid (gid). Improve performance
+of readpages by eliminating one extra memcpy. Allow update of file size
+from remote server even if file is open for write as long as mount is
+directio.  Recognize share mode security and send NTLM encrypted password
+on tree connect if share mode negotiated.
+
 Version 1.39
 ------------
 Defer close of a file handle slightly if pending writes depend on that handle
@@ -7,6 +15,8 @@ Fix SFU style symlinks and mknod needed for servers which do not support the
 CIFS Unix Extensions.  Fix setfacl/getfacl on bigendian. Timeout negative
 dentries so files that the client sees as deleted but that later get created
 on the server will be recognized.  Add client side permission check on setattr.
+Timeout stuck requests better (where server has never responded or sent corrupt
+responses).
 
 Version 1.38
 ------------
index e5d09a2fc7a5eccf7a7b8f15b56de07173a5d535..b0070d1b149d6d240e2457bd59455b945faa7005 100644 (file)
@@ -436,7 +436,17 @@ A partial list of the supported mount options follows:
                SFU does).  In the future the bottom 9 bits of the mode
                also will be emulated using queries of the security
                descriptor (ACL).
-               
+sec            Security mode.  Allowed values are:
+                       none    attempt to connect as a null user (no name)
+                       krb5    Use Kerberos version 5 authentication
+                       krb5i   Use Kerberos authentication and packet signing
+                       ntlm    Use NTLM password hashing (default)
+                       ntlmi   Use NTLM password hashing with signing (if
+                               /proc/fs/cifs/PacketSigningEnabled is on or the
+                               server requires signing, this can also be the default)
+                       ntlmv2  Use NTLMv2 password hashing      
+                       ntlmv2i Use NTLMv2 password hashing with packet signing
+
 The mount.cifs mount helper also accepts a few mount options before -o
 including:
 
index 22a444a3fe4c2fb9325a0fdaa2e4eb09d3707670..f4124a32bef8974edd3f02111f8e382a63f02199 100644 (file)
@@ -219,6 +219,10 @@ cifs_stats_write(struct file *file, const char __user *buffer,
 
         if (c == '1' || c == 'y' || c == 'Y' || c == '0') {
                read_lock(&GlobalSMBSeslock);
+#ifdef CONFIG_CIFS_STATS2
+               atomic_set(&totBufAllocCount, 0);
+               atomic_set(&totSmBufAllocCount, 0);
+#endif /* CONFIG_CIFS_STATS2 */
                list_for_each(tmp, &GlobalTreeConnectionList) {
                        tcon = list_entry(tmp, struct cifsTconInfo,
                                        cifsConnectionList);
@@ -276,6 +280,14 @@ cifs_stats_read(char *buf, char **beginBuffer, off_t offset,
                        smBufAllocCount.counter,cifs_min_small);
        length += item_length;
        buf += item_length;
+#ifdef CONFIG_CIFS_STATS2
+        item_length = sprintf(buf, "Total Large %d Small %d Allocations\n",
+                               atomic_read(&totBufAllocCount),
+                               atomic_read(&totSmBufAllocCount));
+       length += item_length;
+       buf += item_length;
+#endif /* CONFIG_CIFS_STATS2 */
+
        item_length = 
                sprintf(buf,"Operations (MIDs): %d\n",
                        midCount.counter);
@@ -389,8 +401,8 @@ static read_proc_t ntlmv2_enabled_read;
 static write_proc_t ntlmv2_enabled_write;
 static read_proc_t packet_signing_enabled_read;
 static write_proc_t packet_signing_enabled_write;
-static read_proc_t quotaEnabled_read;
-static write_proc_t quotaEnabled_write;
+static read_proc_t experimEnabled_read;
+static write_proc_t experimEnabled_write;
 static read_proc_t linuxExtensionsEnabled_read;
 static write_proc_t linuxExtensionsEnabled_write;
 
@@ -430,9 +442,9 @@ cifs_proc_init(void)
                pde->write_proc = oplockEnabled_write;
 
        pde = create_proc_read_entry("Experimental", 0, proc_fs_cifs,
-                               quotaEnabled_read, NULL);
+                               experimEnabled_read, NULL);
        if (pde)
-               pde->write_proc = quotaEnabled_write;
+               pde->write_proc = experimEnabled_write;
 
        pde = create_proc_read_entry("LinuxExtensionsEnabled", 0, proc_fs_cifs,
                                linuxExtensionsEnabled_read, NULL);
@@ -574,14 +586,13 @@ oplockEnabled_write(struct file *file, const char __user *buffer,
 }
 
 static int
-quotaEnabled_read(char *page, char **start, off_t off,
+experimEnabled_read(char *page, char **start, off_t off,
                    int count, int *eof, void *data)
 {
         int len;
 
         len = sprintf(page, "%d\n", experimEnabled);
-/* could also check if quotas are enabled in kernel
-       as a whole first */
+
         len -= off;
         *start = page + off;
 
@@ -596,21 +607,23 @@ quotaEnabled_read(char *page, char **start, off_t off,
         return len;
 }
 static int
-quotaEnabled_write(struct file *file, const char __user *buffer,
+experimEnabled_write(struct file *file, const char __user *buffer,
                     unsigned long count, void *data)
 {
-        char c;
-        int rc;
+       char c;
+       int rc;
 
-        rc = get_user(c, buffer);
-        if (rc)
-                return rc;
-        if (c == '0' || c == 'n' || c == 'N')
-                experimEnabled = 0;
-        else if (c == '1' || c == 'y' || c == 'Y')
-                experimEnabled = 1;
+       rc = get_user(c, buffer);
+       if (rc)
+               return rc;
+       if (c == '0' || c == 'n' || c == 'N')
+               experimEnabled = 0;
+       else if (c == '1' || c == 'y' || c == 'Y')
+               experimEnabled = 1;
+       else if (c == '2')
+               experimEnabled = 2;
 
-        return count;
+       return count;
 }
 
 static int
@@ -620,8 +633,6 @@ linuxExtensionsEnabled_read(char *page, char **start, off_t off,
         int len;
 
         len = sprintf(page, "%d\n", linuxExtEnabled);
-/* could also check if quotas are enabled in kernel
-       as a whole first */
         len -= off;
         *start = page + off;
 
index f799f6f0e7296927af5bfc7203d2c5b489441de5..ad58eb0c4d6d8d5600165490a8e0f7e7ab7f843c 100644 (file)
 #define CIFS_MOUNT_DIRECT_IO    8 /* do not write nor read through page cache */
 #define CIFS_MOUNT_NO_XATTR  0x10 /* if set - disable xattr support */
 #define CIFS_MOUNT_MAP_SPECIAL_CHR 0x20 /* remap illegal chars in filenames */
-#define CIFS_MOUNT_POSIX_PATHS 0x40 /* Negotiate posix pathnames if possible. */
-#define CIFS_MOUNT_UNX_EMUL    0x80 /* Network compat with SFUnix emulation */
+#define CIFS_MOUNT_POSIX_PATHS 0x40 /* Negotiate posix pathnames if possible. */
+#define CIFS_MOUNT_UNX_EMUL    0x80 /* Network compat with SFUnix emulation */
 #define CIFS_MOUNT_NO_BRL      0x100 /* No sending byte range locks to srv */
+#define CIFS_MOUNT_CIFS_ACL    0x200 /* send ACL requests to non-POSIX srv */
 
 struct cifs_sb_info {
        struct cifsTconInfo *tcon;      /* primary mount */
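A minimal sketch (not part of the patch) of how the new CIFS_MOUNT_CIFS_ACL bit
might gate a security-descriptor query; xid, pTcon, netfid, acl_buf, buflen and
acl_type are placeholder variables here:

        /* illustrative only: issue the NT TRANSACT ACL query only when the
         * cifsacl mount option set CIFS_MOUNT_CIFS_ACL in the superblock flags */
        if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL)
                rc = CIFSSMBGetCIFSACL(xid, pTcon, netfid, acl_buf, buflen,
                                       acl_type);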
diff --git a/fs/cifs/cifsacl.h b/fs/cifs/cifsacl.h
new file mode 100644 (file)
index 0000000..d0776ac
--- /dev/null
@@ -0,0 +1,38 @@
+/*
+ *   fs/cifs/cifsacl.h
+ *
+ *   Copyright (c) International Business Machines  Corp., 2005
+ *   Author(s): Steve French (sfrench@us.ibm.com)
+ *
+ *   This library is free software; you can redistribute it and/or modify
+ *   it under the terms of the GNU Lesser General Public License as published
+ *   by the Free Software Foundation; either version 2.1 of the License, or
+ *   (at your option) any later version.
+ *
+ *   This library is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
+ *   the GNU Lesser General Public License for more details.
+ *
+ *   You should have received a copy of the GNU Lesser General Public License
+ *   along with this library; if not, write to the Free Software
+ *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef _CIFSACL_H
+#define _CIFSACL_H
+
+struct cifs_sid {
+       __u8 revision; /* revision level */
+       __u8 num_subauths;
+       __u8 authority[6];
+       __u32 sub_auth[4];
+       /* next sub_auth if any ... */
+} __attribute__((packed));
+
+/* everyone */
+extern const struct cifs_sid sid_everyone;
+/* group users */
+extern const struct cifs_sid sid_user;
+
+#endif /* _CIFSACL_H */
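For orientation, a hedged example of how a familiar Windows SID maps onto this
structure; the name builtin_users is illustrative and not part of the patch,
but the values mirror the sid_user initializer added to cifssmb.c below:

        /* illustrative only: S-1-5-32-545 (BUILTIN\Users) as a struct cifs_sid */
        static const struct cifs_sid builtin_users = {
                .revision     = 1,                   /* the "S-1" part */
                .num_subauths = 2,
                .authority    = {0, 0, 0, 0, 0, 5},  /* "-5", the NT authority */
                .sub_auth     = {32, 545, 0, 0},     /* "-32-545": BUILTIN, Users */
        };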
index fe2bb7c4c9121c4bcc2cfeeb5ca9ec9669bcc944..a2c24858d40f94f449de0e958c7971b31c3fff61 100644 (file)
@@ -1,7 +1,7 @@
 /*
  *   fs/cifs/cifsencrypt.c
  *
- *   Copyright (C) International Business Machines  Corp., 2003
+ *   Copyright (C) International Business Machines  Corp., 2005
  *   Author(s): Steve French (sfrench@us.ibm.com)
  *
  *   This library is free software; you can redistribute it and/or modify
@@ -82,6 +82,59 @@ int cifs_sign_smb(struct smb_hdr * cifs_pdu, struct TCP_Server_Info * server,
        return rc;
 }
 
+static int cifs_calc_signature2(const struct kvec * iov, int n_vec,
+                               const char * key, char * signature)
+{
+        struct  MD5Context context;
+
+        if((iov == NULL) || (signature == NULL))
+                return -EINVAL;
+
+        MD5Init(&context);
+        MD5Update(&context,key,CIFS_SESSION_KEY_SIZE+16);
+
+/*        MD5Update(&context,cifs_pdu->Protocol,cifs_pdu->smb_buf_length); */ /* BB FIXME BB */
+
+        MD5Final(signature,&context);
+
+       return -EOPNOTSUPP;
+/*        return 0; */
+}
+
+
+int cifs_sign_smb2(struct kvec * iov, int n_vec, struct TCP_Server_Info *server,
+                  __u32 * pexpected_response_sequence_number)
+{
+       int rc = 0;
+       char smb_signature[20];
+       struct smb_hdr * cifs_pdu = iov[0].iov_base;
+
+       if((cifs_pdu == NULL) || (server == NULL))
+               return -EINVAL;
+
+       if((cifs_pdu->Flags2 & SMBFLG2_SECURITY_SIGNATURE) == 0)
+               return rc;
+
+        spin_lock(&GlobalMid_Lock);
+        cifs_pdu->Signature.Sequence.SequenceNumber = 
+                               cpu_to_le32(server->sequence_number);
+        cifs_pdu->Signature.Sequence.Reserved = 0;
+
+        *pexpected_response_sequence_number = server->sequence_number++;
+        server->sequence_number++;
+        spin_unlock(&GlobalMid_Lock);
+
+        rc = cifs_calc_signature2(iov, n_vec, server->mac_signing_key,
+                                     smb_signature);
+        if(rc)
+                memset(cifs_pdu->Signature.SecuritySignature, 0, 8);
+        else
+                memcpy(cifs_pdu->Signature.SecuritySignature, smb_signature, 8);
+
+        return rc;
+
+}
+
 int cifs_verify_signature(struct smb_hdr * cifs_pdu, const char * mac_key,
        __u32 expected_sequence_number)
 {
index e10213b7541e4ec6685daa2c91ffe6a8aaf2c1c4..79eeccd0437f08c2a3ac56a51af2de0cadc36f19 100644 (file)
@@ -513,6 +513,17 @@ static ssize_t cifs_file_aio_write(struct kiocb *iocb, const char __user *buf,
        return written;
 }
 
+static loff_t cifs_llseek(struct file *file, loff_t offset, int origin)
+{
+       /* origin == SEEK_END => we must revalidate the cached file length */
+       if (origin == 2) {
+               int retval = cifs_revalidate(file->f_dentry);
+               if (retval < 0)
+                       return (loff_t)retval;
+       }
+       return remote_llseek(file, offset, origin);
+}
+
 static struct file_system_type cifs_fs_type = {
        .owner = THIS_MODULE,
        .name = "cifs",
@@ -586,6 +597,7 @@ struct file_operations cifs_file_ops = {
        .flush = cifs_flush,
        .mmap  = cifs_file_mmap,
        .sendfile = generic_file_sendfile,
+       .llseek = cifs_llseek,
 #ifdef CONFIG_CIFS_POSIX
        .ioctl  = cifs_ioctl,
 #endif /* CONFIG_CIFS_POSIX */
@@ -609,7 +621,7 @@ struct file_operations cifs_file_direct_ops = {
 #ifdef CONFIG_CIFS_POSIX
        .ioctl  = cifs_ioctl,
 #endif /* CONFIG_CIFS_POSIX */
-
+       .llseek = cifs_llseek,
 #ifdef CONFIG_CIFS_EXPERIMENTAL
        .dir_notify = cifs_dir_notify,
 #endif /* CONFIG_CIFS_EXPERIMENTAL */
@@ -627,6 +639,7 @@ struct file_operations cifs_file_nobrl_ops = {
        .flush = cifs_flush,
        .mmap  = cifs_file_mmap,
        .sendfile = generic_file_sendfile,
+       .llseek = cifs_llseek,
 #ifdef CONFIG_CIFS_POSIX
        .ioctl  = cifs_ioctl,
 #endif /* CONFIG_CIFS_POSIX */
@@ -649,7 +662,7 @@ struct file_operations cifs_file_direct_nobrl_ops = {
 #ifdef CONFIG_CIFS_POSIX
        .ioctl  = cifs_ioctl,
 #endif /* CONFIG_CIFS_POSIX */
-
+       .llseek = cifs_llseek,
 #ifdef CONFIG_CIFS_EXPERIMENTAL
        .dir_notify = cifs_dir_notify,
 #endif /* CONFIG_CIFS_EXPERIMENTAL */
@@ -733,7 +746,7 @@ cifs_init_request_bufs(void)
                kmem_cache_destroy(cifs_req_cachep);
                return -ENOMEM;
        }
-       /* 256 (MAX_CIFS_HDR_SIZE bytes is enough for most SMB responses and
+       /* MAX_CIFS_SMALL_BUFFER_SIZE bytes is enough for most SMB responses and
        almost all handle based requests (but not write response, nor is it
        sufficient for path based requests).  A smaller size would have
        been more efficient (compacting multiple slab items on one 4k page) 
@@ -742,7 +755,8 @@ cifs_init_request_bufs(void)
        efficient to alloc 1 per page off the slab compared to 17K (5page) 
        alloc of large cifs buffers even when page debugging is on */
        cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
-                       MAX_CIFS_HDR_SIZE, 0, SLAB_HWCACHE_ALIGN, NULL, NULL);
+                       MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN, 
+                       NULL, NULL);
        if (cifs_sm_req_cachep == NULL) {
                mempool_destroy(cifs_req_poolp);
                kmem_cache_destroy(cifs_req_cachep);
@@ -954,6 +968,12 @@ init_cifs(void)
        atomic_set(&tconInfoReconnectCount, 0);
 
        atomic_set(&bufAllocCount, 0);
+       atomic_set(&smBufAllocCount, 0);
+#ifdef CONFIG_CIFS_STATS2
+       atomic_set(&totBufAllocCount, 0);
+       atomic_set(&totSmBufAllocCount, 0);
+#endif /* CONFIG_CIFS_STATS2 */
+
        atomic_set(&midCount, 0);
        GlobalCurrentXid = 0;
        GlobalTotalActiveXid = 0;
index 9ec40e0e54fc4eed289eb9070aeb0c8120f610cd..821a8eb2255965a77e031c37a6b2f4c674f97e48 100644 (file)
@@ -99,5 +99,5 @@ extern ssize_t        cifs_getxattr(struct dentry *, const char *, void *, size_t);
 extern ssize_t cifs_listxattr(struct dentry *, char *, size_t);
 extern int cifs_ioctl (struct inode * inode, struct file * filep,
                       unsigned int command, unsigned long arg);
-#define CIFS_VERSION   "1.39"
+#define CIFS_VERSION   "1.40"
 #endif                         /* _CIFSFS_H */
index 1ba08f8c5bc4aafac85cf3f83929391d6c633e17..7bed27601ce59fb8c40dea3f762dcce07d143549 100644 (file)
@@ -233,6 +233,8 @@ struct cifsTconInfo {
        atomic_t num_hardlinks;
        atomic_t num_symlinks;
        atomic_t num_locks;
+       atomic_t num_acl_get;
+       atomic_t num_acl_set;
 #ifdef CONFIG_CIFS_STATS2
        unsigned long long time_writes;
        unsigned long long time_reads;
@@ -285,6 +287,7 @@ struct cifs_search_info {
        unsigned endOfSearch:1;
        unsigned emptyDir:1;
        unsigned unicode:1;
+       unsigned smallBuf:1; /* so we know which buf_release function to call */
 };
 
 struct cifsFileInfo {
@@ -420,7 +423,12 @@ struct dir_notify_req {
 #define   MID_RESPONSE_RECEIVED 4
 #define   MID_RETRY_NEEDED      8 /* session closed while this request out */
 #define   MID_NO_RESP_NEEDED 0x10
-#define   MID_SMALL_BUFFER   0x20 /* 112 byte response buffer instead of 4K */
+
+/* Types of response buffer returned from SendReceive2 */
+#define   CIFS_NO_BUFFER        0    /* Response buffer not returned */
+#define   CIFS_SMALL_BUFFER     1
+#define   CIFS_LARGE_BUFFER     2
+#define   CIFS_IOVEC            4    /* array of response buffers */
 
 /*
  *****************************************************************
@@ -505,8 +513,12 @@ GLOBAL_EXTERN atomic_t tcpSesReconnectCount;
 GLOBAL_EXTERN atomic_t tconInfoReconnectCount;
 
 /* Various Debug counters to remove someday (BB) */
-GLOBAL_EXTERN atomic_t bufAllocCount;
-GLOBAL_EXTERN atomic_t smBufAllocCount;      
+GLOBAL_EXTERN atomic_t bufAllocCount;    /* current number allocated  */
+#ifdef CONFIG_CIFS_STATS2
+GLOBAL_EXTERN atomic_t totBufAllocCount; /* total allocated over all time */
+GLOBAL_EXTERN atomic_t totSmBufAllocCount;
+#endif
+GLOBAL_EXTERN atomic_t smBufAllocCount;
 GLOBAL_EXTERN atomic_t midCount;
 
 /* Misc globals */
index 33e1859fd2f67ced969247e6a6bc365a941a8895..cc2471094ca58f38dfbd206005c511e41195d391 100644 (file)
@@ -1,7 +1,7 @@
 /*
  *   fs/cifs/cifspdu.h
  *
- *   Copyright (c) International Business Machines  Corp., 2002
+ *   Copyright (c) International Business Machines  Corp., 2002,2005
  *   Author(s): Steve French (sfrench@us.ibm.com)
  *
  *   This library is free software; you can redistribute it and/or modify
 #define NT_TRANSACT_GET_USER_QUOTA    0x07
 #define NT_TRANSACT_SET_USER_QUOTA    0x08
 
-#define MAX_CIFS_HDR_SIZE 256  /* is future chained NTCreateXReadX bigger? */
+#define MAX_CIFS_SMALL_BUFFER_SIZE 448 /* big enough for most */
+/* future chained NTCreateXReadX bigger, but for time being NTCreateX biggest */
+/* among the requests (NTCreateX response is bigger with wct of 34) */
+#define MAX_CIFS_HDR_SIZE 0x58 /* 4 len + 32 hdr + (2*24 wct) + 2 bct + 2 pad */
+#define CIFS_SMALL_PATH 120 /* allows for (448-88)/3 */
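As a quick check of the arithmetic encoded in the comments above:
4 (RFC1001 length) + 32 (header) + 2*24 (wct area) + 2 (bct) + 2 (pad) = 88 =
0x58, and (448 - 88) / 3 = 120, which is where the CIFS_SMALL_PATH limit of 120
comes from.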
 
 /* internal cifs vfs structures */
 /*****************************************************************
@@ -524,7 +528,7 @@ typedef union smb_com_session_setup_andx {
                /* STRING PrimaryDomain */
                /* STRING NativeOS */
                /* STRING NativeLanMan */
-       } __attribute__((packed)) old_req;              /* pre-NTLM (LANMAN2.1) request format */
+       } __attribute__((packed)) old_req; /* pre-NTLM (LANMAN2.1) req format */
 
        struct {                /* default (NTLM) response format */
                struct smb_hdr hdr;     /* wct = 3 */
@@ -536,7 +540,7 @@ typedef union smb_com_session_setup_andx {
                unsigned char NativeOS[1];      /* followed by */
 /*     unsigned char * NativeLanMan; */
 /*      unsigned char * PrimaryDomain; */
-       } __attribute__((packed)) old_resp;             /* pre-NTLM (LANMAN2.1) response format */
+       } __attribute__((packed)) old_resp; /* pre-NTLM (LANMAN2.1) response */
 } __attribute__((packed)) SESSION_SETUP_ANDX;
 
 #define CIFS_NETWORK_OPSYS "CIFS VFS Client for Linux"
@@ -1003,10 +1007,49 @@ typedef struct smb_com_setattr_rsp {
 
 /* empty wct response to setattr */
 
-/***************************************************/
-/* NT Transact structure defintions follow         */
-/* Currently only ioctl and notify are implemented */
-/***************************************************/
+/*******************************************************/
+/* NT Transact structure definitions follow            */
+/* Currently only ioctl, acl (get security descriptor) */  
+/* and notify are implemented                          */
+/*******************************************************/
+typedef struct smb_com_ntransact_req {
+        struct smb_hdr hdr; /* wct >= 19 */
+        __u8 MaxSetupCount;
+        __u16 Reserved;
+        __le32 TotalParameterCount;
+        __le32 TotalDataCount;
+        __le32 MaxParameterCount;
+        __le32 MaxDataCount;
+        __le32 ParameterCount;
+        __le32 ParameterOffset;
+        __le32 DataCount;
+        __le32 DataOffset;
+        __u8 SetupCount; /* four setup words follow subcommand */
+        /* SNIA spec incorrectly included spurious pad here */
+        __le16 SubCommand; /* 2 = IOCTL/FSCTL */
+       /* SetupCount words follow then */ 
+        __le16 ByteCount;
+        __u8 Pad[3];
+        __u8 Parms[0];
+} __attribute__((packed)) NTRANSACT_REQ;
+
+typedef struct smb_com_ntransact_rsp {
+       struct smb_hdr hdr;     /* wct = 18 */
+       __u8 Reserved[3];
+       __le32 TotalParameterCount;
+       __le32 TotalDataCount;
+       __le32 ParameterCount;
+       __le32 ParameterOffset;
+       __le32 ParameterDisplacement;
+       __le32 DataCount;
+       __le32 DataOffset;
+       __le32 DataDisplacement;
+       __u8 SetupCount;   /* 0 */
+       __u16 ByteCount;
+        /* __u8 Pad[3]; */
+       /* parms and data follow */
+} __attribute__((packed)) NTRANSACT_RSP;
+
 typedef struct smb_com_transaction_ioctl_req {
        struct smb_hdr hdr;     /* wct = 23 */
        __u8 MaxSetupCount;
@@ -1021,11 +1064,11 @@ typedef struct smb_com_transaction_ioctl_req {
        __le32 DataOffset;
        __u8 SetupCount; /* four setup words follow subcommand */
        /* SNIA spec incorrectly included spurious pad here */
-       __le16 SubCommand;/* 2 = IOCTL/FSCTL */
+       __le16 SubCommand; /* 2 = IOCTL/FSCTL */
        __le32 FunctionCode;
        __u16 Fid;
-       __u8 IsFsctl;    /* 1 = File System Control, 0 = device control (IOCTL)*/
-       __u8 IsRootFlag; /* 1 = apply command to root of share (must be DFS share)*/
+       __u8 IsFsctl;  /* 1 = File System Control 0 = device control (IOCTL) */
+       __u8 IsRootFlag; /* 1 = apply command to root of share (must be DFS) */
        __le16 ByteCount;
        __u8 Pad[3];
        __u8 Data[1];
@@ -1045,9 +1088,35 @@ typedef struct smb_com_transaction_ioctl_rsp {
        __u8 SetupCount;        /* 1 */
        __le16 ReturnedDataLen;
        __u16 ByteCount;
-       __u8 Pad[3];
 } __attribute__((packed)) TRANSACT_IOCTL_RSP;
 
+#define CIFS_ACL_OWNER 1
+#define CIFS_ACL_GROUP 2
+#define CIFS_ACL_DACL  4
+#define CIFS_ACL_SACL  8
+
+typedef struct smb_com_transaction_qsec_req {
+       struct smb_hdr hdr;     /* wct = 19 */
+       __u8 MaxSetupCount;
+       __u16 Reserved;
+       __le32 TotalParameterCount;
+       __le32 TotalDataCount;
+       __le32 MaxParameterCount;
+       __le32 MaxDataCount;
+       __le32 ParameterCount;
+       __le32 ParameterOffset;
+       __le32 DataCount;
+       __le32 DataOffset;
+       __u8 SetupCount; /* no setup words follow subcommand */
+       /* SNIA spec incorrectly included spurious pad here */
+       __le16 SubCommand; /* 6 = QUERY_SECURITY_DESC */
+       __le16 ByteCount; /* bcc = 3 + 8 */
+       __u8 Pad[3];
+       __u16 Fid;
+       __u16 Reserved2;
+       __le32 AclFlags;
+} __attribute__((packed)) QUERY_SEC_DESC_REQ;
+
 typedef struct smb_com_transaction_change_notify_req {
        struct smb_hdr hdr;     /* wct = 23 */
        __u8 MaxSetupCount;
@@ -1068,10 +1137,12 @@ typedef struct smb_com_transaction_change_notify_req {
        __u8 WatchTree;  /* 1 = Monitor subdirectories */
        __u8 Reserved2;
        __le16 ByteCount;
-/* __u8 Pad[3];*/
+/*     __u8 Pad[3];*/
 /*     __u8 Data[1];*/
 } __attribute__((packed)) TRANSACT_CHANGE_NOTIFY_REQ;
 
+/* BB eventually change to use generic ntransact rsp struct 
+      and validation routine */
 typedef struct smb_com_transaction_change_notify_rsp {
        struct smb_hdr hdr;     /* wct = 18 */
        __u8 Reserved[3];
index 1b73f4f4c5ce6d1cb88e95e53fa8420fe1382957..3c03aadaff0c479d28ae954469829b47039f0b9a 100644 (file)
@@ -48,8 +48,8 @@ extern int SendReceive(const unsigned int /* xid */ , struct cifsSesInfo *,
                        struct smb_hdr * /* out */ ,
                        int * /* bytes returned */ , const int long_op);
 extern int SendReceive2(const unsigned int /* xid */ , struct cifsSesInfo *,
-                       struct kvec *, int /* nvec */,
-                       int * /* bytes returned */ , const int long_op);
+                       struct kvec *, int /* nvec to send */, 
+                       int * /* type of buf returned */ , const int long_op);
 extern int checkSMBhdr(struct smb_hdr *smb, __u16 mid);
 extern int checkSMB(struct smb_hdr *smb, __u16 mid, int length);
 extern int is_valid_oplock_break(struct smb_hdr *smb);
@@ -93,11 +93,12 @@ extern int CIFSTCon(unsigned int xid, struct cifsSesInfo *ses,
                        const struct nls_table *);
 
 extern int CIFSFindFirst(const int xid, struct cifsTconInfo *tcon,
-            const char *searchName, const struct nls_table *nls_codepage,
-            __u16 *searchHandle, struct cifs_search_info * psrch_inf, int map, const char dirsep);
+               const char *searchName, const struct nls_table *nls_codepage,
+               __u16 *searchHandle, struct cifs_search_info * psrch_inf, 
+               int map, const char dirsep);
 
 extern int CIFSFindNext(const int xid, struct cifsTconInfo *tcon,
-            __u16 searchHandle, struct cifs_search_info * psrch_inf);
+               __u16 searchHandle, struct cifs_search_info * psrch_inf);
 
 extern int CIFSFindClose(const int, struct cifsTconInfo *tcon,
                        const __u16 search_handle);
@@ -230,19 +231,18 @@ extern int CIFSSMBClose(const int xid, struct cifsTconInfo *tcon,
                        const int smb_file_id);
 
 extern int CIFSSMBRead(const int xid, struct cifsTconInfo *tcon,
-                       const int netfid, unsigned int count,
-                       const __u64 lseek, unsigned int *nbytes, char **buf);
+                        const int netfid, unsigned int count,
+                        const __u64 lseek, unsigned int *nbytes, char **buf,
+                       int * return_buf_type);
 extern int CIFSSMBWrite(const int xid, struct cifsTconInfo *tcon,
                        const int netfid, const unsigned int count,
                        const __u64 lseek, unsigned int *nbytes,
                        const char *buf, const char __user *ubuf, 
                        const int long_op);
-#ifdef CONFIG_CIFS_EXPERIMENTAL
 extern int CIFSSMBWrite2(const int xid, struct cifsTconInfo *tcon,
                        const int netfid, const unsigned int count,
                        const __u64 offset, unsigned int *nbytes, 
                        struct kvec *iov, const int nvec, const int long_op);
-#endif /* CONFIG_CIFS_EXPERIMENTAL */
 extern int CIFSGetSrvInodeNumber(const int xid, struct cifsTconInfo *tcon,
                        const unsigned char *searchName, __u64 * inode_number,
                        const struct nls_table *nls_codepage, 
@@ -269,6 +269,8 @@ extern void tconInfoFree(struct cifsTconInfo *);
 extern int cifs_reconnect(struct TCP_Server_Info *server);
 
 extern int cifs_sign_smb(struct smb_hdr *, struct TCP_Server_Info *,__u32 *);
+extern int cifs_sign_smb2(struct kvec *iov, int n_vec, struct TCP_Server_Info *,
+                         __u32 *);
 extern int cifs_verify_signature(struct smb_hdr *, const char * mac_key,
        __u32 expected_sequence_number);
 extern int cifs_calculate_mac_key(char * key,const char * rn,const char * pass);
@@ -297,6 +299,9 @@ extern int CIFSSMBSetEA(const int xid, struct cifsTconInfo *tcon,
                const char *fileName, const char * ea_name, 
                const void * ea_value, const __u16 ea_value_len, 
                const struct nls_table *nls_codepage, int remap_special_chars);
+extern int CIFSSMBGetCIFSACL(const int xid, struct cifsTconInfo *tcon,
+                       __u16 fid, char *acl_inf, const int buflen,
+                       const int acl_type /* ACCESS vs. DEFAULT */);
 extern int CIFSSMBGetPosixACL(const int xid, struct cifsTconInfo *tcon,
                const unsigned char *searchName,
                char *acl_inf, const int buflen,const int acl_type,
index 6867e556d37e51485a4e9d35fb7ea332fc971b6d..217323b0c8966ae62db2683038adcda6d53dce3b 100644 (file)
@@ -37,6 +37,7 @@
 #include "cifsproto.h"
 #include "cifs_unicode.h"
 #include "cifs_debug.h"
+#include "cifsacl.h"
 
 #ifdef CONFIG_CIFS_POSIX
 static struct {
@@ -372,8 +373,10 @@ CIFSSMBNegotiate(unsigned int xid, struct cifsSesInfo *ses)
        rc = SendReceive(xid, ses, (struct smb_hdr *) pSMB,
                         (struct smb_hdr *) pSMBr, &bytes_returned, 0);
        if (rc == 0) {
-               server->secMode = pSMBr->SecurityMode;  
-               server->secType = NTLM; /* BB override default for 
+               server->secMode = pSMBr->SecurityMode;
+               if((server->secMode & SECMODE_USER) == 0)
+                       cFYI(1,("share mode security"));
+               server->secType = NTLM; /* BB override default for
                                           NTLMv2 or kerberos v5 */
                /* one byte - no need to convert this or EncryptionKeyLen
                   from little endian */
@@ -383,7 +386,7 @@ CIFSSMBNegotiate(unsigned int xid, struct cifsSesInfo *ses)
                        min(le32_to_cpu(pSMBr->MaxBufferSize),
                        (__u32) CIFSMaxBufSize + MAX_CIFS_HDR_SIZE);
                server->maxRw = le32_to_cpu(pSMBr->MaxRawSize);
-               cFYI(0, ("Max buf = %d ", ses->server->maxBuf));
+               cFYI(0, ("Max buf = %d", ses->server->maxBuf));
                GETU32(ses->server->sessid) = le32_to_cpu(pSMBr->SessionKey);
                server->capabilities = le32_to_cpu(pSMBr->Capabilities);
                server->timeZone = le16_to_cpu(pSMBr->ServerTimeZone);  
@@ -411,8 +414,7 @@ CIFSSMBNegotiate(unsigned int xid, struct cifsSesInfo *ses)
                                                (server->server_GUID,
                                                pSMBr->u.extended_response.
                                                GUID, 16) != 0) {
-                                               cFYI(1,
-                                                    ("UID of server does not match previous connection to same ip address"));
+                                               cFYI(1, ("server UID changed"));
                                                memcpy(server->
                                                        server_GUID,
                                                        pSMBr->u.
@@ -958,21 +960,19 @@ openRetry:
        return rc;
 }
 
-/* If no buffer passed in, then caller wants to do the copy
-       as in the case of readpages so the SMB buffer must be
-       freed by the caller */
-
 int
 CIFSSMBRead(const int xid, struct cifsTconInfo *tcon,
-           const int netfid, const unsigned int count,
-           const __u64 lseek, unsigned int *nbytes, char **buf)
+            const int netfid, const unsigned int count,
+            const __u64 lseek, unsigned int *nbytes, char **buf,
+           int * pbuf_type)
 {
        int rc = -EACCES;
        READ_REQ *pSMB = NULL;
        READ_RSP *pSMBr = NULL;
        char *pReadData = NULL;
-       int bytes_returned;
        int wct;
+       int resp_buf_type = 0;
+       struct kvec iov[1];
 
        cFYI(1,("Reading %d bytes on fid %d",count,netfid));
        if(tcon->ses->capabilities & CAP_LARGE_FILES)
@@ -981,8 +981,7 @@ CIFSSMBRead(const int xid, struct cifsTconInfo *tcon,
                wct = 10; /* old style read */
 
        *nbytes = 0;
-       rc = smb_init(SMB_COM_READ_ANDX, wct, tcon, (void **) &pSMB,
-                     (void **) &pSMBr);
+       rc = small_smb_init(SMB_COM_READ_ANDX, wct, tcon, (void **) &pSMB);
        if (rc)
                return rc;
 
@@ -990,13 +989,13 @@ CIFSSMBRead(const int xid, struct cifsTconInfo *tcon,
        if (tcon->ses->server == NULL)
                return -ECONNABORTED;
 
-       pSMB->AndXCommand = 0xFF;       /* none */
+       pSMB->AndXCommand = 0xFF;       /* none */
        pSMB->Fid = netfid;
        pSMB->OffsetLow = cpu_to_le32(lseek & 0xFFFFFFFF);
        if(wct == 12)
                pSMB->OffsetHigh = cpu_to_le32(lseek >> 32);
-        else if((lseek >> 32) > 0) /* can not handle this big offset for old */
-                return -EIO;
+       else if((lseek >> 32) > 0) /* can not handle this big offset for old */
+               return -EIO;
 
        pSMB->Remaining = 0;
        pSMB->MaxCount = cpu_to_le16(count & 0xFFFF);
@@ -1005,14 +1004,18 @@ CIFSSMBRead(const int xid, struct cifsTconInfo *tcon,
                pSMB->ByteCount = 0;  /* no need to do le conversion since 0 */
        else {
                /* old style read */
-               struct smb_com_readx_req * pSMBW = 
+               struct smb_com_readx_req * pSMBW =
                        (struct smb_com_readx_req *)pSMB;
-               pSMBW->ByteCount = 0;   
+               pSMBW->ByteCount = 0;
        }
-       
-       rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
-                        (struct smb_hdr *) pSMBr, &bytes_returned, 0);
+
+       iov[0].iov_base = (char *)pSMB;
+       iov[0].iov_len = pSMB->hdr.smb_buf_length + 4;
+       rc = SendReceive2(xid, tcon->ses, iov, 
+                         1 /* num iovecs */,
+                         &resp_buf_type, 0); 
        cifs_stats_inc(&tcon->num_reads);
+       pSMBr = (READ_RSP *)iov[0].iov_base;
        if (rc) {
                cERROR(1, ("Send error in read = %d", rc));
        } else {
@@ -1022,33 +1025,43 @@ CIFSSMBRead(const int xid, struct cifsTconInfo *tcon,
                *nbytes = data_length;
 
                /*check that DataLength would not go beyond end of SMB */
-               if ((data_length > CIFSMaxBufSize) 
+               if ((data_length > CIFSMaxBufSize)
                                || (data_length > count)) {
                        cFYI(1,("bad length %d for count %d",data_length,count));
                        rc = -EIO;
                        *nbytes = 0;
                } else {
-                       pReadData =
-                           (char *) (&pSMBr->hdr.Protocol) +
+                       pReadData = (char *) (&pSMBr->hdr.Protocol) +
                            le16_to_cpu(pSMBr->DataOffset);
-/*                     if(rc = copy_to_user(buf, pReadData, data_length)) {
-                               cERROR(1,("Faulting on read rc = %d",rc));
-                               rc = -EFAULT;
-                       }*/ /* can not use copy_to_user when using page cache*/
+/*                      if(rc = copy_to_user(buf, pReadData, data_length)) {
+                                cERROR(1,("Faulting on read rc = %d",rc));
+                                rc = -EFAULT;
+                        }*/ /* can not use copy_to_user when using page cache*/
                        if(*buf)
-                           memcpy(*buf,pReadData,data_length);
+                               memcpy(*buf,pReadData,data_length);
                }
        }
-       if(*buf)
-               cifs_buf_release(pSMB);
-       else
-               *buf = (char *)pSMB;
 
-       /* Note: On -EAGAIN error only caller can retry on handle based calls 
+       cifs_small_buf_release(pSMB);
+       if(*buf) {
+               if(resp_buf_type == CIFS_SMALL_BUFFER)
+                       cifs_small_buf_release(iov[0].iov_base);
+               else if(resp_buf_type == CIFS_LARGE_BUFFER)
+                       cifs_buf_release(iov[0].iov_base);
+       } else { /* return buffer to caller to free */
+               /* BB FIXME how do we tell caller if it is not a large buffer */
+               *buf = iov[0].iov_base;
+               if(resp_buf_type == CIFS_SMALL_BUFFER)
+                       *pbuf_type = CIFS_SMALL_BUFFER;
+               else if(resp_buf_type == CIFS_LARGE_BUFFER)
+                       *pbuf_type = CIFS_LARGE_BUFFER;
+       }
+
+       /* Note: On -EAGAIN error only caller can retry on handle based calls
                since file handle passed in no longer valid */
        return rc;
 }
 
+
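A minimal caller-side sketch, not part of the patch, showing the cleanup
contract implied by the new pbuf_type out-parameter; the variables here are
placeholders, but the release pattern mirrors the one used elsewhere in this
patch:

        /* illustrative only: a caller that lets CIFSSMBRead return the response
         * buffer must free it with the helper matching the returned type */
        int buf_type = CIFS_NO_BUFFER;
        unsigned int bytes_read = 0;
        char *smb_read_data = NULL;
        int rc;

        rc = CIFSSMBRead(xid, pTcon, netfid, count, offset,
                         &bytes_read, &smb_read_data, &buf_type);
        if (smb_read_data) {
                if (buf_type == CIFS_SMALL_BUFFER)
                        cifs_small_buf_release(smb_read_data);
                else if (buf_type == CIFS_LARGE_BUFFER)
                        cifs_buf_release(smb_read_data);
        }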
 int
 CIFSSMBWrite(const int xid, struct cifsTconInfo *tcon,
             const int netfid, const unsigned int count,
@@ -1155,7 +1168,6 @@ CIFSSMBWrite(const int xid, struct cifsTconInfo *tcon,
        return rc;
 }
 
-#ifdef CONFIG_CIFS_EXPERIMENTAL
 int
 CIFSSMBWrite2(const int xid, struct cifsTconInfo *tcon,
             const int netfid, const unsigned int count,
@@ -1164,10 +1176,10 @@ CIFSSMBWrite2(const int xid, struct cifsTconInfo *tcon,
 {
        int rc = -EACCES;
        WRITE_REQ *pSMB = NULL;
-       int bytes_returned, wct;
+       int wct;
        int smb_hdr_len;
+       int resp_buf_type = 0;
 
-       /* BB removeme BB */
        cFYI(1,("write2 at %lld %d bytes", (long long)offset, count));
 
        if(tcon->ses->capabilities & CAP_LARGE_FILES)
@@ -1210,22 +1222,34 @@ CIFSSMBWrite2(const int xid, struct cifsTconInfo *tcon,
                pSMBW->ByteCount = cpu_to_le16(count + 5);
        }
        iov[0].iov_base = pSMB;
-       iov[0].iov_len = smb_hdr_len + 4;
+       if(wct == 14)
+               iov[0].iov_len = smb_hdr_len + 4;
+       else /* wct == 12 pad bigger by four bytes */
+               iov[0].iov_len = smb_hdr_len + 8;
+       
 
-       rc = SendReceive2(xid, tcon->ses, iov, n_vec + 1, &bytes_returned,
+       rc = SendReceive2(xid, tcon->ses, iov, n_vec + 1, &resp_buf_type,
                          long_op);
        cifs_stats_inc(&tcon->num_writes);
        if (rc) {
                cFYI(1, ("Send error Write2 = %d", rc));
                *nbytes = 0;
+       } else if(resp_buf_type == 0) {
+               /* presumably this can not happen, but best to be safe */
+               rc = -EIO;
+               *nbytes = 0;
        } else {
-               WRITE_RSP * pSMBr = (WRITE_RSP *)pSMB;
+               WRITE_RSP * pSMBr = (WRITE_RSP *)iov[0].iov_base;
                *nbytes = le16_to_cpu(pSMBr->CountHigh);
                *nbytes = (*nbytes) << 16;
                *nbytes += le16_to_cpu(pSMBr->Count);
-       }
+       } 
 
        cifs_small_buf_release(pSMB);
+       if(resp_buf_type == CIFS_SMALL_BUFFER)
+               cifs_small_buf_release(iov[0].iov_base);
+       else if(resp_buf_type == CIFS_LARGE_BUFFER)
+               cifs_buf_release(iov[0].iov_base);
 
        /* Note: On -EAGAIN error only caller can retry on handle based calls 
                since file handle passed in no longer valid */
@@ -1234,8 +1258,6 @@ CIFSSMBWrite2(const int xid, struct cifsTconInfo *tcon,
 }
 
 
-#endif /* CIFS_EXPERIMENTAL */
-
 int
 CIFSSMBLock(const int xid, struct cifsTconInfo *tcon,
            const __u16 smb_file_id, const __u64 len,
@@ -1906,6 +1928,90 @@ querySymLinkRetry:
        return rc;
 }
 
+/* Initialize NT TRANSACT SMB into small smb request buffer.
+   This assumes that all NT TRANSACTS that we init here have
+   total parm and data under about 400 bytes (to fit in small cifs
+   buffer size), which is the case so far; it easily fits. NB:
+       Setup words themselves and ByteCount
+       MaxSetupCount (size of returned setup area) and
+       MaxParameterCount (returned parms size) must be set by caller */
+static int 
+smb_init_ntransact(const __u16 sub_command, const int setup_count,
+                  const int parm_len, struct cifsTconInfo *tcon,
+                  void ** ret_buf)
+{
+       int rc;
+       __u32 temp_offset;
+       struct smb_com_ntransact_req * pSMB;
+
+       rc = small_smb_init(SMB_COM_NT_TRANSACT, 19 + setup_count, tcon,
+                               (void **)&pSMB);
+       if (rc)
+               return rc;
+       *ret_buf = (void *)pSMB;
+       pSMB->Reserved = 0;
+       pSMB->TotalParameterCount = cpu_to_le32(parm_len);
+       pSMB->TotalDataCount  = 0;
+       pSMB->MaxDataCount = cpu_to_le32((tcon->ses->server->maxBuf -
+                                         MAX_CIFS_HDR_SIZE) & 0xFFFFFF00);
+       pSMB->ParameterCount = pSMB->TotalParameterCount;
+       pSMB->DataCount  = pSMB->TotalDataCount;
+       temp_offset = offsetof(struct smb_com_ntransact_req, Parms) +
+                       (setup_count * 2) - 4 /* for rfc1001 length itself */;
+       pSMB->ParameterOffset = cpu_to_le32(temp_offset);
+       pSMB->DataOffset = cpu_to_le32(temp_offset + parm_len);
+       pSMB->SetupCount = setup_count; /* no need to le convert byte fields */
+       pSMB->SubCommand = cpu_to_le16(sub_command);
+       return 0;
+}
+
+static int
+validate_ntransact(char * buf, char ** ppparm, char ** ppdata,
+                  int * pdatalen, int * pparmlen)
+{
+       char * end_of_smb;
+       __u32 data_count, data_offset, parm_count, parm_offset;
+       struct smb_com_ntransact_rsp * pSMBr;
+
+       if(buf == NULL)
+               return -EINVAL;
+
+       pSMBr = (struct smb_com_ntransact_rsp *)buf;
+
+       /* ByteCount was converted from little endian in SendReceive */
+       end_of_smb = 2 /* sizeof byte count */ + pSMBr->ByteCount + 
+                       (char *)&pSMBr->ByteCount;
+
+               
+       data_offset = le32_to_cpu(pSMBr->DataOffset);
+       data_count = le32_to_cpu(pSMBr->DataCount);
+        parm_offset = le32_to_cpu(pSMBr->ParameterOffset);
+       parm_count = le32_to_cpu(pSMBr->ParameterCount);
+
+       *ppparm = (char *)&pSMBr->hdr.Protocol + parm_offset;
+       *ppdata = (char *)&pSMBr->hdr.Protocol + data_offset;
+
+       /* should we also check that parm and data areas do not overlap? */
+       if(*ppparm > end_of_smb) {
+               cFYI(1,("parms start after end of smb"));
+               return -EINVAL;
+       } else if(parm_count + *ppparm > end_of_smb) {
+               cFYI(1,("parm end after end of smb"));
+               return -EINVAL;
+       } else if(*ppdata > end_of_smb) {
+               cFYI(1,("data starts after end of smb"));
+               return -EINVAL;
+       } else if(data_count + *ppdata > end_of_smb) {
+               cFYI(1,("data %p + count %d (%p) ends after end of smb %p start %p",
+                       *ppdata, data_count, (data_count + *ppdata), end_of_smb, pSMBr));  /* BB FIXME */
+               return -EINVAL;
+       } else if(parm_count + data_count > pSMBr->ByteCount) {
+               cFYI(1,("parm count and data count larger than SMB"));
+               return -EINVAL;
+       }
+       return 0;
+}
+
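A minimal sketch, not part of the patch, of the intended calling pattern for
validate_ntransact once SendReceive2 has returned; the variables below are
placeholders, and the release calls mirror CIFSSMBGetCIFSACL further down:

        /* illustrative only: locate the parameter and data areas inside an NT
         * TRANSACT response, then release the response buffer by its type */
        char *parm, *data;
        int data_len, parm_len;
        int rc;

        rc = validate_ntransact(iov[0].iov_base, &parm, &data,
                                &data_len, &parm_len);
        if (rc == 0) {
                /* parm and data point into the response buffer; copy out
                 * anything needed before the buffer is released */
        }
        if (buf_type == CIFS_SMALL_BUFFER)
                cifs_small_buf_release(iov[0].iov_base);
        else if (buf_type == CIFS_LARGE_BUFFER)
                cifs_buf_release(iov[0].iov_base);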
 int
 CIFSSMBQueryReparseLinkInfo(const int xid, struct cifsTconInfo *tcon,
                        const unsigned char *searchName,
@@ -1928,7 +2034,8 @@ CIFSSMBQueryReparseLinkInfo(const int xid, struct cifsTconInfo *tcon,
        pSMB->TotalDataCount = 0;
        pSMB->MaxParameterCount = cpu_to_le32(2);
        /* BB find exact data count max from sess structure BB */
-       pSMB->MaxDataCount = cpu_to_le32(4000);
+       pSMB->MaxDataCount = cpu_to_le32((tcon->ses->server->maxBuf -
+                                         MAX_CIFS_HDR_SIZE) & 0xFFFFFF00);
        pSMB->MaxSetupCount = 4;
        pSMB->Reserved = 0;
        pSMB->ParameterOffset = 0;
@@ -1955,7 +2062,9 @@ CIFSSMBQueryReparseLinkInfo(const int xid, struct cifsTconInfo *tcon,
                        rc = -EIO;      /* bad smb */
                else {
                        if(data_count && (data_count < 2048)) {
-                               char * end_of_smb = pSMBr->ByteCount + (char *)&pSMBr->ByteCount;
+                               char * end_of_smb = 2 /* sizeof byte count */ +
+                                               pSMBr->ByteCount +
+                                               (char *)&pSMBr->ByteCount;
 
                                struct reparse_data * reparse_buf = (struct reparse_data *)
                                        ((char *)&pSMBr->hdr.Protocol + data_offset);
@@ -2199,6 +2308,7 @@ queryAclRetry:
 
        rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
                (struct smb_hdr *) pSMBr, &bytes_returned, 0);
+       cifs_stats_inc(&tcon->num_acl_get);
        if (rc) {
                cFYI(1, ("Send error in Query POSIX ACL = %d", rc));
        } else {
@@ -2386,6 +2496,92 @@ GetExtAttrOut:
 
 #endif /* CONFIG_POSIX */
 
+
+/* security id for everyone */
+const struct cifs_sid sid_everyone = {1, 1, {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0}};
+/* group users */
+const struct cifs_sid sid_user = {1, 2 , {0, 0, 0, 0, 0, 5}, {32, 545, 0, 0}};
+
+/* Convert CIFS ACL to POSIX form */
+static int parse_sec_desc(struct cifs_sid * psec_desc, int acl_len)
+{
+       return 0;
+}
+
+/* Get Security Descriptor (by handle) from remote server for a file or dir */
+int
+CIFSSMBGetCIFSACL(const int xid, struct cifsTconInfo *tcon, __u16 fid,
+         /*  BB fix up return info */ char *acl_inf, const int buflen, 
+                 const int acl_type /* ACCESS/DEFAULT not sure implication */)
+{
+       int rc = 0;
+       int buf_type = 0;
+       QUERY_SEC_DESC_REQ * pSMB;
+       struct kvec iov[1];
+
+       cFYI(1, ("GetCifsACL"));
+
+       rc = smb_init_ntransact(NT_TRANSACT_QUERY_SECURITY_DESC, 0, 
+                       8 /* parm len */, tcon, (void **) &pSMB);
+       if (rc)
+               return rc;
+
+       pSMB->MaxParameterCount = cpu_to_le32(4);
+       /* BB TEST with big acls that might need to be e.g. larger than 16K */
+       pSMB->MaxSetupCount = 0;
+       pSMB->Fid = fid; /* file handle always le */
+       pSMB->AclFlags = cpu_to_le32(CIFS_ACL_OWNER | CIFS_ACL_GROUP |
+                                    CIFS_ACL_DACL);
+       pSMB->ByteCount = cpu_to_le16(11); /* 3 bytes pad + 8 bytes parm */
+       pSMB->hdr.smb_buf_length += 11;
+       iov[0].iov_base = (char *)pSMB;
+       iov[0].iov_len = pSMB->hdr.smb_buf_length + 4;
+
+       rc = SendReceive2(xid, tcon->ses, iov, 1 /* num iovec */, &buf_type, 0);
+       cifs_stats_inc(&tcon->num_acl_get);
+       if (rc) {
+               cFYI(1, ("Send error in QuerySecDesc = %d", rc));
+       } else {                /* decode response */
+               struct cifs_sid * psec_desc;
+               __le32 * parm;
+               int parm_len;
+               int data_len;
+               int acl_len;
+               struct smb_com_ntransact_rsp * pSMBr;
+
+/* validate_nttransact */
+               rc = validate_ntransact(iov[0].iov_base, (char **)&parm, 
+                                       (char **)&psec_desc,
+                                       &parm_len, &data_len);
+               
+               if(rc)
+                       goto qsec_out;
+               pSMBr = (struct smb_com_ntransact_rsp *)iov[0].iov_base;
+
+               cERROR(1,("smb %p parm %p data %p",pSMBr,parm,psec_desc));  /* BB removeme BB */
+
+               if (le32_to_cpu(pSMBr->ParameterCount) != 4) {
+                       rc = -EIO;      /* bad smb */
+                       goto qsec_out;
+               }
+
+/* BB check that data area is minimum length and as big as acl_len */
+
+               acl_len = le32_to_cpu(*(__le32 *)parm);
+               /* BB check if(acl_len > bufsize) */
+
+               parse_sec_desc(psec_desc, acl_len);
+       }
+qsec_out:
+       if(buf_type == CIFS_SMALL_BUFFER)
+               cifs_small_buf_release(iov[0].iov_base);
+       else if(buf_type == CIFS_LARGE_BUFFER)
+               cifs_buf_release(iov[0].iov_base);
+       cifs_small_buf_release(pSMB);
+       return rc;
+}
+
+
 /* Legacy Query Path Information call for lookup to old servers such
    as Win9x/WinME */
 int SMBQueryInformation(const int xid, struct cifsTconInfo *tcon,
@@ -4284,7 +4480,7 @@ int CIFSSMBNotify(const int xid, struct cifsTconInfo *tcon,
 {
        int rc = 0;
        struct smb_com_transaction_change_notify_req * pSMB = NULL;
-       struct smb_com_transaction_change_notify_rsp * pSMBr = NULL;
+       struct smb_com_ntransaction_change_notify_rsp * pSMBr = NULL;
        struct dir_notify_req *dnotify_req;
        int bytes_returned;
 
@@ -4299,6 +4495,10 @@ int CIFSSMBNotify(const int xid, struct cifsTconInfo *tcon,
        pSMB->MaxParameterCount = cpu_to_le32(2);
        /* BB find exact data count max from sess structure BB */
        pSMB->MaxDataCount = 0; /* same in little endian or be */
+/* BB VERIFY verify which is correct for above BB */
+       pSMB->MaxDataCount = cpu_to_le32((tcon->ses->server->maxBuf -
+                                            MAX_CIFS_HDR_SIZE) & 0xFFFFFF00);
+
        pSMB->MaxSetupCount = 4;
        pSMB->Reserved = 0;
        pSMB->ParameterOffset = 0;
index c467de8576105bdd3bdb4d1983b591be53609b2e..88f60aa520584a87acab13422d22940063f2b536 100644 (file)
@@ -76,12 +76,19 @@ struct smb_vol {
        unsigned setuids:1;
        unsigned noperm:1;
        unsigned no_psx_acl:1; /* set if posix acl support should be disabled */
+       unsigned cifs_acl:1;
        unsigned no_xattr:1;   /* set if xattr (EA) support should be disabled*/
        unsigned server_ino:1; /* use inode numbers from server ie UniqueId */
        unsigned direct_io:1;
        unsigned remap:1;   /* set to remap seven reserved chars in filenames */
        unsigned posix_paths:1;   /* unset to not ask for posix pathnames. */
        unsigned sfu_emul:1;
+       unsigned krb5:1;
+       unsigned ntlm:1;
+       unsigned ntlmv2:1;
+       unsigned nullauth:1; /* attempt to authenticate with null user */
+       unsigned sign:1;
+       unsigned seal:1;     /* encrypt */
        unsigned nocase;     /* request case insensitive filenames */
        unsigned nobrl;      /* disable sending byte range locks to srv */
        unsigned int rsize;
@@ -508,7 +515,7 @@ cifs_demultiplex_thread(struct TCP_Server_Info *server)
                /* else length ok */
                reconnect = 0;
 
-               if(pdu_length > MAX_CIFS_HDR_SIZE - 4) {
+               if(pdu_length > MAX_CIFS_SMALL_BUFFER_SIZE - 4) {
                        isLargeBuf = TRUE;
                        memcpy(bigbuf, smallbuf, 4);
                        smb_buffer = bigbuf;
@@ -777,7 +784,7 @@ cifs_parse_mount_options(char *options, const char *devname,struct smb_vol *vol)
 
        /* vol->retry default is 0 (i.e. "soft" limited retry not hard retry) */
        vol->rw = TRUE;
-
+       vol->ntlm = TRUE;
        /* default is always to request posix paths. */
        vol->posix_paths = 1;
 
@@ -903,6 +910,39 @@ cifs_parse_mount_options(char *options, const char *devname,struct smb_vol *vol)
                                printk(KERN_WARNING "CIFS: ip address too long\n");
                                return 1;
                        }
+                } else if (strnicmp(data, "sec", 3) == 0) { 
+                        if (!value || !*value) {
+                               cERROR(1,("no security value specified"));
+                                continue;
+                        } else if (strnicmp(value, "krb5i", 5) == 0) {
+                               vol->sign = 1;
+                               vol->krb5 = 1;
+                       } else if (strnicmp(value, "krb5p", 5) == 0) {
+                               /* vol->seal = 1; 
+                                  vol->krb5 = 1; */
+                               cERROR(1,("Krb5 cifs privacy not supported"));
+                               return 1;
+                       } else if (strnicmp(value, "krb5", 4) == 0) {
+                               vol->krb5 = 1;
+                       } else if (strnicmp(value, "ntlmv2i", 7) == 0) {
+                               vol->ntlmv2 = 1;
+                               vol->sign = 1;
+                       } else if (strnicmp(value, "ntlmv2", 6) == 0) {
+                               vol->ntlmv2 = 1;
+                       } else if (strnicmp(value, "ntlmi", 5) == 0) {
+                               vol->ntlm = 1;
+                               vol->sign = 1;
+                       } else if (strnicmp(value, "ntlm", 4) == 0) {
+                               /* ntlm is default so can be turned off too */
+                               vol->ntlm = 1;
+                       } else if (strnicmp(value, "nontlm", 6) == 0) {
+                               vol->ntlm = 0;
+                       } else if (strnicmp(value, "none", 4) == 0) {
+                               vol->nullauth = 1; 
+                        } else {
+                                cERROR(1,("bad security option: %s", value));
+                                return 1;
+                        }
                } else if ((strnicmp(data, "unc", 3) == 0)
                           || (strnicmp(data, "target", 6) == 0)
                           || (strnicmp(data, "path", 4) == 0)) {
@@ -1120,6 +1160,10 @@ cifs_parse_mount_options(char *options, const char *devname,struct smb_vol *vol)
                        vol->server_ino = 1;
                } else if (strnicmp(data, "noserverino",9) == 0) {
                        vol->server_ino = 0;
+               } else if (strnicmp(data, "cifsacl",7) == 0) {
+                       vol->cifs_acl = 1;
+               } else if (strnicmp(data, "nocifsacl", 9) == 0) {
+                       vol->cifs_acl = 0;
                } else if (strnicmp(data, "acl",3) == 0) {
                        vol->no_psx_acl = 0;
                } else if (strnicmp(data, "noacl",5) == 0) {
@@ -1546,7 +1590,7 @@ cifs_mount(struct super_block *sb, struct cifs_sb_info *cifs_sb,
                cFYI(1, ("Username: %s ", volume_info.username));
 
        } else {
-               cifserror("No username specified ");
+               cifserror("No username specified");
         /* In userspace mount helper we can get user name from alternate
            locations such as env variables and files on disk */
                kfree(volume_info.UNC);
@@ -1587,7 +1631,7 @@ cifs_mount(struct super_block *sb, struct cifs_sb_info *cifs_sb,
                return -EINVAL;
        } else /* which servers DFS root would we conect to */ {
                cERROR(1,
-                      ("CIFS mount error: No UNC path (e.g. -o unc=//192.168.1.100/public) specified  "));
+                      ("CIFS mount error: No UNC path (e.g. -o unc=//192.168.1.100/public) specified"));
                kfree(volume_info.UNC);
                kfree(volume_info.password);
                FreeXid(xid);
@@ -1626,7 +1670,7 @@ cifs_mount(struct super_block *sb, struct cifs_sb_info *cifs_sb,
 
 
        if (srvTcp) {
-               cFYI(1, ("Existing tcp session with server found "));                
+               cFYI(1, ("Existing tcp session with server found"));                
        } else {        /* create socket */
                if(volume_info.port)
                        sin_server.sin_port = htons(volume_info.port);
@@ -1689,11 +1733,11 @@ cifs_mount(struct super_block *sb, struct cifs_sb_info *cifs_sb,
 
        if (existingCifsSes) {
                pSesInfo = existingCifsSes;
-               cFYI(1, ("Existing smb sess found "));
+               cFYI(1, ("Existing smb sess found"));
                kfree(volume_info.password);
                /* volume_info.UNC freed at end of function */
        } else if (!rc) {
-               cFYI(1, ("Existing smb sess not found "));
+               cFYI(1, ("Existing smb sess not found"));
                pSesInfo = sesInfoAlloc();
                if (pSesInfo == NULL)
                        rc = -ENOMEM;
@@ -1751,7 +1795,8 @@ cifs_mount(struct super_block *sb, struct cifs_sb_info *cifs_sb,
                cifs_sb->mnt_gid = volume_info.linux_gid;
                cifs_sb->mnt_file_mode = volume_info.file_mode;
                cifs_sb->mnt_dir_mode = volume_info.dir_mode;
-               cFYI(1,("file mode: 0x%x  dir mode: 0x%x",cifs_sb->mnt_file_mode,cifs_sb->mnt_dir_mode));
+               cFYI(1,("file mode: 0x%x  dir mode: 0x%x",
+                       cifs_sb->mnt_file_mode,cifs_sb->mnt_dir_mode));
 
                if(volume_info.noperm)
                        cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_NO_PERM;
@@ -1767,6 +1812,8 @@ cifs_mount(struct super_block *sb, struct cifs_sb_info *cifs_sb,
                        cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_UNX_EMUL;
                if(volume_info.nobrl)
                        cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_NO_BRL;
+               if(volume_info.cifs_acl)
+                       cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_CIFS_ACL;
 
                if(volume_info.direct_io) {
                        cFYI(1,("mounting share using direct i/o"));
@@ -1777,7 +1824,7 @@ cifs_mount(struct super_block *sb, struct cifs_sb_info *cifs_sb,
                    find_unc(sin_server.sin_addr.s_addr, volume_info.UNC,
                             volume_info.username);
                if (tcon) {
-                       cFYI(1, ("Found match on UNC path "));
+                       cFYI(1, ("Found match on UNC path"));
                        /* we can have only one retry value for a connection
                           to a share so for resources mounted more than once
                           to the same server share the last value passed in 
@@ -1926,7 +1973,7 @@ CIFSSessSetup(unsigned int xid, struct cifsSesInfo *ses,
        __u32 capabilities;
        __u16 count;
 
-       cFYI(1, ("In sesssetup "));
+       cFYI(1, ("In sesssetup"));
        if(ses == NULL)
                return -EINVAL;
        user = ses->userName;
@@ -3202,9 +3249,26 @@ CIFSTCon(unsigned int xid, struct cifsSesInfo *ses,
 
        pSMB->AndXCommand = 0xFF;
        pSMB->Flags = cpu_to_le16(TCON_EXTENDED_SECINFO);
-       pSMB->PasswordLength = cpu_to_le16(1);  /* minimum */
        bcc_ptr = &pSMB->Password[0];
-       bcc_ptr++;              /* skip password */
+       if((ses->server->secMode) & SECMODE_USER) {
+               pSMB->PasswordLength = cpu_to_le16(1);  /* minimum */
+               bcc_ptr++;              /* skip password */
+       } else {
+               pSMB->PasswordLength = cpu_to_le16(CIFS_SESSION_KEY_SIZE);
+               /* BB FIXME add code to fail this if NTLMv2 or Kerberos
+                  specified as required (when that support is added to
+                  the vfs in the future) as only NTLM or the much
+                  weaker LANMAN (which we do not send) is accepted
+                  by Samba (not sure whether other servers allow
+                  NTLMv2 password here) */
+               SMBNTencrypt(ses->password,
+                            ses->server->cryptKey,
+                            bcc_ptr);
+
+               bcc_ptr += CIFS_SESSION_KEY_SIZE;
+               *bcc_ptr = 0;
+               bcc_ptr++; /* align */
+       }
 
        if(ses->server->secMode & (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
                smb_buffer->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;
@@ -3222,7 +3286,6 @@ CIFSTCon(unsigned int xid, struct cifsSesInfo *ses,
                bcc_ptr += 2 * length;  /* convert num of 16 bit words to bytes */
                bcc_ptr += 2;   /* skip trailing null */
        } else {                /* ASCII */
-
                strcpy(bcc_ptr, tree);
                bcc_ptr += strlen(tree) + 1;
        }
index 32cc96cafa3eb050d19871f85cf4e089c68b7562..fed55e3c53dfab365c72886001ed4f240ddd6cee 100644 (file)
@@ -3,7 +3,7 @@
  *
  *   vfs operations that deal with dentries
  * 
- *   Copyright (C) International Business Machines  Corp., 2002,2003
+ *   Copyright (C) International Business Machines  Corp., 2002,2005
  *   Author(s): Steve French (sfrench@us.ibm.com)
  *
  *   This library is free software; you can redistribute it and/or modify
@@ -200,8 +200,8 @@ cifs_create(struct inode *inode, struct dentry *direntry, int mode,
                        (oplock & CIFS_CREATE_ACTION))
                        if(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID) {
                                CIFSSMBUnixSetPerms(xid, pTcon, full_path, mode,
-                                       (__u64)current->euid,
-                                       (__u64)current->egid,
+                                       (__u64)current->fsuid,
+                                       (__u64)current->fsgid,
                                        0 /* dev */,
                                        cifs_sb->local_nls, 
                                        cifs_sb->mnt_cifs_flags & 
@@ -325,7 +325,7 @@ int cifs_mknod(struct inode *inode, struct dentry *direntry, int mode,
        else if (pTcon->ses->capabilities & CAP_UNIX) {
                if(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID) {
                        rc = CIFSSMBUnixSetPerms(xid, pTcon, full_path,
-                               mode,(__u64)current->euid,(__u64)current->egid,
+                               mode,(__u64)current->fsuid,(__u64)current->fsgid,
                                device_number, cifs_sb->local_nls,
                                cifs_sb->mnt_cifs_flags & 
                                        CIFS_MOUNT_MAP_SPECIAL_CHR);
index 5ade53d7bca89624cd6b9381d4e6ba91543517be..77c990f0cb981706c55dcd936958c654d8f543e5 100644 (file)
@@ -553,13 +553,13 @@ int cifs_closedir(struct inode *inode, struct file *file)
                }
                ptmp = pCFileStruct->srch_inf.ntwrk_buf_start;
                if (ptmp) {
-   /* BB removeme BB */        cFYI(1, ("freeing smb buf in srch struct in closedir"));
+                       cFYI(1, ("closedir free smb buf in srch struct"));
                        pCFileStruct->srch_inf.ntwrk_buf_start = NULL;
                        cifs_buf_release(ptmp);
                }
                ptmp = pCFileStruct->search_resume_name;
                if (ptmp) {
-   /* BB removeme BB */        cFYI(1, ("freeing resume name in closedir"));
+                       cFYI(1, ("closedir free resume name"));
                        pCFileStruct->search_resume_name = NULL;
                        kfree(ptmp);
                }
@@ -868,10 +868,9 @@ static ssize_t cifs_write(struct file *file, const char *write_data,
                                if (rc != 0)
                                        break;
                        }
-#ifdef CONFIG_CIFS_EXPERIMENTAL
                        /* BB FIXME We can not sign across two buffers yet */
-                       if((experimEnabled) && ((pTcon->ses->server->secMode & 
-                        (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) == 0)) {
+                       if((pTcon->ses->server->secMode & 
+                        (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) == 0) {
                                struct kvec iov[2];
                                unsigned int len;
 
@@ -887,7 +886,6 @@ static ssize_t cifs_write(struct file *file, const char *write_data,
                                                iov, 1, long_op);
                        } else
                        /* BB FIXME fixup indentation of line below */
-#endif                 
                        rc = CIFSSMBWrite(xid, pTcon,
                                 open_file->netfid,
                                 min_t(const int, cifs_sb->wsize, 
@@ -1024,7 +1022,6 @@ static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
        return rc;
 }
 
-#ifdef CONFIG_CIFS_EXPERIMENTAL
 static int cifs_writepages(struct address_space *mapping,
                           struct writeback_control *wbc)
 {
@@ -1227,7 +1224,6 @@ retry:
 
        return rc;
 }
-#endif
 
 static int cifs_writepage(struct page* page, struct writeback_control *wbc)
 {
@@ -1426,6 +1422,7 @@ ssize_t cifs_user_read(struct file *file, char __user *read_data,
                rc = -EAGAIN;
                smb_read_data = NULL;
                while (rc == -EAGAIN) {
+                       int buf_type = CIFS_NO_BUFFER;
                        if ((open_file->invalidHandle) && 
                            (!open_file->closePend)) {
                                rc = cifs_reopen_file(file->f_dentry->d_inode,
@@ -1434,20 +1431,22 @@ ssize_t cifs_user_read(struct file *file, char __user *read_data,
                                        break;
                        }
                        rc = CIFSSMBRead(xid, pTcon,
-                                       open_file->netfid,
-                                       current_read_size, *poffset,
-                                       &bytes_read, &smb_read_data);
+                                        open_file->netfid,
+                                        current_read_size, *poffset,
+                                        &bytes_read, &smb_read_data,
+                                        &buf_type);
                        pSMBr = (struct smb_com_read_rsp *)smb_read_data;
                        if (copy_to_user(current_offset, 
                                         smb_read_data + 4 /* RFC1001 hdr */
                                         + le16_to_cpu(pSMBr->DataOffset), 
                                         bytes_read)) {
                                rc = -EFAULT;
-                               FreeXid(xid);
-                               return rc;
-            }
+                       }
                        if (smb_read_data) {
-                               cifs_buf_release(smb_read_data);
+                               if(buf_type == CIFS_SMALL_BUFFER)
+                                       cifs_small_buf_release(smb_read_data);
+                               else if(buf_type == CIFS_LARGE_BUFFER)
+                                       cifs_buf_release(smb_read_data);
                                smb_read_data = NULL;
                        }
                }
@@ -1480,6 +1479,7 @@ static ssize_t cifs_read(struct file *file, char *read_data, size_t read_size,
        int xid;
        char *current_offset;
        struct cifsFileInfo *open_file;
+       int buf_type = CIFS_NO_BUFFER;
 
        xid = GetXid();
        cifs_sb = CIFS_SB(file->f_dentry->d_sb);
@@ -1516,9 +1516,10 @@ static ssize_t cifs_read(struct file *file, char *read_data, size_t read_size,
                                        break;
                        }
                        rc = CIFSSMBRead(xid, pTcon,
-                                       open_file->netfid,
-                                       current_read_size, *poffset,
-                                       &bytes_read, &current_offset);
+                                        open_file->netfid,
+                                        current_read_size, *poffset,
+                                        &bytes_read, &current_offset,
+                                        &buf_type);
                }
                if (rc || (bytes_read == 0)) {
                        if (total_read) {
@@ -1616,6 +1617,7 @@ static int cifs_readpages(struct file *file, struct address_space *mapping,
        struct smb_com_read_rsp *pSMBr;
        struct pagevec lru_pvec;
        struct cifsFileInfo *open_file;
+       int buf_type = CIFS_NO_BUFFER;
 
        xid = GetXid();
        if (file->private_data == NULL) {
@@ -1672,14 +1674,17 @@ static int cifs_readpages(struct file *file, struct address_space *mapping,
                        }
 
                        rc = CIFSSMBRead(xid, pTcon,
-                                       open_file->netfid,
-                                       read_size, offset,
-                                       &bytes_read, &smb_read_data);
-
+                                        open_file->netfid,
+                                        read_size, offset,
+                                        &bytes_read, &smb_read_data,
+                                        &buf_type);
                        /* BB more RC checks ? */
                        if (rc== -EAGAIN) {
                                if (smb_read_data) {
-                                       cifs_buf_release(smb_read_data);
+                                       if(buf_type == CIFS_SMALL_BUFFER)
+                                               cifs_small_buf_release(smb_read_data);
+                                       else if(buf_type == CIFS_LARGE_BUFFER)
+                                               cifs_buf_release(smb_read_data);
                                        smb_read_data = NULL;
                                }
                        }
@@ -1736,7 +1741,10 @@ static int cifs_readpages(struct file *file, struct address_space *mapping,
                        break;
                }
                if (smb_read_data) {
-                       cifs_buf_release(smb_read_data);
+                       if(buf_type == CIFS_SMALL_BUFFER)
+                               cifs_small_buf_release(smb_read_data);
+                       else if(buf_type == CIFS_LARGE_BUFFER)
+                               cifs_buf_release(smb_read_data);
                        smb_read_data = NULL;
                }
                bytes_read = 0;
@@ -1746,7 +1754,10 @@ static int cifs_readpages(struct file *file, struct address_space *mapping,
 
 /* need to free smb_read_data buf before exit */
        if (smb_read_data) {
-               cifs_buf_release(smb_read_data);
+               if(buf_type == CIFS_SMALL_BUFFER)
+                       cifs_small_buf_release(smb_read_data);
+               else if(buf_type == CIFS_LARGE_BUFFER)
+                       cifs_buf_release(smb_read_data);
                smb_read_data = NULL;
        } 
 
@@ -1825,10 +1836,20 @@ int is_size_safe_to_change(struct cifsInodeInfo *cifsInode)
                open_file =  find_writable_file(cifsInode);
  
        if(open_file) {
+               struct cifs_sb_info *cifs_sb;
+
                /* there is not actually a write pending so let
                this handle go free and allow it to
                be closable if needed */
                atomic_dec(&open_file->wrtPending);
+
+               cifs_sb = CIFS_SB(cifsInode->vfs_inode.i_sb);
+               if ( cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO ) {
+                       /* since no page cache to corrupt on directio 
+                       we can change size safely */
+                       return 1;
+               }
+
                return 0;
        } else
                return 1;
@@ -1873,9 +1894,7 @@ struct address_space_operations cifs_addr_ops = {
        .readpage = cifs_readpage,
        .readpages = cifs_readpages,
        .writepage = cifs_writepage,
-#ifdef CONFIG_CIFS_EXPERIMENTAL
        .writepages = cifs_writepages,
-#endif
        .prepare_write = cifs_prepare_write,
        .commit_write = cifs_commit_write,
        .set_page_dirty = __set_page_dirty_nobuffers,
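
Every CIFSSMBRead() caller above repeats the same three-way cleanup keyed on the buf_type value the call now reports back. A minimal sketch of how that cleanup could be folded into one helper; the helper name is hypothetical, while the buf_type constants and release functions are the ones the patch itself uses:

        /* hypothetical helper, not part of the patch: free an SMB read
         * response according to the buffer type CIFSSMBRead() reported */
        static void cifs_release_read_buf(char *smb_read_data, int buf_type)
        {
                if (smb_read_data == NULL)
                        return;
                if (buf_type == CIFS_SMALL_BUFFER)
                        cifs_small_buf_release(smb_read_data);
                else if (buf_type == CIFS_LARGE_BUFFER)
                        cifs_buf_release(smb_read_data);
                /* CIFS_NO_BUFFER: nothing was handed back, nothing to free */
        }
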
index 3ebce9430f4a9f6af94085f998d4682027d01edc..59359911f4810c601923261c94cd49ffeebf5542 100644 (file)
@@ -229,11 +229,12 @@ static int decode_sfu_inode(struct inode * inode, __u64 size,
                         cifs_sb->mnt_cifs_flags &
                                CIFS_MOUNT_MAP_SPECIAL_CHR);
        if (rc==0) {
+               int buf_type = CIFS_NO_BUFFER;
                        /* Read header */
                rc = CIFSSMBRead(xid, pTcon,
                                 netfid,
                                 24 /* length */, 0 /* offset */,
-                                &bytes_read, &pbuf);
+                                &bytes_read, &pbuf, &buf_type);
                if((rc == 0) && (bytes_read >= 8)) {
                        if(memcmp("IntxBLK", pbuf, 8) == 0) {
                                cFYI(1,("Block device"));
@@ -267,7 +268,7 @@ static int decode_sfu_inode(struct inode * inode, __u64 size,
                } else {
                        inode->i_mode |= S_IFREG; /* then it is a file */
                        rc = -EOPNOTSUPP; /* or some unknown SFU type */        
-               }
+               }               
                CIFSSMBClose(xid, pTcon, netfid);
        }
        return rc;
@@ -750,8 +751,8 @@ int cifs_mkdir(struct inode *inode, struct dentry *direntry, int mode)
                        if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID) {
                                CIFSSMBUnixSetPerms(xid, pTcon, full_path,
                                                    mode,
-                                                   (__u64)current->euid,
-                                                   (__u64)current->egid,
+                                                   (__u64)current->fsuid,
+                                                   (__u64)current->fsgid,
                                                    0 /* dev_t */,
                                                    cifs_sb->local_nls,
                                                    cifs_sb->mnt_cifs_flags &
index 94baf6c8ecbda85946984cd2e4d524034cc94644..812c6bb0fe38adff74cc25c8dc7848cbae3315bd 100644 (file)
@@ -1,7 +1,7 @@
 /*
  *   fs/cifs/misc.c
  *
- *   Copyright (C) International Business Machines  Corp., 2002,2004
+ *   Copyright (C) International Business Machines  Corp., 2002,2005
  *   Author(s): Steve French (sfrench@us.ibm.com)
  *
  *   This library is free software; you can redistribute it and/or modify
@@ -161,6 +161,9 @@ cifs_buf_get(void)
        if (ret_buf) {
                memset(ret_buf, 0, sizeof(struct smb_hdr) + 3);
                atomic_inc(&bufAllocCount);
+#ifdef CONFIG_CIFS_STATS2
+               atomic_inc(&totBufAllocCount);
+#endif /* CONFIG_CIFS_STATS2 */
        }
 
        return ret_buf;
@@ -195,6 +198,10 @@ cifs_small_buf_get(void)
        /* No need to clear memory here, cleared in header assemble */
        /*      memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
                atomic_inc(&smBufAllocCount);
+#ifdef CONFIG_CIFS_STATS2
+               atomic_inc(&totSmBufAllocCount);
+#endif /* CONFIG_CIFS_STATS2 */
+
        }
        return ret_buf;
 }
@@ -292,7 +299,7 @@ header_assemble(struct smb_hdr *buffer, char smb_command /* command */ ,
        struct cifsSesInfo * ses;
        char *temp = (char *) buffer;
 
-       memset(temp,0,MAX_CIFS_HDR_SIZE);
+       memset(temp,0,256); /* bigger than MAX_CIFS_HDR_SIZE */
 
        buffer->smb_buf_length =
            (2 * word_count) + sizeof (struct smb_hdr) -
@@ -348,12 +355,12 @@ header_assemble(struct smb_hdr *buffer, char smb_command /* command */ ,
                /*  BB Add support for establishing new tCon and SMB Session  */
                /*      with userid/password pairs found on the smb session   */ 
                /*      for other target tcp/ip addresses               BB    */
-                               if(current->uid != treeCon->ses->linux_uid) {
-                                       cFYI(1,("Multiuser mode and UID did not match tcon uid "));
+                               if(current->fsuid != treeCon->ses->linux_uid) {
+                                       cFYI(1,("Multiuser mode and UID did not match tcon uid"));
                                        read_lock(&GlobalSMBSeslock);
                                        list_for_each(temp_item, &GlobalSMBSessionList) {
                                                ses = list_entry(temp_item, struct cifsSesInfo, cifsSessionList);
-                                               if(ses->linux_uid == current->uid) {
+                                               if(ses->linux_uid == current->fsuid) {
                                                        if(ses->server == treeCon->ses->server) {
                                                                cFYI(1,("found matching uid substitute right smb_uid"));  
                                                                buffer->Uid = ses->Suid;
index 9bdaaecae36f683ef52196b4d54cee256b006029..288cc048d37f1e5b2373c8dd04157bcc90383bcc 100644 (file)
@@ -214,8 +214,7 @@ static void fill_in_inode(struct inode *tmp_inode,
                        tmp_inode->i_fop = &cifs_file_nobrl_ops;
                else
                        tmp_inode->i_fop = &cifs_file_ops;
-               if(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
-                       tmp_inode->i_fop->lock = NULL;
+
                tmp_inode->i_data.a_ops = &cifs_addr_ops;
                if((cifs_sb->tcon) && (cifs_sb->tcon->ses) &&
                   (cifs_sb->tcon->ses->server->maxBuf <
@@ -327,12 +326,18 @@ static void unix_fill_in_inode(struct inode *tmp_inode,
        if (S_ISREG(tmp_inode->i_mode)) {
                cFYI(1, ("File inode"));
                tmp_inode->i_op = &cifs_file_inode_ops;
-               if(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO)
-                       tmp_inode->i_fop = &cifs_file_direct_ops;
+
+               if(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) {
+                       if(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
+                               tmp_inode->i_fop = &cifs_file_direct_nobrl_ops;
+                       else
+                               tmp_inode->i_fop = &cifs_file_direct_ops;
+               
+               } else if(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
+                       tmp_inode->i_fop = &cifs_file_nobrl_ops;
                else
                        tmp_inode->i_fop = &cifs_file_ops;
-               if(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
-                       tmp_inode->i_fop->lock = NULL;
+
                tmp_inode->i_data.a_ops = &cifs_addr_ops;
                if((cifs_sb->tcon) && (cifs_sb->tcon->ses) &&
                   (cifs_sb->tcon->ses->server->maxBuf < 
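
Aside on the two hunks above: cifs_file_ops and friends are shared, static file_operations tables, so the removed "i_fop->lock = NULL" lines altered byte-range-lock behaviour for every CIFS inode in the system, not just the nobrl mount being set up. Selecting a distinct *_nobrl table per inode keeps the choice local; a hedged sketch of that selection (pick_fops and its flag arguments are illustrative, the four operations tables are the ones referenced in the patch):

        static void pick_fops(struct inode *tmp_inode, int direct_io, int nobrl)
        {
                if (direct_io)
                        tmp_inode->i_fop = nobrl ? &cifs_file_direct_nobrl_ops
                                                 : &cifs_file_direct_ops;
                else
                        tmp_inode->i_fop = nobrl ? &cifs_file_nobrl_ops
                                                 : &cifs_file_ops;
        }
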
index 9222033cad8ec34a5623b21402ca3998c394344a..aede606132aaa091028aa515b60f5addc6911a69 100644 (file)
 /* NB: unlike smb/cifs packets, the RFC1002 structures are big endian */
 
        /* RFC 1002 session packet types */
-#define RFC1002_SESSION_MESASAGE 0x00
+#define RFC1002_SESSION_MESSAGE 0x00
 #define RFC1002_SESSION_REQUEST  0x81
 #define RFC1002_POSITIVE_SESSION_RESPONSE 0x82
 #define RFC1002_NEGATIVE_SESSION_RESPONSE 0x83
-#define RFC1002_RETARGET_SESSION_RESPONSE 0x83
+#define RFC1002_RETARGET_SESSION_RESPONSE 0x84
 #define RFC1002_SESSION_KEEP_ALIVE 0x85
 
         /* RFC 1002 flags (only one defined) */
index f8871196098c9abe371a64a1df5d269cb407f503..7b98792150ea7ebba3d449e994906483dcadf4a3 100644 (file)
@@ -206,7 +206,6 @@ smb_send(struct socket *ssocket, struct smb_hdr *smb_buffer,
        return rc;
 }
 
-#ifdef CONFIG_CIFS_EXPERIMENTAL
 static int
 smb_send2(struct socket *ssocket, struct kvec *iov, int n_vec,
          struct sockaddr *sin)
@@ -299,7 +298,7 @@ smb_send2(struct socket *ssocket, struct kvec *iov, int n_vec,
 
 int
 SendReceive2(const unsigned int xid, struct cifsSesInfo *ses, 
-            struct kvec *iov, int n_vec, int *pbytes_returned,
+            struct kvec *iov, int n_vec, int * pRespBufType /* ret */, 
             const int long_op)
 {
        int rc = 0;
@@ -307,6 +306,8 @@ SendReceive2(const unsigned int xid, struct cifsSesInfo *ses,
        unsigned long timeout;
        struct mid_q_entry *midQ;
        struct smb_hdr *in_buf = iov[0].iov_base;
+       
+       *pRespBufType = CIFS_NO_BUFFER;  /* no response buf yet */
 
        if (ses == NULL) {
                cERROR(1,("Null smb session"));
@@ -392,8 +393,7 @@ SendReceive2(const unsigned int xid, struct cifsSesInfo *ses,
                return -ENOMEM;
        }
 
-/* BB FIXME */
-/*     rc = cifs_sign_smb2(iov, n_vec, ses->server, &midQ->sequence_number); */
+       rc = cifs_sign_smb2(iov, n_vec, ses->server, &midQ->sequence_number);
 
        midQ->midState = MID_REQUEST_SUBMITTED;
 #ifdef CONFIG_CIFS_STATS2
@@ -489,21 +489,23 @@ SendReceive2(const unsigned int xid, struct cifsSesInfo *ses,
                        receive_len, xid));
                rc = -EIO;
        } else {                /* rcvd frame is ok */
-
                if (midQ->resp_buf && 
                        (midQ->midState == MID_RESPONSE_RECEIVED)) {
-                       in_buf->smb_buf_length = receive_len;
-                       /* BB verify that length would not overrun small buf */
-                       memcpy((char *)in_buf + 4,
-                              (char *)midQ->resp_buf + 4,
-                              receive_len);
 
-                       dump_smb(in_buf, 80);
+                       iov[0].iov_base = (char *)midQ->resp_buf;
+                       if(midQ->largeBuf)
+                               *pRespBufType = CIFS_LARGE_BUFFER;
+                       else
+                               *pRespBufType = CIFS_SMALL_BUFFER;
+                       iov[0].iov_len = receive_len + 4;
+                       iov[1].iov_len = 0;
+
+                       dump_smb(midQ->resp_buf, 80);
                        /* convert the length into a more usable form */
                        if((receive_len > 24) &&
                           (ses->server->secMode & (SECMODE_SIGN_REQUIRED |
                                        SECMODE_SIGN_ENABLED))) {
-                               rc = cifs_verify_signature(in_buf,
+                               rc = cifs_verify_signature(midQ->resp_buf,
                                                ses->server->mac_signing_key,
                                                midQ->sequence_number+1);
                                if(rc) {
@@ -512,18 +514,19 @@ SendReceive2(const unsigned int xid, struct cifsSesInfo *ses,
                                }
                        }
 
-                       *pbytes_returned = in_buf->smb_buf_length;
-
                        /* BB special case reconnect tid and uid here? */
                        /* BB special case Errbadpassword and pwdexpired here */
-                       rc = map_smb_to_linux_error(in_buf);
+                       rc = map_smb_to_linux_error(midQ->resp_buf);
 
                        /* convert ByteCount if necessary */
                        if (receive_len >=
                            sizeof (struct smb_hdr) -
                            4 /* do not count RFC1001 header */  +
-                           (2 * in_buf->WordCount) + 2 /* bcc */ )
-                               BCC(in_buf) = le16_to_cpu(BCC_LE(in_buf));
+                           (2 * midQ->resp_buf->WordCount) + 2 /* bcc */ )
+                               BCC(midQ->resp_buf) = 
+                                       le16_to_cpu(BCC_LE(midQ->resp_buf));
+                       midQ->resp_buf = NULL;  /* mark it so will not be freed
+                                               by DeleteMidQEntry */
                } else {
                        rc = -EIO;
                        cFYI(1,("Bad MID state?"));
@@ -549,7 +552,6 @@ out_unlock2:
 
        return rc;
 }
-#endif /* CIFS_EXPERIMENTAL */
 
 int
 SendReceive(const unsigned int xid, struct cifsSesInfo *ses,
@@ -790,7 +792,7 @@ SendReceive(const unsigned int xid, struct cifsSesInfo *ses,
                                BCC(out_buf) = le16_to_cpu(BCC_LE(out_buf));
                } else {
                        rc = -EIO;
-                       cERROR(1,("Bad MID state? "));
+                       cERROR(1,("Bad MID state?"));
                }
        }
 cifs_no_response_exit:
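
With the CONFIG_CIFS_EXPERIMENTAL guards removed, SendReceive2() becomes the regular vectored transport path, and its contract changes: rather than copying the reply into the caller's buffer, it points iov[0] at the mid's response buffer and reports that buffer's type through *pRespBufType, leaving the free to the caller. A hedged sketch of a caller under the new contract (request and request_len are placeholders for whatever SMB the caller built; the function, constants and release calls are those visible in the patch):

        struct kvec iov[2];
        int resp_buf_type = CIFS_NO_BUFFER;
        int rc;

        iov[0].iov_base = request;              /* caller-built SMB request */
        iov[0].iov_len = request_len + 4;       /* include RFC1001 length field */
        iov[1].iov_len = 0;

        rc = SendReceive2(xid, ses, iov, 1 /* n_vec */, &resp_buf_type, 0 /* long_op */);

        /* ... parse the response now sitting at iov[0].iov_base ... */

        if (resp_buf_type == CIFS_SMALL_BUFFER)
                cifs_small_buf_release(iov[0].iov_base);
        else if (resp_buf_type == CIFS_LARGE_BUFFER)
                cifs_buf_release(iov[0].iov_base);
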
index f375f87c7dbd3698a07290a9953e219cf49b0513..777e3363c2a4dd95ec3c05f51678ae3810369229 100644 (file)
@@ -254,7 +254,8 @@ ssize_t cifs_getxattr(struct dentry * direntry, const char * ea_name,
                rc = CIFSSMBQueryEA(xid,pTcon,full_path,ea_name,ea_value,
                        buf_size, cifs_sb->local_nls,
                        cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
-       } else if(strncmp(ea_name,POSIX_ACL_XATTR_ACCESS,strlen(POSIX_ACL_XATTR_ACCESS)) == 0) {
+       } else if(strncmp(ea_name,POSIX_ACL_XATTR_ACCESS,
+                         strlen(POSIX_ACL_XATTR_ACCESS)) == 0) {
 #ifdef CONFIG_CIFS_POSIX
                if(sb->s_flags & MS_POSIXACL)
                        rc = CIFSSMBGetPosixACL(xid, pTcon, full_path,
@@ -262,10 +263,27 @@ ssize_t cifs_getxattr(struct dentry * direntry, const char * ea_name,
                                cifs_sb->local_nls,
                                cifs_sb->mnt_cifs_flags & 
                                        CIFS_MOUNT_MAP_SPECIAL_CHR);
+/*             else if(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL) {
+                       __u16 fid;
+                       int oplock = FALSE;
+                       rc = CIFSSMBOpen(xid, pTcon, full_path,
+                                        FILE_OPEN, GENERIC_READ, 0, &fid,
+                                        &oplock, NULL, cifs_sb->local_nls,
+                                        cifs_sb->mnt_cifs_flags &
+                                        CIFS_MOUNT_MAP_SPECIAL_CHR);
+                       if(rc == 0) {
+                               rc = CIFSSMBGetCIFSACL(xid, pTcon, fid,
+                                       ea_value, buf_size,
+                                       ACL_TYPE_ACCESS);
+                               CIFSSMBClose(xid, pTcon, fid)
+                       }
+               } */  /* BB enable after fixing up return data */
+                               
 #else 
                cFYI(1,("query POSIX ACL not supported yet"));
 #endif /* CONFIG_CIFS_POSIX */
-       } else if(strncmp(ea_name,POSIX_ACL_XATTR_DEFAULT,strlen(POSIX_ACL_XATTR_DEFAULT)) == 0) {
+       } else if(strncmp(ea_name,POSIX_ACL_XATTR_DEFAULT,
+                         strlen(POSIX_ACL_XATTR_DEFAULT)) == 0) {
 #ifdef CONFIG_CIFS_POSIX
                if(sb->s_flags & MS_POSIXACL)
                        rc = CIFSSMBGetPosixACL(xid, pTcon, full_path,
index 2468ac1df2f04cf5cfc1bde19b4924c9f135ce09..ff0bafcff7204ac1dc24204ef0c3a47f919eed6c 100644 (file)
@@ -53,6 +53,8 @@
 #include <asm/mmu_context.h>
 #include <asm/ioctls.h>
 
+extern void sigset_from_compat(sigset_t *set, compat_sigset_t *compat);
+
 /*
  * Not all architectures have sys_utime, so implement this in terms
  * of sys_utimes.
@@ -68,10 +70,10 @@ asmlinkage long compat_sys_utime(char __user *filename, struct compat_utimbuf __
                tv[0].tv_usec = 0;
                tv[1].tv_usec = 0;
        }
-       return do_utimes(filename, t ? tv : NULL);
+       return do_utimes(AT_FDCWD, filename, t ? tv : NULL);
 }
 
-asmlinkage long compat_sys_utimes(char __user *filename, struct compat_timeval __user *t)
+asmlinkage long compat_sys_futimesat(int dfd, char __user *filename, struct compat_timeval __user *t)
 {
        struct timeval tv[2];
 
@@ -82,14 +84,19 @@ asmlinkage long compat_sys_utimes(char __user *filename, struct compat_timeval _
                    get_user(tv[1].tv_usec, &t[1].tv_usec))
                        return -EFAULT; 
        } 
-       return do_utimes(filename, t ? tv : NULL);
+       return do_utimes(dfd, filename, t ? tv : NULL);
+}
+
+asmlinkage long compat_sys_utimes(char __user *filename, struct compat_timeval __user *t)
+{
+       return compat_sys_futimesat(AT_FDCWD, filename, t);
 }
 
 asmlinkage long compat_sys_newstat(char __user * filename,
                struct compat_stat __user *statbuf)
 {
        struct kstat stat;
-       int error = vfs_stat(filename, &stat);
+       int error = vfs_stat_fd(AT_FDCWD, filename, &stat);
 
        if (!error)
                error = cp_compat_stat(&stat, statbuf);
@@ -100,10 +107,31 @@ asmlinkage long compat_sys_newlstat(char __user * filename,
                struct compat_stat __user *statbuf)
 {
        struct kstat stat;
-       int error = vfs_lstat(filename, &stat);
+       int error = vfs_lstat_fd(AT_FDCWD, filename, &stat);
+
+       if (!error)
+               error = cp_compat_stat(&stat, statbuf);
+       return error;
+}
+
+asmlinkage long compat_sys_newfstatat(int dfd, char __user *filename,
+               struct compat_stat __user *statbuf, int flag)
+{
+       struct kstat stat;
+       int error = -EINVAL;
+
+       if ((flag & ~AT_SYMLINK_NOFOLLOW) != 0)
+               goto out;
+
+       if (flag & AT_SYMLINK_NOFOLLOW)
+               error = vfs_lstat_fd(dfd, filename, &stat);
+       else
+               error = vfs_stat_fd(dfd, filename, &stat);
 
        if (!error)
                error = cp_compat_stat(&stat, statbuf);
+
+out:
        return error;
 }
 
@@ -1290,7 +1318,17 @@ out:
 asmlinkage long
 compat_sys_open(const char __user *filename, int flags, int mode)
 {
-       return do_sys_open(filename, flags, mode);
+       return do_sys_open(AT_FDCWD, filename, flags, mode);
+}
+
+/*
+ * Exactly like fs/open.c:sys_openat(), except that it doesn't set the
+ * O_LARGEFILE flag.
+ */
+asmlinkage long
+compat_sys_openat(int dfd, const char __user *filename, int flags, int mode)
+{
+       return do_sys_open(dfd, filename, flags, mode);
 }
 
 /*
@@ -1621,36 +1659,14 @@ static void select_bits_free(void *bits, int size)
 #define MAX_SELECT_SECONDS \
        ((unsigned long) (MAX_SCHEDULE_TIMEOUT / HZ)-1)
 
-asmlinkage long
-compat_sys_select(int n, compat_ulong_t __user *inp, compat_ulong_t __user *outp,
-               compat_ulong_t __user *exp, struct compat_timeval __user *tvp)
+int compat_core_sys_select(int n, compat_ulong_t __user *inp,
+       compat_ulong_t __user *outp, compat_ulong_t __user *exp, s64 *timeout)
 {
        fd_set_bits fds;
        char *bits;
-       long timeout;
        int size, max_fdset, ret = -EINVAL;
        struct fdtable *fdt;
 
-       timeout = MAX_SCHEDULE_TIMEOUT;
-       if (tvp) {
-               time_t sec, usec;
-
-               if (!access_ok(VERIFY_READ, tvp, sizeof(*tvp))
-                   || __get_user(sec, &tvp->tv_sec)
-                   || __get_user(usec, &tvp->tv_usec)) {
-                       ret = -EFAULT;
-                       goto out_nofds;
-               }
-
-               if (sec < 0 || usec < 0)
-                       goto out_nofds;
-
-               if ((unsigned long) sec < MAX_SELECT_SECONDS) {
-                       timeout = ROUND_UP(usec, 1000000/HZ);
-                       timeout += sec * (unsigned long) HZ;
-               }
-       }
-
        if (n < 0)
                goto out_nofds;
 
@@ -1687,19 +1703,7 @@ compat_sys_select(int n, compat_ulong_t __user *inp, compat_ulong_t __user *outp
        zero_fd_set(n, fds.res_out);
        zero_fd_set(n, fds.res_ex);
 
-       ret = do_select(n, &fds, &timeout);
-
-       if (tvp && !(current->personality & STICKY_TIMEOUTS)) {
-               time_t sec = 0, usec = 0;
-               if (timeout) {
-                       sec = timeout / HZ;
-                       usec = timeout % HZ;
-                       usec *= (1000000/HZ);
-               }
-               if (put_user(sec, &tvp->tv_sec) ||
-                   put_user(usec, &tvp->tv_usec))
-                       ret = -EFAULT;
-       }
+       ret = do_select(n, &fds, timeout);
 
        if (ret < 0)
                goto out;
@@ -1720,6 +1724,224 @@ out_nofds:
        return ret;
 }
 
+asmlinkage long compat_sys_select(int n, compat_ulong_t __user *inp,
+       compat_ulong_t __user *outp, compat_ulong_t __user *exp,
+       struct compat_timeval __user *tvp)
+{
+       s64 timeout = -1;
+       struct compat_timeval tv;
+       int ret;
+
+       if (tvp) {
+               if (copy_from_user(&tv, tvp, sizeof(tv)))
+                       return -EFAULT;
+
+               if (tv.tv_sec < 0 || tv.tv_usec < 0)
+                       return -EINVAL;
+
+               /* Cast to u64 to make GCC stop complaining */
+               if ((u64)tv.tv_sec >= (u64)MAX_INT64_SECONDS)
+                       timeout = -1;   /* infinite */
+               else {
+                       timeout = ROUND_UP(tv.tv_usec, 1000000/HZ);
+                       timeout += tv.tv_sec * HZ;
+               }
+       }
+
+       ret = compat_core_sys_select(n, inp, outp, exp, &timeout);
+
+       if (tvp) {
+               if (current->personality & STICKY_TIMEOUTS)
+                       goto sticky;
+               tv.tv_usec = jiffies_to_usecs(do_div((*(u64*)&timeout), HZ));
+               tv.tv_sec = timeout;
+               if (copy_to_user(tvp, &tv, sizeof(tv))) {
+sticky:
+                       /*
+                        * If an application puts its timeval in read-only
+                        * memory, we don't want the Linux-specific update to
+                        * the timeval to cause a fault after the select has
+                        * completed successfully. However, because we're not
+                        * updating the timeval, we can't restart the system
+                        * call.
+                        */
+                       if (ret == -ERESTARTNOHAND)
+                               ret = -EINTR;
+               }
+       }
+
+       return ret;
+}
+
+#ifdef TIF_RESTORE_SIGMASK
+asmlinkage long compat_sys_pselect7(int n, compat_ulong_t __user *inp,
+       compat_ulong_t __user *outp, compat_ulong_t __user *exp,
+       struct compat_timespec __user *tsp, compat_sigset_t __user *sigmask,
+       compat_size_t sigsetsize)
+{
+       compat_sigset_t ss32;
+       sigset_t ksigmask, sigsaved;
+       long timeout = MAX_SCHEDULE_TIMEOUT;
+       struct compat_timespec ts;
+       int ret;
+
+       if (tsp) {
+               if (copy_from_user(&ts, tsp, sizeof(ts)))
+                       return -EFAULT;
+
+               if (ts.tv_sec < 0 || ts.tv_nsec < 0)
+                       return -EINVAL;
+       }
+
+       if (sigmask) {
+               if (sigsetsize != sizeof(compat_sigset_t))
+                       return -EINVAL;
+               if (copy_from_user(&ss32, sigmask, sizeof(ss32)))
+                       return -EFAULT;
+               sigset_from_compat(&ksigmask, &ss32);
+
+               sigdelsetmask(&ksigmask, sigmask(SIGKILL)|sigmask(SIGSTOP));
+               sigprocmask(SIG_SETMASK, &ksigmask, &sigsaved);
+       }
+
+       do {
+               if (tsp) {
+                       if ((unsigned long)ts.tv_sec < MAX_SELECT_SECONDS) {
+                               timeout = ROUND_UP(ts.tv_nsec, 1000000000/HZ);
+                               timeout += ts.tv_sec * (unsigned long)HZ;
+                               ts.tv_sec = 0;
+                               ts.tv_nsec = 0;
+                       } else {
+                               ts.tv_sec -= MAX_SELECT_SECONDS;
+                               timeout = MAX_SELECT_SECONDS * HZ;
+                       }
+               }
+
+               ret = compat_core_sys_select(n, inp, outp, exp, &timeout);
+
+       } while (!ret && !timeout && tsp && (ts.tv_sec || ts.tv_nsec));
+
+       if (tsp && !(current->personality & STICKY_TIMEOUTS)) {
+               ts.tv_sec += timeout / HZ;
+               ts.tv_nsec += (timeout % HZ) * (1000000000/HZ);
+               if (ts.tv_nsec >= 1000000000) {
+                       ts.tv_sec++;
+                       ts.tv_nsec -= 1000000000;
+               }
+               (void)copy_to_user(tsp, &ts, sizeof(ts));
+       }
+
+       if (ret == -ERESTARTNOHAND) {
+               /*
+                * Don't restore the signal mask yet. Let do_signal() deliver
+                * the signal on the way back to userspace, before the signal
+                * mask is restored.
+                */
+               if (sigmask) {
+                       memcpy(&current->saved_sigmask, &sigsaved,
+                                       sizeof(sigsaved));
+                       set_thread_flag(TIF_RESTORE_SIGMASK);
+               }
+       } else if (sigmask)
+               sigprocmask(SIG_SETMASK, &sigsaved, NULL);
+
+       return ret;
+}
+
+asmlinkage long compat_sys_pselect6(int n, compat_ulong_t __user *inp,
+       compat_ulong_t __user *outp, compat_ulong_t __user *exp,
+       struct compat_timespec __user *tsp, void __user *sig)
+{
+       compat_size_t sigsetsize = 0;
+       compat_uptr_t up = 0;
+
+       if (sig) {
+               if (!access_ok(VERIFY_READ, sig,
+                               sizeof(compat_uptr_t)+sizeof(compat_size_t)) ||
+                       __get_user(up, (compat_uptr_t __user *)sig) ||
+                       __get_user(sigsetsize,
+                               (compat_size_t __user *)(sig+sizeof(up))))
+                       return -EFAULT;
+       }
+       return compat_sys_pselect7(n, inp, outp, exp, tsp, compat_ptr(up),
+                                       sigsetsize);
+}
+
+asmlinkage long compat_sys_ppoll(struct pollfd __user *ufds,
+       unsigned int nfds, struct compat_timespec __user *tsp,
+       const compat_sigset_t __user *sigmask, compat_size_t sigsetsize)
+{
+       compat_sigset_t ss32;
+       sigset_t ksigmask, sigsaved;
+       struct compat_timespec ts;
+       s64 timeout = -1;
+       int ret;
+
+       if (tsp) {
+               if (copy_from_user(&ts, tsp, sizeof(ts)))
+                       return -EFAULT;
+
+               /* We assume that ts.tv_sec is always lower than
+                  the number of seconds that can be expressed in
+                  an s64. Otherwise the compiler bitches at us */
+               timeout = ROUND_UP(ts.tv_nsec, 1000000000/HZ);
+               timeout += ts.tv_sec * HZ;
+       }
+
+       if (sigmask) {
+               if (sigsetsize != sizeof(compat_sigset_t))
+                       return -EINVAL;
+               if (copy_from_user(&ss32, sigmask, sizeof(ss32)))
+                       return -EFAULT;
+               sigset_from_compat(&ksigmask, &ss32);
+
+               sigdelsetmask(&ksigmask, sigmask(SIGKILL)|sigmask(SIGSTOP));
+               sigprocmask(SIG_SETMASK, &ksigmask, &sigsaved);
+       }
+
+       ret = do_sys_poll(ufds, nfds, &timeout);
+
+       /* We can restart this syscall, usually */
+       if (ret == -EINTR) {
+               /*
+                * Don't restore the signal mask yet. Let do_signal() deliver
+                * the signal on the way back to userspace, before the signal
+                * mask is restored.
+                */
+               if (sigmask) {
+                       memcpy(&current->saved_sigmask, &sigsaved,
+                               sizeof(sigsaved));
+                       set_thread_flag(TIF_RESTORE_SIGMASK);
+               }
+               ret = -ERESTARTNOHAND;
+       } else if (sigmask)
+               sigprocmask(SIG_SETMASK, &sigsaved, NULL);
+
+       if (tsp && timeout >= 0) {
+               if (current->personality & STICKY_TIMEOUTS)
+                       goto sticky;
+               /* Yes, we know it's actually an s64, but it's also positive. */
+               ts.tv_nsec = jiffies_to_usecs(do_div((*(u64*)&timeout), HZ)) * 1000;
+               ts.tv_sec = timeout;
+               if (copy_to_user(tsp, &ts, sizeof(ts))) {
+sticky:
+                       /*
+                        * If an application puts its timeval in read-only
+                        * memory, we don't want the Linux-specific update to
+                        * the timeval to cause a fault after the select has
+                        * completed successfully. However, because we're not
+                        * updating the timeval, we can't restart the system
+                        * call.
+                        */
+                       if (ret == -ERESTARTNOHAND && timeout >= 0)
+                               ret = -EINTR;
+               }
+       }
+
+       return ret;
+}
+#endif /* TIF_RESTORE_SIGMASK */
+
 #if defined(CONFIG_NFSD) || defined(CONFIG_NFSD_MODULE)
 /* Stuff for NFS server syscalls... */
 struct compat_nfsctl_svc {
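
The new compat_core_sys_select()/compat_sys_ppoll() paths above keep the timeout as a signed 64-bit jiffies count and split it back into seconds and microseconds on the way out with do_div(). A small stand-alone sketch of that arithmetic (plain user-space C; the HZ value is illustrative and ROUND_UP mirrors the macro used by the select code):

        #include <stdio.h>
        #include <stdint.h>

        #define HZ 250                                  /* illustrative tick rate */
        #define ROUND_UP(x, y) (((x) + (y) - 1) / (y))

        int main(void)
        {
                long tv_sec = 2, tv_usec = 300000;      /* caller-supplied timeout */

                /* forward conversion, as compat_sys_select() does before calling
                 * compat_core_sys_select(): usecs round up to whole ticks */
                int64_t timeout = ROUND_UP(tv_usec, 1000000 / HZ) + (int64_t)tv_sec * HZ;

                /* reverse conversion on return: in the kernel do_div() divides
                 * the 64-bit value in place and returns the remainder; plain
                 * division and modulo show the same split here */
                int64_t sec = timeout / HZ;
                int64_t usec = (timeout % HZ) * (1000000 / HZ);

                printf("%lds+%ldus -> %lld jiffies -> %llds+%lldus\n",
                       tv_sec, tv_usec, (long long)timeout,
                       (long long)sec, (long long)usec);
                return 0;
        }
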
index 62b40af68cc4e40da9c749ce36bef9816894a003..055378d2513e87359b899cafc2b981e1bd392407 100644 (file)
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -477,7 +477,7 @@ struct file *open_exec(const char *name)
        int err;
        struct file *file;
 
-       err = path_lookup_open(name, LOOKUP_FOLLOW, &nd, FMODE_READ);
+       err = path_lookup_open(AT_FDCWD, name, LOOKUP_FOLLOW, &nd, FMODE_READ);
        file = ERR_PTR(err);
 
        if (!err) {
index 5bfe40085fbc7bc75f4f3be02280dee65d26f131..b06b54f1bbbbda611e39252d2df5a86b6afaa3c8 100644 (file)
@@ -11,6 +11,33 @@ struct export_operations export_op_default;
 
 #define dprintk(fmt, args...) do{}while(0)
 
+static struct dentry *
+find_acceptable_alias(struct dentry *result,
+               int (*acceptable)(void *context, struct dentry *dentry),
+               void *context)
+{
+       struct dentry *dentry, *toput = NULL;
+
+       spin_lock(&dcache_lock);
+       list_for_each_entry(dentry, &result->d_inode->i_dentry, d_alias) {
+               dget_locked(dentry);
+               spin_unlock(&dcache_lock);
+               if (toput)
+                       dput(toput);
+               if (dentry != result && acceptable(context, dentry)) {
+                       dput(result);
+                       return dentry;
+               }
+               spin_lock(&dcache_lock);
+               toput = dentry;
+       }
+       spin_unlock(&dcache_lock);
+
+       if (toput)
+               dput(toput);
+       return NULL;
+}
+
 /**
  * find_exported_dentry - helper routine to implement export_operations->decode_fh
  * @sb:                The &super_block identifying the filesystem
@@ -52,8 +79,7 @@ find_exported_dentry(struct super_block *sb, void *obj, void *parent,
        struct dentry *target_dir;
        int err;
        struct export_operations *nops = sb->s_export_op;
-       struct list_head *le, *head;
-       struct dentry *toput = NULL;
+       struct dentry *alias;
        int noprogress;
        char nbuf[NAME_MAX+1];
 
@@ -79,27 +105,10 @@ find_exported_dentry(struct super_block *sb, void *obj, void *parent,
                        /* there is no other dentry, so fail */
                        goto err_result;
                }
-               /* try any other aliases */
-               spin_lock(&dcache_lock);
-               head = &result->d_inode->i_dentry;
-               list_for_each(le, head) {
-                       struct dentry *dentry = list_entry(le, struct dentry, d_alias);
-                       dget_locked(dentry);
-                       spin_unlock(&dcache_lock);
-                       if (toput)
-                               dput(toput);
-                       toput = NULL;
-                       if (dentry != result &&
-                           acceptable(context, dentry)) {
-                               dput(result);
-                               return dentry;
-                       }
-                       spin_lock(&dcache_lock);
-                       toput = dentry;
-               }
-               spin_unlock(&dcache_lock);
-               if (toput)
-                       dput(toput);
+
+               alias = find_acceptable_alias(result, acceptable, context);
+               if (alias)
+                       return alias;
        }                       
 
        /* It's a directory, or we are required to confirm the file's
@@ -258,26 +267,10 @@ find_exported_dentry(struct super_block *sb, void *obj, void *parent,
        /* now result is properly connected, it is our best bet */
        if (acceptable(context, result))
                return result;
-       /* one last try of the aliases.. */
-       spin_lock(&dcache_lock);
-       toput = NULL;
-       head = &result->d_inode->i_dentry;
-       list_for_each(le, head) {
-               struct dentry *dentry = list_entry(le, struct dentry, d_alias);
-               dget_locked(dentry);
-               spin_unlock(&dcache_lock);
-               if (toput) dput(toput);
-               if (dentry != result &&
-                   acceptable(context, dentry)) {
-                       dput(result);
-                       return dentry;
-               }
-               spin_lock(&dcache_lock);
-               toput = dentry;
-       }
-       spin_unlock(&dcache_lock);
-       if (toput)
-               dput(toput);
+
+       alias = find_acceptable_alias(result, acceptable, context);
+       if (alias)
+               return alias;
 
        /* drat - I just cannot find anything acceptable */
        dput(result);
index 89450ae322280b499906611f208fdd299aa08a7e..f13f1494d4fe8099ca8779c8aaf0771ca1ec95c9 100644 (file)
@@ -64,7 +64,6 @@ int __hfs_brec_find(struct hfs_bnode *bnode, struct hfs_find_data *fd)
                else
                        e = rec - 1;
        } while (b <= e);
-       //printk("%d: %d,%d,%d\n", bnode->this, b, e, rec);
        if (rec != e && e >= 0) {
                len = hfs_brec_lenoff(bnode, e, &off);
                keylen = hfs_brec_keylen(bnode, e);
@@ -127,7 +126,7 @@ int hfs_brec_find(struct hfs_find_data *fd)
        return res;
 
 invalid:
-       printk("HFS: inconsistency in B*Tree (%d,%d,%d,%u,%u)\n",
+       printk(KERN_ERR "hfs: inconsistency in B*Tree (%d,%d,%d,%u,%u)\n",
                height, bnode->height, bnode->type, nidx, parent);
        res = -EIO;
 release:
index 3d5cdc6847c06f4200b93804710804541a909f67..a7a7d77f3fd3d61e8762f2394117f937f03438e8 100644 (file)
@@ -198,7 +198,7 @@ void hfs_bnode_unlink(struct hfs_bnode *node)
 
        // move down?
        if (!node->prev && !node->next) {
-               printk("hfs_btree_del_level\n");
+               printk(KERN_DEBUG "hfs_btree_del_level\n");
        }
        if (!node->parent) {
                tree->root = 0;
@@ -219,7 +219,7 @@ struct hfs_bnode *hfs_bnode_findhash(struct hfs_btree *tree, u32 cnid)
        struct hfs_bnode *node;
 
        if (cnid >= tree->node_count) {
-               printk("HFS: request for non-existent node %d in B*Tree\n", cnid);
+               printk(KERN_ERR "hfs: request for non-existent node %d in B*Tree\n", cnid);
                return NULL;
        }
 
@@ -242,7 +242,7 @@ static struct hfs_bnode *__hfs_bnode_create(struct hfs_btree *tree, u32 cnid)
        loff_t off;
 
        if (cnid >= tree->node_count) {
-               printk("HFS: request for non-existent node %d in B*Tree\n", cnid);
+               printk(KERN_ERR "hfs: request for non-existent node %d in B*Tree\n", cnid);
                return NULL;
        }
 
index 7d8fff2c25fc93f9814a1ee9c5d2baa1f81a7d46..5c87cf4801fcb00f05311d2883c9977dbfa5ef2f 100644 (file)
@@ -362,7 +362,7 @@ again:
                end_off = hfs_bnode_read_u16(parent, end_rec_off);
                if (end_rec_off - end_off < diff) {
 
-                       printk("splitting index node...\n");
+                       printk(KERN_DEBUG "hfs: splitting index node...\n");
                        fd->bnode = parent;
                        new_node = hfs_bnode_split(fd);
                        if (IS_ERR(new_node))
index 394725efa1c8b5c4354e1455ef4d277289d6d5ea..7bb11edd148891694586da25a7e7926b21973a1b 100644 (file)
@@ -111,7 +111,7 @@ void hfs_btree_close(struct hfs_btree *tree)
                while ((node = tree->node_hash[i])) {
                        tree->node_hash[i] = node->next_hash;
                        if (atomic_read(&node->refcnt))
-                               printk("HFS: node %d:%d still has %d user(s)!\n",
+                               printk(KERN_ERR "hfs: node %d:%d still has %d user(s)!\n",
                                        node->tree->cnid, node->this, atomic_read(&node->refcnt));
                        hfs_bnode_free(node);
                        tree->node_hash_cnt--;
@@ -252,7 +252,7 @@ struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree)
                kunmap(*pagep);
                nidx = node->next;
                if (!nidx) {
-                       printk("create new bmap node...\n");
+                       printk(KERN_DEBUG "hfs: create new bmap node...\n");
                        next_node = hfs_bmap_new_bmap(node, idx);
                } else
                        next_node = hfs_bnode_find(tree, nidx);
@@ -292,7 +292,7 @@ void hfs_bmap_free(struct hfs_bnode *node)
                hfs_bnode_put(node);
                if (!i) {
                        /* panic */;
-                       printk("HFS: unable to free bnode %u. bmap not found!\n", node->this);
+                       printk(KERN_CRIT "hfs: unable to free bnode %u. bmap not found!\n", node->this);
                        return;
                }
                node = hfs_bnode_find(tree, i);
@@ -300,7 +300,7 @@ void hfs_bmap_free(struct hfs_bnode *node)
                        return;
                if (node->type != HFS_NODE_MAP) {
                        /* panic */;
-                       printk("HFS: invalid bmap found! (%u,%d)\n", node->this, node->type);
+                       printk(KERN_CRIT "hfs: invalid bmap found! (%u,%d)\n", node->this, node->type);
                        hfs_bnode_put(node);
                        return;
                }
@@ -313,7 +313,7 @@ void hfs_bmap_free(struct hfs_bnode *node)
        m = 1 << (~nidx & 7);
        byte = data[off];
        if (!(byte & m)) {
-               printk("HFS: trying to free free bnode %u(%d)\n", node->this, node->type);
+               printk(KERN_CRIT "hfs: trying to free free bnode %u(%d)\n", node->this, node->type);
                kunmap(page);
                hfs_bnode_put(node);
                return;
index 2fcd679f02383366b7f8774ef2ed00d9fe924727..ba851576ebb13233eb2c84b4dc968f2caa2218f6 100644 (file)
@@ -184,7 +184,7 @@ int hfs_cat_find_brec(struct super_block *sb, u32 cnid,
 
        type = rec.type;
        if (type != HFS_CDR_THD && type != HFS_CDR_FTH) {
-               printk("HFS-fs: Found bad thread record in catalog\n");
+               printk(KERN_ERR "hfs: found bad thread record in catalog\n");
                return -EIO;
        }
 
index e1f24befba58b282b9f2aa7af6a5b4628cf6533d..534e5a7480efc8626c00347fa4e22e9d9dc412ee 100644 (file)
@@ -81,12 +81,12 @@ static int hfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
        case 1:
                hfs_bnode_read(fd.bnode, &entry, fd.entryoffset, fd.entrylength);
                if (entry.type != HFS_CDR_THD) {
-                       printk("HFS: bad catalog folder thread\n");
+                       printk(KERN_ERR "hfs: bad catalog folder thread\n");
                        err = -EIO;
                        goto out;
                }
                //if (fd.entrylength < HFS_MIN_THREAD_SZ) {
-               //      printk("HFS: truncated catalog thread\n");
+               //      printk(KERN_ERR "hfs: truncated catalog thread\n");
                //      err = -EIO;
                //      goto out;
                //}
@@ -105,7 +105,7 @@ static int hfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
 
        for (;;) {
                if (be32_to_cpu(fd.key->cat.ParID) != inode->i_ino) {
-                       printk("HFS: walked past end of dir\n");
+                       printk(KERN_ERR "hfs: walked past end of dir\n");
                        err = -EIO;
                        goto out;
                }
@@ -114,7 +114,7 @@ static int hfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
                len = hfs_mac2asc(sb, strbuf, &fd.key->cat.CName);
                if (type == HFS_CDR_DIR) {
                        if (fd.entrylength < sizeof(struct hfs_cat_dir)) {
-                               printk("HFS: small dir entry\n");
+                               printk(KERN_ERR "hfs: small dir entry\n");
                                err = -EIO;
                                goto out;
                        }
@@ -123,7 +123,7 @@ static int hfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
                                break;
                } else if (type == HFS_CDR_FIL) {
                        if (fd.entrylength < sizeof(struct hfs_cat_file)) {
-                               printk("HFS: small file entry\n");
+                               printk(KERN_ERR "hfs: small file entry\n");
                                err = -EIO;
                                goto out;
                        }
@@ -131,7 +131,7 @@ static int hfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
                                    be32_to_cpu(entry.file.FlNum), DT_REG))
                                break;
                } else {
-                       printk("HFS: bad catalog entry type %d\n", type);
+                       printk(KERN_ERR "hfs: bad catalog entry type %d\n", type);
                        err = -EIO;
                        goto out;
                }
index cc5dcd52e23dc7d1de6a1f6c508b23eeffcd4e66..18ce47ab1b71f0d27044e3831c69444ccb33740f 100644 (file)
@@ -35,9 +35,6 @@
 #define dprint(flg, fmt, args...) \
        if (flg & DBG_MASK) printk(fmt , ## args)
 
-#define hfs_warn(format, args...) printk(KERN_WARNING format , ## args)
-#define hfs_error(format, args...) printk(KERN_ERR format , ## args)
-
 /*
  * struct hfs_inode_info
  *
index 050a49276499c790e9ec2df2647fabeec71b0682..39fd85b9b91613136867b4b2690a93590b7288e9 100644 (file)
@@ -95,7 +95,6 @@ static int hfs_releasepage(struct page *page, gfp_t mask)
                } while (--i && nidx < tree->node_count);
                spin_unlock(&tree->hash_lock);
        }
-       //printk("releasepage: %lu,%x = %d\n", page->index, mask, res);
        return res ? try_to_free_buffers(page) : 0;
 }
 
index 0a473f79c89feb192b71dba3f66a7951536c86f8..b4651e128d7fbb8e25a3b1767ef1f967ea08b6f0 100644 (file)
@@ -47,7 +47,7 @@ static int hfs_get_last_session(struct super_block *sb,
                        *start = (sector_t)te.cdte_addr.lba << 2;
                        return 0;
                }
-               printk(KERN_ERR "HFS: Invalid session number or type of track\n");
+               printk(KERN_ERR "hfs: invalid session number or type of track\n");
                return -EINVAL;
        }
        ms_info.addr_format = CDROM_LBA;
@@ -100,7 +100,7 @@ int hfs_mdb_get(struct super_block *sb)
 
        HFS_SB(sb)->alloc_blksz = size = be32_to_cpu(mdb->drAlBlkSiz);
        if (!size || (size & (HFS_SECTOR_SIZE - 1))) {
-               hfs_warn("hfs_fs: bad allocation block size %d\n", size);
+               printk(KERN_ERR "hfs: bad allocation block size %d\n", size);
                goto out_bh;
        }
 
@@ -117,7 +117,7 @@ int hfs_mdb_get(struct super_block *sb)
                size >>= 1;
        brelse(bh);
        if (!sb_set_blocksize(sb, size)) {
-               printk("hfs_fs: unable to set blocksize to %u\n", size);
+               printk(KERN_ERR "hfs: unable to set blocksize to %u\n", size);
                goto out;
        }
 
@@ -161,8 +161,8 @@ int hfs_mdb_get(struct super_block *sb)
        }
 
        if (!HFS_SB(sb)->alt_mdb) {
-               hfs_warn("hfs_fs: unable to locate alternate MDB\n");
-               hfs_warn("hfs_fs: continuing without an alternate MDB\n");
+               printk(KERN_WARNING "hfs: unable to locate alternate MDB\n");
+               printk(KERN_WARNING "hfs: continuing without an alternate MDB\n");
        }
 
        HFS_SB(sb)->bitmap = (__be32 *)__get_free_pages(GFP_KERNEL, PAGE_SIZE < 8192 ? 1 : 0);
@@ -177,7 +177,7 @@ int hfs_mdb_get(struct super_block *sb)
        while (size) {
                bh = sb_bread(sb, off >> sb->s_blocksize_bits);
                if (!bh) {
-                       hfs_warn("hfs_fs: unable to read volume bitmap\n");
+                       printk(KERN_ERR "hfs: unable to read volume bitmap\n");
                        goto out;
                }
                off2 = off & (sb->s_blocksize - 1);
@@ -191,23 +191,23 @@ int hfs_mdb_get(struct super_block *sb)
 
        HFS_SB(sb)->ext_tree = hfs_btree_open(sb, HFS_EXT_CNID, hfs_ext_keycmp);
        if (!HFS_SB(sb)->ext_tree) {
-               hfs_warn("hfs_fs: unable to open extent tree\n");
+               printk(KERN_ERR "hfs: unable to open extent tree\n");
                goto out;
        }
        HFS_SB(sb)->cat_tree = hfs_btree_open(sb, HFS_CAT_CNID, hfs_cat_keycmp);
        if (!HFS_SB(sb)->cat_tree) {
-               hfs_warn("hfs_fs: unable to open catalog tree\n");
+               printk(KERN_ERR "hfs: unable to open catalog tree\n");
                goto out;
        }
 
        attrib = mdb->drAtrb;
        if (!(attrib & cpu_to_be16(HFS_SB_ATTRIB_UNMNT))) {
-               hfs_warn("HFS-fs warning: Filesystem was not cleanly unmounted, "
+               printk(KERN_WARNING "hfs: filesystem was not cleanly unmounted, "
                         "running fsck.hfs is recommended.  mounting read-only.\n");
                sb->s_flags |= MS_RDONLY;
        }
        if ((attrib & cpu_to_be16(HFS_SB_ATTRIB_SLOCK))) {
-               hfs_warn("HFS-fs: Filesystem is marked locked, mounting read-only.\n");
+               printk(KERN_WARNING "hfs: filesystem is marked locked, mounting read-only.\n");
                sb->s_flags |= MS_RDONLY;
        }
        if (!(sb->s_flags & MS_RDONLY)) {
@@ -303,7 +303,7 @@ void hfs_mdb_commit(struct super_block *sb)
                while (size) {
                        bh = sb_bread(sb, block);
                        if (!bh) {
-                               hfs_warn("hfs_fs: unable to read volume bitmap\n");
+                               printk(KERN_ERR "hfs: unable to read volume bitmap\n");
                                break;
                        }
                        len = min((int)sb->s_blocksize - off, size);
index c5074aeafcae7e8bebaf55780e1548692cdad78b..1181d116117dc52f40b503190a03e28053517eaa 100644 (file)
@@ -101,12 +101,12 @@ static int hfs_remount(struct super_block *sb, int *flags, char *data)
                return 0;
        if (!(*flags & MS_RDONLY)) {
                if (!(HFS_SB(sb)->mdb->drAtrb & cpu_to_be16(HFS_SB_ATTRIB_UNMNT))) {
-                       printk("HFS-fs warning: Filesystem was not cleanly unmounted, "
+                       printk(KERN_WARNING "hfs: filesystem was not cleanly unmounted, "
                               "running fsck.hfs is recommended.  leaving read-only.\n");
                        sb->s_flags |= MS_RDONLY;
                        *flags |= MS_RDONLY;
                } else if (HFS_SB(sb)->mdb->drAtrb & cpu_to_be16(HFS_SB_ATTRIB_SLOCK)) {
-                       printk("HFS-fs: Filesystem is marked locked, leaving read-only.\n");
+                       printk(KERN_WARNING "hfs: filesystem is marked locked, leaving read-only.\n");
                        sb->s_flags |= MS_RDONLY;
                        *flags |= MS_RDONLY;
                }
@@ -229,21 +229,21 @@ static int parse_options(char *options, struct hfs_sb_info *hsb)
                switch (token) {
                case opt_uid:
                        if (match_int(&args[0], &tmp)) {
-                               printk("HFS: uid requires an argument\n");
+                               printk(KERN_ERR "hfs: uid requires an argument\n");
                                return 0;
                        }
                        hsb->s_uid = (uid_t)tmp;
                        break;
                case opt_gid:
                        if (match_int(&args[0], &tmp)) {
-                               printk("HFS: gid requires an argument\n");
+                               printk(KERN_ERR "hfs: gid requires an argument\n");
                                return 0;
                        }
                        hsb->s_gid = (gid_t)tmp;
                        break;
                case opt_umask:
                        if (match_octal(&args[0], &tmp)) {
-                               printk("HFS: umask requires a value\n");
+                               printk(KERN_ERR "hfs: umask requires a value\n");
                                return 0;
                        }
                        hsb->s_file_umask = (umode_t)tmp;
@@ -251,39 +251,39 @@ static int parse_options(char *options, struct hfs_sb_info *hsb)
                        break;
                case opt_file_umask:
                        if (match_octal(&args[0], &tmp)) {
-                               printk("HFS: file_umask requires a value\n");
+                               printk(KERN_ERR "hfs: file_umask requires a value\n");
                                return 0;
                        }
                        hsb->s_file_umask = (umode_t)tmp;
                        break;
                case opt_dir_umask:
                        if (match_octal(&args[0], &tmp)) {
-                               printk("HFS: dir_umask requires a value\n");
+                               printk(KERN_ERR "hfs: dir_umask requires a value\n");
                                return 0;
                        }
                        hsb->s_dir_umask = (umode_t)tmp;
                        break;
                case opt_part:
                        if (match_int(&args[0], &hsb->part)) {
-                               printk("HFS: part requires an argument\n");
+                               printk(KERN_ERR "hfs: part requires an argument\n");
                                return 0;
                        }
                        break;
                case opt_session:
                        if (match_int(&args[0], &hsb->session)) {
-                               printk("HFS: session requires an argument\n");
+                               printk(KERN_ERR "hfs: session requires an argument\n");
                                return 0;
                        }
                        break;
                case opt_type:
                        if (match_fourchar(&args[0], &hsb->s_type)) {
-                               printk("HFS+-fs: type requires a 4 character value\n");
+                               printk(KERN_ERR "hfs: type requires a 4 character value\n");
                                return 0;
                        }
                        break;
                case opt_creator:
                        if (match_fourchar(&args[0], &hsb->s_creator)) {
-                               printk("HFS+-fs: creator requires a 4 character value\n");
+                               printk(KERN_ERR "hfs: creator requires a 4 character value\n");
                                return 0;
                        }
                        break;
@@ -292,13 +292,13 @@ static int parse_options(char *options, struct hfs_sb_info *hsb)
                        break;
                case opt_codepage:
                        if (hsb->nls_disk) {
-                               printk("HFS+-fs: unable to change codepage\n");
+                               printk(KERN_ERR "hfs: unable to change codepage\n");
                                return 0;
                        }
                        p = match_strdup(&args[0]);
                        hsb->nls_disk = load_nls(p);
                        if (!hsb->nls_disk) {
-                               printk("HFS+-fs: unable to load codepage \"%s\"\n", p);
+                               printk(KERN_ERR "hfs: unable to load codepage \"%s\"\n", p);
                                kfree(p);
                                return 0;
                        }
@@ -306,13 +306,13 @@ static int parse_options(char *options, struct hfs_sb_info *hsb)
                        break;
                case opt_iocharset:
                        if (hsb->nls_io) {
-                               printk("HFS: unable to change iocharset\n");
+                               printk(KERN_ERR "hfs: unable to change iocharset\n");
                                return 0;
                        }
                        p = match_strdup(&args[0]);
                        hsb->nls_io = load_nls(p);
                        if (!hsb->nls_io) {
-                               printk("HFS: unable to load iocharset \"%s\"\n", p);
+                               printk(KERN_ERR "hfs: unable to load iocharset \"%s\"\n", p);
                                kfree(p);
                                return 0;
                        }
@@ -326,7 +326,7 @@ static int parse_options(char *options, struct hfs_sb_info *hsb)
        if (hsb->nls_disk && !hsb->nls_io) {
                hsb->nls_io = load_nls_default();
                if (!hsb->nls_io) {
-                       printk("HFS: unable to load default iocharset\n");
+                       printk(KERN_ERR "hfs: unable to load default iocharset\n");
                        return 0;
                }
        }
@@ -364,7 +364,7 @@ static int hfs_fill_super(struct super_block *sb, void *data, int silent)
 
        res = -EINVAL;
        if (!parse_options((char *)data, sbi)) {
-               hfs_warn("hfs_fs: unable to parse mount options.\n");
+               printk(KERN_ERR "hfs: unable to parse mount options.\n");
                goto bail;
        }
 
@@ -375,7 +375,7 @@ static int hfs_fill_super(struct super_block *sb, void *data, int silent)
        res = hfs_mdb_get(sb);
        if (res) {
                if (!silent)
-                       hfs_warn("VFS: Can't find a HFS filesystem on dev %s.\n",
+                       printk(KERN_WARNING "hfs: can't find a HFS filesystem on dev %s.\n",
                                hfs_mdb_name(sb));
                res = -EINVAL;
                goto bail;
@@ -407,7 +407,7 @@ static int hfs_fill_super(struct super_block *sb, void *data, int silent)
 bail_iput:
        iput(root_inode);
 bail_no_root:
-       hfs_warn("hfs_fs: get root inode failed.\n");
+       printk(KERN_ERR "hfs: get root inode failed.\n");
 bail:
        hfs_mdb_put(sb);
        return res;
@@ -454,7 +454,7 @@ static void __exit exit_hfs_fs(void)
 {
        unregister_filesystem(&hfs_fs_type);
        if (kmem_cache_destroy(hfs_inode_cachep))
-               printk(KERN_INFO "hfs_inode_cache: not all structures were freed\n");
+               printk(KERN_ERR "hfs_inode_cache: not all structures were freed\n");
 }
 
 module_init(init_hfs_fs)
index 257cdde0514b4ee830c2c51de025d2719a9a9777..5007a41f1be9d345ff11dd7420285ee6a79c08e2 100644 (file)
@@ -64,7 +64,6 @@ int __hfs_brec_find(struct hfs_bnode *bnode, struct hfs_find_data *fd)
                else
                        e = rec - 1;
        } while (b <= e);
-       //printk("%d: %d,%d,%d\n", bnode->this, b, e, rec);
        if (rec != e && e >= 0) {
                len = hfs_brec_lenoff(bnode, e, &off);
                keylen = hfs_brec_keylen(bnode, e);
@@ -127,7 +126,7 @@ int hfs_brec_find(struct hfs_find_data *fd)
        return res;
 
 invalid:
-       printk("HFS+-fs: inconsistency in B*Tree (%d,%d,%d,%u,%u)\n",
+       printk(KERN_ERR "hfs: inconsistency in B*Tree (%d,%d,%d,%u,%u)\n",
                height, bnode->height, bnode->type, nidx, parent);
        res = -EIO;
 release:
index 930cd9212de84ada01b426f0acaee32a564f6773..8f07e8fbd03d34cdf4b294d1395234f9ff328546 100644 (file)
@@ -358,7 +358,7 @@ void hfs_bnode_unlink(struct hfs_bnode *node)
 
        // move down?
        if (!node->prev && !node->next) {
-               printk("hfs_btree_del_level\n");
+               printk(KERN_DEBUG "hfs_btree_del_level\n");
        }
        if (!node->parent) {
                tree->root = 0;
@@ -379,7 +379,7 @@ struct hfs_bnode *hfs_bnode_findhash(struct hfs_btree *tree, u32 cnid)
        struct hfs_bnode *node;
 
        if (cnid >= tree->node_count) {
-               printk("HFS+-fs: request for non-existent node %d in B*Tree\n", cnid);
+               printk(KERN_ERR "hfs: request for non-existent node %d in B*Tree\n", cnid);
                return NULL;
        }
 
@@ -402,7 +402,7 @@ static struct hfs_bnode *__hfs_bnode_create(struct hfs_btree *tree, u32 cnid)
        loff_t off;
 
        if (cnid >= tree->node_count) {
-               printk("HFS+-fs: request for non-existent node %d in B*Tree\n", cnid);
+               printk(KERN_ERR "hfs: request for non-existent node %d in B*Tree\n", cnid);
                return NULL;
        }
 
@@ -576,8 +576,9 @@ struct hfs_bnode *hfs_bnode_create(struct hfs_btree *tree, u32 num)
        node = hfs_bnode_findhash(tree, num);
        spin_unlock(&tree->hash_lock);
        if (node) {
-               printk("new node %u already hashed?\n", num);
-               BUG();
+               printk(KERN_CRIT "new node %u already hashed?\n", num);
+               WARN_ON(1);
+               return node;
        }
        node = __hfs_bnode_create(tree, num);
        if (!node)
index 0ccef2ab790c048b6b838ed3d4753d09e62fa164..c88e5d72a402ae2d29a8905cdccf7b59ccd4337d 100644 (file)
@@ -360,7 +360,7 @@ again:
                end_off = hfs_bnode_read_u16(parent, end_rec_off);
                if (end_rec_off - end_off < diff) {
 
-                       printk("splitting index node...\n");
+                       printk(KERN_DEBUG "hfs: splitting index node...\n");
                        fd->bnode = parent;
                        new_node = hfs_bnode_split(fd);
                        if (IS_ERR(new_node))
index 44326aa2bd34b927099b2397ea62c070b28849cf..a67edfa34e9ec6ac847d9e2fe2f6bb08725e038d 100644 (file)
@@ -31,17 +31,8 @@ struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id)
 
        init_MUTEX(&tree->tree_lock);
        spin_lock_init(&tree->hash_lock);
-       /* Set the correct compare function */
        tree->sb = sb;
        tree->cnid = id;
-       if (id == HFSPLUS_EXT_CNID) {
-               tree->keycmp = hfsplus_ext_cmp_key;
-       } else if (id == HFSPLUS_CAT_CNID) {
-               tree->keycmp = hfsplus_cat_cmp_key;
-       } else {
-               printk("HFS+-fs: unknown B*Tree requested\n");
-               goto free_tree;
-       }
        tree->inode = iget(sb, id);
        if (!tree->inode)
                goto free_tree;
@@ -64,6 +55,20 @@ struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id)
        tree->max_key_len = be16_to_cpu(head->max_key_len);
        tree->depth = be16_to_cpu(head->depth);
 
+       /* Set the correct compare function */
+       if (id == HFSPLUS_EXT_CNID) {
+               tree->keycmp = hfsplus_ext_cmp_key;
+       } else if (id == HFSPLUS_CAT_CNID) {
+               if ((HFSPLUS_SB(sb).flags & HFSPLUS_SB_HFSX) &&
+                   (head->key_type == HFSPLUS_KEY_BINARY))
+                       tree->keycmp = hfsplus_cat_bin_cmp_key;
+               else
+                       tree->keycmp = hfsplus_cat_case_cmp_key;
+       } else {
+               printk(KERN_ERR "hfs: unknown B*Tree requested\n");
+               goto fail_page;
+       }
+
        size = tree->node_size;
        if (!size || size & (size - 1))
                goto fail_page;
@@ -99,7 +104,7 @@ void hfs_btree_close(struct hfs_btree *tree)
                while ((node = tree->node_hash[i])) {
                        tree->node_hash[i] = node->next_hash;
                        if (atomic_read(&node->refcnt))
-                               printk("HFS+: node %d:%d still has %d user(s)!\n",
+                               printk(KERN_CRIT "hfs: node %d:%d still has %d user(s)!\n",
                                        node->tree->cnid, node->this, atomic_read(&node->refcnt));
                        hfs_bnode_free(node);
                        tree->node_hash_cnt--;
@@ -223,10 +228,6 @@ struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree)
                                                tree->free_nodes--;
                                                mark_inode_dirty(tree->inode);
                                                hfs_bnode_put(node);
-                                               if (!idx) {
-                                                       printk("unexpected idx %u (%u)\n", idx, node->this);
-                                                       BUG();
-                                               }
                                                return hfs_bnode_create(tree, idx);
                                        }
                                }
@@ -242,7 +243,7 @@ struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree)
                kunmap(*pagep);
                nidx = node->next;
                if (!nidx) {
-                       printk("create new bmap node...\n");
+                       printk(KERN_DEBUG "hfs: create new bmap node...\n");
                        next_node = hfs_bmap_new_bmap(node, idx);
                } else
                        next_node = hfs_bnode_find(tree, nidx);
@@ -284,7 +285,7 @@ void hfs_bmap_free(struct hfs_bnode *node)
                hfs_bnode_put(node);
                if (!i) {
                        /* panic */;
-                       printk("HFS: unable to free bnode %u. bmap not found!\n", node->this);
+                       printk(KERN_CRIT "hfs: unable to free bnode %u. bmap not found!\n", node->this);
                        return;
                }
                node = hfs_bnode_find(tree, i);
@@ -292,7 +293,7 @@ void hfs_bmap_free(struct hfs_bnode *node)
                        return;
                if (node->type != HFS_NODE_MAP) {
                        /* panic */;
-                       printk("HFS: invalid bmap found! (%u,%d)\n", node->this, node->type);
+                       printk(KERN_CRIT "hfs: invalid bmap found! (%u,%d)\n", node->this, node->type);
                        hfs_bnode_put(node);
                        return;
                }
@@ -305,7 +306,7 @@ void hfs_bmap_free(struct hfs_bnode *node)
        m = 1 << (~nidx & 7);
        byte = data[off];
        if (!(byte & m)) {
-               printk("HFS: trying to free free bnode %u(%d)\n", node->this, node->type);
+               printk(KERN_CRIT "hfs: trying to free free bnode %u(%d)\n", node->this, node->type);
                kunmap(page);
                hfs_bnode_put(node);
                return;
index 94712790c8b3f2414e4516fa4f291c19f55a2ff9..f2d7c49ce7595d16ed12e3ae834dbe6e93b54c6d 100644 (file)
@@ -13,7 +13,8 @@
 #include "hfsplus_fs.h"
 #include "hfsplus_raw.h"
 
-int hfsplus_cat_cmp_key(hfsplus_btree_key *k1, hfsplus_btree_key *k2)
+int hfsplus_cat_case_cmp_key(const hfsplus_btree_key *k1,
+                            const hfsplus_btree_key *k2)
 {
        __be32 k1p, k2p;
 
@@ -22,7 +23,20 @@ int hfsplus_cat_cmp_key(hfsplus_btree_key *k1, hfsplus_btree_key *k2)
        if (k1p != k2p)
                return be32_to_cpu(k1p) < be32_to_cpu(k2p) ? -1 : 1;
 
-       return hfsplus_unistrcmp(&k1->cat.name, &k2->cat.name);
+       return hfsplus_strcasecmp(&k1->cat.name, &k2->cat.name);
+}
+
+int hfsplus_cat_bin_cmp_key(const hfsplus_btree_key *k1,
+                           const hfsplus_btree_key *k2)
+{
+       __be32 k1p, k2p;
+
+       k1p = k1->cat.parent;
+       k2p = k2->cat.parent;
+       if (k1p != k2p)
+               return be32_to_cpu(k1p) < be32_to_cpu(k2p) ? -1 : 1;
+
+       return hfsplus_strcmp(&k1->cat.name, &k2->cat.name);
 }
 
 void hfsplus_cat_build_key(struct super_block *sb, hfsplus_btree_key *key,
@@ -80,8 +94,11 @@ static int hfsplus_cat_build_record(hfsplus_cat_entry *entry, u32 cnid, struct i
                memset(folder, 0, sizeof(*folder));
                folder->type = cpu_to_be16(HFSPLUS_FOLDER);
                folder->id = cpu_to_be32(inode->i_ino);
-               folder->create_date = folder->content_mod_date =
-                       folder->attribute_mod_date = folder->access_date = hfsp_now2mt();
+               HFSPLUS_I(inode).create_date =
+                       folder->create_date =
+                       folder->content_mod_date =
+                       folder->attribute_mod_date =
+                       folder->access_date = hfsp_now2mt();
                hfsplus_set_perms(inode, &folder->permissions);
                if (inode == HFSPLUS_SB(inode->i_sb).hidden_dir)
                        /* invisible and namelocked */
@@ -95,18 +112,27 @@ static int hfsplus_cat_build_record(hfsplus_cat_entry *entry, u32 cnid, struct i
                file->type = cpu_to_be16(HFSPLUS_FILE);
                file->flags = cpu_to_be16(HFSPLUS_FILE_THREAD_EXISTS);
                file->id = cpu_to_be32(cnid);
-               file->create_date = file->content_mod_date =
-                       file->attribute_mod_date = file->access_date = hfsp_now2mt();
+               HFSPLUS_I(inode).create_date =
+                       file->create_date =
+                       file->content_mod_date =
+                       file->attribute_mod_date =
+                       file->access_date = hfsp_now2mt();
                if (cnid == inode->i_ino) {
                        hfsplus_set_perms(inode, &file->permissions);
-                       file->user_info.fdType = cpu_to_be32(HFSPLUS_SB(inode->i_sb).type);
-                       file->user_info.fdCreator = cpu_to_be32(HFSPLUS_SB(inode->i_sb).creator);
+                       if (S_ISLNK(inode->i_mode)) {
+                               file->user_info.fdType = cpu_to_be32(HFSP_SYMLINK_TYPE);
+                               file->user_info.fdCreator = cpu_to_be32(HFSP_SYMLINK_CREATOR);
+                       } else {
+                               file->user_info.fdType = cpu_to_be32(HFSPLUS_SB(inode->i_sb).type);
+                               file->user_info.fdCreator = cpu_to_be32(HFSPLUS_SB(inode->i_sb).creator);
+                       }
                        if ((file->permissions.rootflags | file->permissions.userflags) & HFSPLUS_FLG_IMMUTABLE)
                                file->flags |= cpu_to_be16(HFSPLUS_FILE_LOCKED);
                } else {
                        file->user_info.fdType = cpu_to_be32(HFSP_HARDLINK_TYPE);
                        file->user_info.fdCreator = cpu_to_be32(HFSP_HFSPLUS_CREATOR);
                        file->user_info.fdFlags = cpu_to_be16(0x100);
+                       file->create_date = HFSPLUS_I(HFSPLUS_SB(inode->i_sb).hidden_dir).create_date;
                        file->permissions.dev = cpu_to_be32(HFSPLUS_I(inode).dev);
                }
                return sizeof(*file);
@@ -139,7 +165,7 @@ int hfsplus_find_cat(struct super_block *sb, u32 cnid,
 
        type = be16_to_cpu(tmp.type);
        if (type != HFSPLUS_FOLDER_THREAD && type != HFSPLUS_FILE_THREAD) {
-               printk("HFS+-fs: Found bad thread record in catalog\n");
+               printk(KERN_ERR "hfs: found bad thread record in catalog\n");
                return -EIO;
        }
 
index 50c8f44b6c665f6e605957346515b9ea6e524af5..01a6fe3a395c765eeb79f5a22b1f31a91931f99e 100644 (file)
@@ -66,25 +66,32 @@ again:
                }
                cnid = be32_to_cpu(entry.file.id);
                if (entry.file.user_info.fdType == cpu_to_be32(HFSP_HARDLINK_TYPE) &&
-                   entry.file.user_info.fdCreator == cpu_to_be32(HFSP_HFSPLUS_CREATOR)) {
+                   entry.file.user_info.fdCreator == cpu_to_be32(HFSP_HFSPLUS_CREATOR) &&
+                   (entry.file.create_date == HFSPLUS_I(HFSPLUS_SB(sb).hidden_dir).create_date ||
+                    entry.file.create_date == HFSPLUS_I(sb->s_root->d_inode).create_date) &&
+                   HFSPLUS_SB(sb).hidden_dir) {
                        struct qstr str;
                        char name[32];
 
                        if (dentry->d_fsdata) {
-                               err = -ENOENT;
-                               inode = NULL;
-                               goto out;
+                               /*
+                                * We found a link pointing to another link,
+                                * so ignore it and treat it as a regular file.
+                                */
+                               cnid = (unsigned long)dentry->d_fsdata;
+                               linkid = 0;
+                       } else {
+                               dentry->d_fsdata = (void *)(unsigned long)cnid;
+                               linkid = be32_to_cpu(entry.file.permissions.dev);
+                               str.len = sprintf(name, "iNode%d", linkid);
+                               str.name = name;
+                               hfsplus_cat_build_key(sb, fd.search_key, HFSPLUS_SB(sb).hidden_dir->i_ino, &str);
+                               goto again;
                        }
-                       dentry->d_fsdata = (void *)(unsigned long)cnid;
-                       linkid = be32_to_cpu(entry.file.permissions.dev);
-                       str.len = sprintf(name, "iNode%d", linkid);
-                       str.name = name;
-                       hfsplus_cat_build_key(sb, fd.search_key, HFSPLUS_SB(sb).hidden_dir->i_ino, &str);
-                       goto again;
                } else if (!dentry->d_fsdata)
                        dentry->d_fsdata = (void *)(unsigned long)cnid;
        } else {
-               printk("HFS+-fs: Illegal catalog entry type in lookup\n");
+               printk(KERN_ERR "hfs: invalid catalog entry type in lookup\n");
                err = -EIO;
                goto fail;
        }
@@ -132,12 +139,12 @@ static int hfsplus_readdir(struct file *filp, void *dirent, filldir_t filldir)
        case 1:
                hfs_bnode_read(fd.bnode, &entry, fd.entryoffset, fd.entrylength);
                if (be16_to_cpu(entry.type) != HFSPLUS_FOLDER_THREAD) {
-                       printk("HFS+-fs: bad catalog folder thread\n");
+                       printk(KERN_ERR "hfs: bad catalog folder thread\n");
                        err = -EIO;
                        goto out;
                }
                if (fd.entrylength < HFSPLUS_MIN_THREAD_SZ) {
-                       printk("HFS+-fs: truncated catalog thread\n");
+                       printk(KERN_ERR "hfs: truncated catalog thread\n");
                        err = -EIO;
                        goto out;
                }
@@ -156,7 +163,7 @@ static int hfsplus_readdir(struct file *filp, void *dirent, filldir_t filldir)
 
        for (;;) {
                if (be32_to_cpu(fd.key->cat.parent) != inode->i_ino) {
-                       printk("HFS+-fs: walked past end of dir\n");
+                       printk(KERN_ERR "hfs: walked past end of dir\n");
                        err = -EIO;
                        goto out;
                }
@@ -168,7 +175,7 @@ static int hfsplus_readdir(struct file *filp, void *dirent, filldir_t filldir)
                        goto out;
                if (type == HFSPLUS_FOLDER) {
                        if (fd.entrylength < sizeof(struct hfsplus_cat_folder)) {
-                               printk("HFS+-fs: small dir entry\n");
+                               printk(KERN_ERR "hfs: small dir entry\n");
                                err = -EIO;
                                goto out;
                        }
@@ -180,7 +187,7 @@ static int hfsplus_readdir(struct file *filp, void *dirent, filldir_t filldir)
                                break;
                } else if (type == HFSPLUS_FILE) {
                        if (fd.entrylength < sizeof(struct hfsplus_cat_file)) {
-                               printk("HFS+-fs: small file entry\n");
+                               printk(KERN_ERR "hfs: small file entry\n");
                                err = -EIO;
                                goto out;
                        }
@@ -188,7 +195,7 @@ static int hfsplus_readdir(struct file *filp, void *dirent, filldir_t filldir)
                                    be32_to_cpu(entry.file.id), DT_REG))
                                break;
                } else {
-                       printk("HFS+-fs: bad catalog entry type\n");
+                       printk(KERN_ERR "hfs: bad catalog entry type\n");
                        err = -EIO;
                        goto out;
                }
@@ -330,7 +337,8 @@ static int hfsplus_unlink(struct inode *dir, struct dentry *dentry)
        if (res)
                return res;
 
-       inode->i_nlink--;
+       if (inode->i_nlink > 0)
+               inode->i_nlink--;
        hfsplus_delete_inode(inode);
        if (inode->i_ino != cnid && !inode->i_nlink) {
                if (!atomic_read(&HFSPLUS_I(inode).opencnt)) {
@@ -339,7 +347,8 @@ static int hfsplus_unlink(struct inode *dir, struct dentry *dentry)
                                hfsplus_delete_inode(inode);
                } else
                        inode->i_flags |= S_DEAD;
-       }
+       } else
+               inode->i_nlink = 0;
        inode->i_ctime = CURRENT_TIME_SEC;
        mark_inode_dirty(inode);
 
index e3ff56a030117325cecc46982258829390d64671..1a7480089e82ef918cb71c91a52c4e86b214c2df 100644 (file)
@@ -16,7 +16,8 @@
 #include "hfsplus_raw.h"
 
 /* Compare two extents keys, returns 0 on same, pos/neg for difference */
-int hfsplus_ext_cmp_key(hfsplus_btree_key *k1, hfsplus_btree_key *k2)
+int hfsplus_ext_cmp_key(const hfsplus_btree_key *k1,
+                       const hfsplus_btree_key *k2)
 {
        __be32 k1id, k2id;
        __be32 k1s, k2s;
@@ -349,10 +350,9 @@ int hfsplus_file_extend(struct inode *inode)
 
        if (HFSPLUS_SB(sb).alloc_file->i_size * 8 < HFSPLUS_SB(sb).total_blocks - HFSPLUS_SB(sb).free_blocks + 8) {
                // extend alloc file
-               printk("extend alloc file! (%Lu,%u,%u)\n", HFSPLUS_SB(sb).alloc_file->i_size * 8,
+               printk(KERN_ERR "hfs: extend alloc file! (%Lu,%u,%u)\n", HFSPLUS_SB(sb).alloc_file->i_size * 8,
                        HFSPLUS_SB(sb).total_blocks, HFSPLUS_SB(sb).free_blocks);
                return -ENOSPC;
-               //BUG();
        }
 
        down(&HFSPLUS_I(inode).extents_lock);
index 0fa1ab6250bffb55d93af2676e874a483d86ba2b..7ae393637a0ccc688dbfd38f674123d673081471 100644 (file)
@@ -36,7 +36,7 @@
 #define HFSPLUS_TYPE_DATA 0x00
 #define HFSPLUS_TYPE_RSRC 0xFF
 
-typedef int (*btree_keycmp)(hfsplus_btree_key *, hfsplus_btree_key *);
+typedef int (*btree_keycmp)(const hfsplus_btree_key *, const hfsplus_btree_key *);
 
 #define NODE_HASH_SIZE 256
 
@@ -149,6 +149,7 @@ struct hfsplus_sb_info {
 #define HFSPLUS_SB_WRITEBACKUP 0x0001
 #define HFSPLUS_SB_NODECOMPOSE 0x0002
 #define HFSPLUS_SB_FORCE       0x0004
+#define HFSPLUS_SB_HFSX                0x0008
 
 
 struct hfsplus_inode_info {
@@ -165,6 +166,7 @@ struct hfsplus_inode_info {
        struct inode *rsrc_inode;
        unsigned long flags;
 
+       __be32 create_date;
        /* Device number in hfsplus_permissions in catalog */
        u32 dev;
        /* BSD system and user file flags */
@@ -303,7 +305,8 @@ int hfs_brec_read(struct hfs_find_data *, void *, int);
 int hfs_brec_goto(struct hfs_find_data *, int);
 
 /* catalog.c */
-int hfsplus_cat_cmp_key(hfsplus_btree_key *, hfsplus_btree_key *);
+int hfsplus_cat_case_cmp_key(const hfsplus_btree_key *, const hfsplus_btree_key *);
+int hfsplus_cat_bin_cmp_key(const hfsplus_btree_key *, const hfsplus_btree_key *);
 void hfsplus_cat_build_key(struct super_block *sb, hfsplus_btree_key *, u32, struct qstr *);
 int hfsplus_find_cat(struct super_block *, u32, struct hfs_find_data *);
 int hfsplus_create_cat(u32, struct inode *, struct qstr *, struct inode *);
@@ -312,7 +315,7 @@ int hfsplus_rename_cat(u32, struct inode *, struct qstr *,
                       struct inode *, struct qstr *);
 
 /* extents.c */
-int hfsplus_ext_cmp_key(hfsplus_btree_key *, hfsplus_btree_key *);
+int hfsplus_ext_cmp_key(const hfsplus_btree_key *, const hfsplus_btree_key *);
 void hfsplus_ext_write_extent(struct inode *);
 int hfsplus_get_block(struct inode *, sector_t, struct buffer_head *, int);
 int hfsplus_free_fork(struct super_block *, u32, struct hfsplus_fork_raw *, int);
@@ -350,7 +353,8 @@ extern u16 hfsplus_decompose_table[];
 extern u16 hfsplus_compose_table[];
 
 /* unicode.c */
-int hfsplus_unistrcmp(const struct hfsplus_unistr *, const struct hfsplus_unistr *);
+int hfsplus_strcasecmp(const struct hfsplus_unistr *, const struct hfsplus_unistr *);
+int hfsplus_strcmp(const struct hfsplus_unistr *, const struct hfsplus_unistr *);
 int hfsplus_uni2asc(struct super_block *, const struct hfsplus_unistr *, char *, int *);
 int hfsplus_asc2uni(struct super_block *, struct hfsplus_unistr *, const char *, int);
 
index b4fbed63321944dac1ff47676845e808512d0efb..49205531a5006b0ce731908628c5d485afeed77c 100644 (file)
 #define HFSPLUS_SECTOR_SHIFT         9
 #define HFSPLUS_VOLHEAD_SECTOR       2
 #define HFSPLUS_VOLHEAD_SIG     0x482b
+#define HFSPLUS_VOLHEAD_SIGX    0x4858
 #define HFSPLUS_SUPER_MAGIC     0x482b
-#define HFSPLUS_CURRENT_VERSION      4
+#define HFSPLUS_MIN_VERSION          4
+#define HFSPLUS_CURRENT_VERSION      5
 
 #define HFSP_WRAP_MAGIC         0x4244
 #define HFSP_WRAP_ATTRIB_SLOCK  0x8000
@@ -41,6 +43,9 @@
 #define HFSP_HARDLINK_TYPE     0x686c6e6b      /* 'hlnk' */
 #define HFSP_HFSPLUS_CREATOR   0x6866732b      /* 'hfs+' */
 
+#define HFSP_SYMLINK_TYPE      0x736c6e6b      /* 'slnk' */
+#define HFSP_SYMLINK_CREATOR   0x72686170      /* 'rhap' */
+
 #define HFSP_MOUNT_VERSION     0x482b4c78      /* 'H+Lx' */
 
 /* Structures used on disk */
@@ -161,7 +166,7 @@ struct hfs_btree_header_rec {
        u16 reserved1;
        __be32 clump_size;
        u8 btree_type;
-       u8 reserved2;
+       u8 key_type;
        __be32 attributes;
        u32 reserved3[16];
 } __packed;
@@ -186,6 +191,10 @@ struct hfs_btree_header_rec {
 #define HFSPLUS_EXCH_CNID              15      /* ExchangeFiles temp id */
 #define HFSPLUS_FIRSTUSER_CNID         16      /* first available user id */
 
+/* btree key type */
+#define HFSPLUS_KEY_CASEFOLDING                0xCF    /* case-insensitive */
+#define HFSPLUS_KEY_BINARY             0xBC    /* case-sensitive */
+
 /* HFS+ catalog entry key */
 struct hfsplus_cat_key {
        __be16 key_len;
index 7acff6c5464ffdca66599d0dbebe524571139c40..12ed2b7d046bf11ae2023523c52b33ac55975651 100644 (file)
 
 static int hfsplus_readpage(struct file *file, struct page *page)
 {
-       //printk("readpage: %lu\n", page->index);
        return block_read_full_page(page, hfsplus_get_block);
 }
 
 static int hfsplus_writepage(struct page *page, struct writeback_control *wbc)
 {
-       //printk("writepage: %lu\n", page->index);
        return block_write_full_page(page, hfsplus_get_block, wbc);
 }
 
@@ -92,7 +90,6 @@ static int hfsplus_releasepage(struct page *page, gfp_t mask)
                } while (--i && nidx < tree->node_count);
                spin_unlock(&tree->hash_lock);
        }
-       //printk("releasepage: %lu,%x = %d\n", page->index, mask, res);
        return res ? try_to_free_buffers(page) : 0;
 }
 
@@ -434,7 +431,8 @@ int hfsplus_cat_read_inode(struct inode *inode, struct hfs_find_data *fd)
                inode->i_size = 2 + be32_to_cpu(folder->valence);
                inode->i_atime = hfsp_mt2ut(folder->access_date);
                inode->i_mtime = hfsp_mt2ut(folder->content_mod_date);
-               inode->i_ctime = inode->i_mtime;
+               inode->i_ctime = hfsp_mt2ut(folder->attribute_mod_date);
+               HFSPLUS_I(inode).create_date = folder->create_date;
                HFSPLUS_I(inode).fs_blocks = 0;
                inode->i_op = &hfsplus_dir_inode_operations;
                inode->i_fop = &hfsplus_dir_operations;
@@ -465,9 +463,10 @@ int hfsplus_cat_read_inode(struct inode *inode, struct hfs_find_data *fd)
                }
                inode->i_atime = hfsp_mt2ut(file->access_date);
                inode->i_mtime = hfsp_mt2ut(file->content_mod_date);
-               inode->i_ctime = inode->i_mtime;
+               inode->i_ctime = hfsp_mt2ut(file->attribute_mod_date);
+               HFSPLUS_I(inode).create_date = file->create_date;
        } else {
-               printk("HFS+-fs: bad catalog entry used to create inode\n");
+               printk(KERN_ERR "hfs: bad catalog entry used to create inode\n");
                res = -EIO;
        }
        return res;
index 935dafba007855aadc16b42b0a5e0831a987afb8..dc64fac008315092a6d878e5df8634735ea3bbb8 100644 (file)
@@ -83,58 +83,58 @@ int hfsplus_parse_options(char *input, struct hfsplus_sb_info *sbi)
                switch (token) {
                case opt_creator:
                        if (match_fourchar(&args[0], &sbi->creator)) {
-                               printk("HFS+-fs: creator requires a 4 character value\n");
+                               printk(KERN_ERR "hfs: creator requires a 4 character value\n");
                                return 0;
                        }
                        break;
                case opt_type:
                        if (match_fourchar(&args[0], &sbi->type)) {
-                               printk("HFS+-fs: type requires a 4 character value\n");
+                               printk(KERN_ERR "hfs: type requires a 4 character value\n");
                                return 0;
                        }
                        break;
                case opt_umask:
                        if (match_octal(&args[0], &tmp)) {
-                               printk("HFS+-fs: umask requires a value\n");
+                               printk(KERN_ERR "hfs: umask requires a value\n");
                                return 0;
                        }
                        sbi->umask = (umode_t)tmp;
                        break;
                case opt_uid:
                        if (match_int(&args[0], &tmp)) {
-                               printk("HFS+-fs: uid requires an argument\n");
+                               printk(KERN_ERR "hfs: uid requires an argument\n");
                                return 0;
                        }
                        sbi->uid = (uid_t)tmp;
                        break;
                case opt_gid:
                        if (match_int(&args[0], &tmp)) {
-                               printk("HFS+-fs: gid requires an argument\n");
+                               printk(KERN_ERR "hfs: gid requires an argument\n");
                                return 0;
                        }
                        sbi->gid = (gid_t)tmp;
                        break;
                case opt_part:
                        if (match_int(&args[0], &sbi->part)) {
-                               printk("HFS+-fs: part requires an argument\n");
+                               printk(KERN_ERR "hfs: part requires an argument\n");
                                return 0;
                        }
                        break;
                case opt_session:
                        if (match_int(&args[0], &sbi->session)) {
-                               printk("HFS+-fs: session requires an argument\n");
+                               printk(KERN_ERR "hfs: session requires an argument\n");
                                return 0;
                        }
                        break;
                case opt_nls:
                        if (sbi->nls) {
-                               printk("HFS+-fs: unable to change nls mapping\n");
+                               printk(KERN_ERR "hfs: unable to change nls mapping\n");
                                return 0;
                        }
                        p = match_strdup(&args[0]);
                        sbi->nls = load_nls(p);
                        if (!sbi->nls) {
-                               printk("HFS+-fs: unable to load nls mapping \"%s\"\n", p);
+                               printk(KERN_ERR "hfs: unable to load nls mapping \"%s\"\n", p);
                                kfree(p);
                                return 0;
                        }
index d791780def50fb98bff0343eee81fa602ad6fb31..7843f792a4b79494919338965bba8aa9dfdb27f4 100644 (file)
@@ -169,7 +169,7 @@ static void hfsplus_write_super(struct super_block *sb)
                        block = HFSPLUS_SB(sb).blockoffset;
                        block += (HFSPLUS_SB(sb).sect_count - 2) >> (sb->s_blocksize_bits - 9);
                        offset = ((HFSPLUS_SB(sb).sect_count - 2) << 9) & (sb->s_blocksize - 1);
-                       printk("backup: %u,%u,%u,%u\n", HFSPLUS_SB(sb).blockoffset,
+                       printk(KERN_DEBUG "hfs: backup: %u,%u,%u,%u\n", HFSPLUS_SB(sb).blockoffset,
                                HFSPLUS_SB(sb).sect_count, block, offset);
                        bh = sb_bread(sb, block);
                        if (bh) {
@@ -179,7 +179,7 @@ static void hfsplus_write_super(struct super_block *sb)
                                        mark_buffer_dirty(bh);
                                        brelse(bh);
                                } else
-                                       printk("backup not found!\n");
+                                       printk(KERN_WARNING "hfs: backup not found!\n");
                        }
                }
                HFSPLUS_SB(sb).flags &= ~HFSPLUS_SB_WRITEBACKUP;
@@ -240,18 +240,18 @@ static int hfsplus_remount(struct super_block *sb, int *flags, char *data)
                        return -EINVAL;
 
                if (!(vhdr->attributes & cpu_to_be32(HFSPLUS_VOL_UNMNT))) {
-                       printk("HFS+-fs warning: Filesystem was not cleanly unmounted, "
+                       printk(KERN_WARNING "hfs: filesystem was not cleanly unmounted, "
                               "running fsck.hfsplus is recommended.  leaving read-only.\n");
                        sb->s_flags |= MS_RDONLY;
                        *flags |= MS_RDONLY;
                } else if (sbi.flags & HFSPLUS_SB_FORCE) {
                        /* nothing */
                } else if (vhdr->attributes & cpu_to_be32(HFSPLUS_VOL_SOFTLOCK)) {
-                       printk("HFS+-fs: Filesystem is marked locked, leaving read-only.\n");
+                       printk(KERN_WARNING "hfs: filesystem is marked locked, leaving read-only.\n");
                        sb->s_flags |= MS_RDONLY;
                        *flags |= MS_RDONLY;
                } else if (vhdr->attributes & cpu_to_be32(HFSPLUS_VOL_JOURNALED)) {
-                       printk("HFS+-fs: Filesystem is marked journaled, leaving read-only.\n");
+                       printk(KERN_WARNING "hfs: filesystem is marked journaled, leaving read-only.\n");
                        sb->s_flags |= MS_RDONLY;
                        *flags |= MS_RDONLY;
                }
@@ -292,8 +292,7 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent)
        INIT_HLIST_HEAD(&sbi->rsrc_inodes);
        hfsplus_fill_defaults(sbi);
        if (!hfsplus_parse_options(data, sbi)) {
-               if (!silent)
-                       printk("HFS+-fs: unable to parse mount options\n");
+               printk(KERN_ERR "hfs: unable to parse mount options\n");
                err = -EINVAL;
                goto cleanup;
        }
@@ -302,7 +301,7 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent)
        nls = sbi->nls;
        sbi->nls = load_nls("utf8");
        if (!sbi->nls) {
-               printk("HFS+: unable to load nls for utf8\n");
+               printk(KERN_ERR "hfs: unable to load nls for utf8\n");
                err = -EINVAL;
                goto cleanup;
        }
@@ -310,17 +309,17 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent)
        /* Grab the volume header */
        if (hfsplus_read_wrapper(sb)) {
                if (!silent)
-                       printk("HFS+-fs: unable to find HFS+ superblock\n");
+                       printk(KERN_WARNING "hfs: unable to find HFS+ superblock\n");
                err = -EINVAL;
                goto cleanup;
        }
        vhdr = HFSPLUS_SB(sb).s_vhdr;
 
        /* Copy parts of the volume header into the superblock */
-       sb->s_magic = be16_to_cpu(vhdr->signature);
-       if (be16_to_cpu(vhdr->version) != HFSPLUS_CURRENT_VERSION) {
-               if (!silent)
-                       printk("HFS+-fs: wrong filesystem version\n");
+       sb->s_magic = HFSPLUS_VOLHEAD_SIG;
+       if (be16_to_cpu(vhdr->version) < HFSPLUS_MIN_VERSION ||
+           be16_to_cpu(vhdr->version) > HFSPLUS_CURRENT_VERSION) {
+               printk(KERN_ERR "hfs: wrong filesystem version\n");
                goto cleanup;
        }
        HFSPLUS_SB(sb).total_blocks = be32_to_cpu(vhdr->total_blocks);
@@ -341,20 +340,17 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent)
        sb->s_maxbytes = MAX_LFS_FILESIZE;
 
        if (!(vhdr->attributes & cpu_to_be32(HFSPLUS_VOL_UNMNT))) {
-               if (!silent)
-                       printk("HFS+-fs warning: Filesystem was not cleanly unmounted, "
-                              "running fsck.hfsplus is recommended.  mounting read-only.\n");
+               printk(KERN_WARNING "hfs: Filesystem was not cleanly unmounted, "
+                      "running fsck.hfsplus is recommended.  mounting read-only.\n");
                sb->s_flags |= MS_RDONLY;
        } else if (sbi->flags & HFSPLUS_SB_FORCE) {
                /* nothing */
        } else if (vhdr->attributes & cpu_to_be32(HFSPLUS_VOL_SOFTLOCK)) {
-               if (!silent)
-                       printk("HFS+-fs: Filesystem is marked locked, mounting read-only.\n");
+               printk(KERN_WARNING "hfs: Filesystem is marked locked, mounting read-only.\n");
                sb->s_flags |= MS_RDONLY;
        } else if (vhdr->attributes & cpu_to_be32(HFSPLUS_VOL_JOURNALED)) {
-               if (!silent)
-                       printk("HFS+-fs: write access to a jounaled filesystem is not supported, "
-                              "use the force option at your own risk, mounting read-only.\n");
+               printk(KERN_WARNING "hfs: write access to a journaled filesystem is not supported, "
+                      "use the force option at your own risk, mounting read-only.\n");
                sb->s_flags |= MS_RDONLY;
        }
        sbi->flags &= ~HFSPLUS_SB_FORCE;
@@ -362,21 +358,18 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent)
        /* Load metadata objects (B*Trees) */
        HFSPLUS_SB(sb).ext_tree = hfs_btree_open(sb, HFSPLUS_EXT_CNID);
        if (!HFSPLUS_SB(sb).ext_tree) {
-               if (!silent)
-                       printk("HFS+-fs: failed to load extents file\n");
+               printk(KERN_ERR "hfs: failed to load extents file\n");
                goto cleanup;
        }
        HFSPLUS_SB(sb).cat_tree = hfs_btree_open(sb, HFSPLUS_CAT_CNID);
        if (!HFSPLUS_SB(sb).cat_tree) {
-               if (!silent)
-                       printk("HFS+-fs: failed to load catalog file\n");
+               printk(KERN_ERR "hfs: failed to load catalog file\n");
                goto cleanup;
        }
 
        HFSPLUS_SB(sb).alloc_file = iget(sb, HFSPLUS_ALLOC_CNID);
        if (!HFSPLUS_SB(sb).alloc_file) {
-               if (!silent)
-                       printk("HFS+-fs: failed to load allocation file\n");
+               printk(KERN_ERR "hfs: failed to load allocation file\n");
                goto cleanup;
        }
 
@@ -384,8 +377,7 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent)
        root = iget(sb, HFSPLUS_ROOT_CNID);
        sb->s_root = d_alloc_root(root);
        if (!sb->s_root) {
-               if (!silent)
-                       printk("HFS+-fs: failed to load root directory\n");
+               printk(KERN_ERR "hfs: failed to load root directory\n");
                iput(root);
                goto cleanup;
        }
@@ -419,7 +411,7 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent)
        sync_dirty_buffer(HFSPLUS_SB(sb).s_vhbh);
 
        if (!HFSPLUS_SB(sb).hidden_dir) {
-               printk("HFS+: create hidden dir...\n");
+               printk(KERN_DEBUG "hfs: create hidden dir...\n");
                HFSPLUS_SB(sb).hidden_dir = hfsplus_new_inode(sb, S_IFDIR);
                hfsplus_create_cat(HFSPLUS_SB(sb).hidden_dir->i_ino, sb->s_root->d_inode,
                                   &str, HFSPLUS_SB(sb).hidden_dir);
@@ -499,7 +491,7 @@ static void __exit exit_hfsplus_fs(void)
 {
        unregister_filesystem(&hfsplus_fs_type);
        if (kmem_cache_destroy(hfsplus_inode_cachep))
-               printk(KERN_INFO "hfsplus_inode_cache: not all structures were freed\n");
+               printk(KERN_ERR "hfsplus_inode_cache: not all structures were freed\n");
 }
 
 module_init(init_hfsplus_fs)
index 060c69048c3ddc38fc867250f40c3dda66c7d1be..689c8bd721fb9231fa9d71853c033edaecbb365e 100644 (file)
@@ -28,7 +28,8 @@ static inline u16 case_fold(u16 c)
 }
 
 /* Compare unicode strings, return values like normal strcmp */
-int hfsplus_unistrcmp(const struct hfsplus_unistr *s1, const struct hfsplus_unistr *s2)
+int hfsplus_strcasecmp(const struct hfsplus_unistr *s1,
+                      const struct hfsplus_unistr *s2)
 {
        u16 len1, len2, c1, c2;
        const hfsplus_unichr *p1, *p2;
@@ -59,6 +60,33 @@ int hfsplus_unistrcmp(const struct hfsplus_unistr *s1, const struct hfsplus_unis
        }
 }
 
+/* Compare names as a sequence of 16-bit unsigned integers */
+int hfsplus_strcmp(const struct hfsplus_unistr *s1,
+                  const struct hfsplus_unistr *s2)
+{
+       u16 len1, len2, c1, c2;
+       const hfsplus_unichr *p1, *p2;
+       int len;
+
+       len1 = be16_to_cpu(s1->length);
+       len2 = be16_to_cpu(s2->length);
+       p1 = s1->unicode;
+       p2 = s2->unicode;
+
+       for (len = min(len1, len2); len > 0; len--) {
+               c1 = be16_to_cpu(*p1);
+               c2 = be16_to_cpu(*p2);
+               if (c1 != c2)
+                       return c1 < c2 ? -1 : 1;
+               p1++;
+               p2++;
+       }
+
+       return len1 < len2 ? -1 :
+              len1 > len2 ? 1 : 0;
+}
+
+
 #define Hangul_SBase   0xac00
 #define Hangul_LBase   0x1100
 #define Hangul_VBase   0x1161
index 95455e839231ee33a186485204ffc91fa193efeb..72cab78f05091282843baef84d6e03abdc8263aa 100644 (file)
@@ -28,8 +28,11 @@ static int hfsplus_read_mdb(void *bufptr, struct hfsplus_wd *wd)
 {
        u32 extent;
        u16 attrib;
+       __be16 sig;
 
-       if (be16_to_cpu(*(__be16 *)(bufptr + HFSP_WRAPOFF_EMBEDSIG)) != HFSPLUS_VOLHEAD_SIG)
+       sig = *(__be16 *)(bufptr + HFSP_WRAPOFF_EMBEDSIG);
+       if (sig != cpu_to_be16(HFSPLUS_VOLHEAD_SIG) &&
+           sig != cpu_to_be16(HFSPLUS_VOLHEAD_SIGX))
                return 0;
 
        attrib = be16_to_cpu(*(__be16 *)(bufptr + HFSP_WRAPOFF_ATTRIB));
@@ -70,7 +73,7 @@ static int hfsplus_get_last_session(struct super_block *sb,
                        *start = (sector_t)te.cdte_addr.lba << 2;
                        return 0;
                }
-               printk(KERN_ERR "HFS: Invalid session number or type of track\n");
+               printk(KERN_ERR "hfs: invalid session number or type of track\n");
                return -EINVAL;
        }
        ms_info.addr_format = CDROM_LBA;
@@ -114,6 +117,10 @@ int hfsplus_read_wrapper(struct super_block *sb)
                }
                if (vhdr->signature == cpu_to_be16(HFSPLUS_VOLHEAD_SIG))
                        break;
+               if (vhdr->signature == cpu_to_be16(HFSPLUS_VOLHEAD_SIGX)) {
+                       HFSPLUS_SB(sb).flags |= HFSPLUS_SB_HFSX;
+                       break;
+               }
                brelse(bh);
 
                /* check for a partition block
@@ -143,7 +150,7 @@ int hfsplus_read_wrapper(struct super_block *sb)
                blocksize >>= 1;
 
        if (sb_set_blocksize(sb, blocksize) != blocksize) {
-               printk("HFS+: unable to blocksize to %u!\n", blocksize);
+               printk(KERN_ERR "hfs: unable to set blocksize to %u!\n", blocksize);
                return -EINVAL;
        }
 
@@ -158,7 +165,9 @@ int hfsplus_read_wrapper(struct super_block *sb)
                return -EIO;
 
        /* should still be the same... */
-       if (be16_to_cpu(vhdr->signature) != HFSPLUS_VOLHEAD_SIG)
+       if (vhdr->signature != (HFSPLUS_SB(sb).flags & HFSPLUS_SB_HFSX ?
+                               cpu_to_be16(HFSPLUS_VOLHEAD_SIGX) :
+                               cpu_to_be16(HFSPLUS_VOLHEAD_SIG)))
                goto error;
        HFSPLUS_SB(sb).s_vhbh = bh;
        HFSPLUS_SB(sb).s_vhdr = vhdr;
index 2fecb7af4a77d6316171f0e25a40fd9027a71647..878ccca61213c7cefcff20ce7e1e5b8e10039aab 100644 (file)
@@ -33,6 +33,7 @@
 #include <linux/list.h>
 #include <linux/writeback.h>
 #include <linux/inotify.h>
+#include <linux/syscalls.h>
 
 #include <asm/ioctls.h>
 
index cb3cef525c3bb049d89b92fc1575c93fb1d79799..e6265a0b56b8de25e8bceccd9e6be1eff10159f7 100644 (file)
@@ -338,7 +338,7 @@ restart:
         * done (maybe it's a new transaction, but it fell at the same
         * address).
         */
-       if (journal->j_checkpoint_transactions == transaction ||
+       if (journal->j_checkpoint_transactions == transaction &&
                        transaction->t_tid == this_tid) {
                int batch_count = 0;
                struct buffer_head *bhs[NR_BATCH];
index 002ad2bbc76992b6acda52def147799886147693..29e62d98bae64b5f326fb94beb88e87a8c8f183a 100644 (file)
@@ -829,7 +829,8 @@ restart_loop:
        journal->j_committing_transaction = NULL;
        spin_unlock(&journal->j_state_lock);
 
-       if (commit_transaction->t_checkpoint_list == NULL) {
+       if (commit_transaction->t_checkpoint_list == NULL &&
+           commit_transaction->t_checkpoint_io_list == NULL) {
                __journal_drop_transaction(journal, commit_transaction);
        } else {
                if (journal->j_checkpoint_transactions == NULL) {
index 33fb5bd34a8111c73768cfac883fd4ea49ea07b8..4acdac043b6bb0a1d42b66b7a601513b2049f06b 100644 (file)
@@ -30,6 +30,8 @@
 #include <linux/audit.h>
 #include <linux/capability.h>
 #include <linux/file.h>
+#include <linux/fcntl.h>
+#include <linux/namei.h>
 #include <asm/namei.h>
 #include <asm/uaccess.h>
 
@@ -1063,7 +1065,8 @@ set_it:
 }
 
 /* Returns 0 and nd will be valid on success; Returns error, otherwise. */
-int fastcall path_lookup(const char *name, unsigned int flags, struct nameidata *nd)
+static int fastcall do_path_lookup(int dfd, const char *name,
+                               unsigned int flags, struct nameidata *nd)
 {
        int retval = 0;
 
@@ -1083,9 +1086,38 @@ int fastcall path_lookup(const char *name, unsigned int flags, struct nameidata
                }
                nd->mnt = mntget(current->fs->rootmnt);
                nd->dentry = dget(current->fs->root);
-       } else {
+       } else if (dfd == AT_FDCWD) {
                nd->mnt = mntget(current->fs->pwdmnt);
                nd->dentry = dget(current->fs->pwd);
+       } else {
+               struct file *file;
+               int fput_needed;
+               struct dentry *dentry;
+
+               file = fget_light(dfd, &fput_needed);
+               if (!file) {
+                       retval = -EBADF;
+                       goto out_fail;
+               }
+
+               dentry = file->f_dentry;
+
+               if (!S_ISDIR(dentry->d_inode->i_mode)) {
+                       retval = -ENOTDIR;
+                       fput_light(file, fput_needed);
+                       goto out_fail;
+               }
+
+               retval = file_permission(file, MAY_EXEC);
+               if (retval) {
+                       fput_light(file, fput_needed);
+                       goto out_fail;
+               }
+
+               nd->mnt = mntget(file->f_vfsmnt);
+               nd->dentry = dget(dentry);
+
+               fput_light(file, fput_needed);
        }
        read_unlock(&current->fs->lock);
        current->total_link_count = 0;
@@ -1094,11 +1126,19 @@ out:
        if (unlikely(current->audit_context
                     && nd && nd->dentry && nd->dentry->d_inode))
                audit_inode(name, nd->dentry->d_inode, flags);
+out_fail:
        return retval;
 }
 
-static int __path_lookup_intent_open(const char *name, unsigned int lookup_flags,
-               struct nameidata *nd, int open_flags, int create_mode)
+int fastcall path_lookup(const char *name, unsigned int flags,
+                       struct nameidata *nd)
+{
+       return do_path_lookup(AT_FDCWD, name, flags, nd);
+}
+
+static int __path_lookup_intent_open(int dfd, const char *name,
+               unsigned int lookup_flags, struct nameidata *nd,
+               int open_flags, int create_mode)
 {
        struct file *filp = get_empty_filp();
        int err;
@@ -1108,7 +1148,7 @@ static int __path_lookup_intent_open(const char *name, unsigned int lookup_flags
        nd->intent.open.file = filp;
        nd->intent.open.flags = open_flags;
        nd->intent.open.create_mode = create_mode;
-       err = path_lookup(name, lookup_flags|LOOKUP_OPEN, nd);
+       err = do_path_lookup(dfd, name, lookup_flags|LOOKUP_OPEN, nd);
        if (IS_ERR(nd->intent.open.file)) {
                if (err == 0) {
                        err = PTR_ERR(nd->intent.open.file);
@@ -1126,10 +1166,10 @@ static int __path_lookup_intent_open(const char *name, unsigned int lookup_flags
  * @nd: pointer to nameidata
  * @open_flags: open intent flags
  */
-int path_lookup_open(const char *name, unsigned int lookup_flags,
+int path_lookup_open(int dfd, const char *name, unsigned int lookup_flags,
                struct nameidata *nd, int open_flags)
 {
-       return __path_lookup_intent_open(name, lookup_flags, nd,
+       return __path_lookup_intent_open(dfd, name, lookup_flags, nd,
                        open_flags, 0);
 }
 
@@ -1141,12 +1181,12 @@ int path_lookup_open(const char *name, unsigned int lookup_flags,
  * @open_flags: open intent flags
  * @create_mode: create intent flags
  */
-static int path_lookup_create(const char *name, unsigned int lookup_flags,
-                             struct nameidata *nd, int open_flags,
-                             int create_mode)
+static int path_lookup_create(int dfd, const char *name,
+                             unsigned int lookup_flags, struct nameidata *nd,
+                             int open_flags, int create_mode)
 {
-       return __path_lookup_intent_open(name, lookup_flags|LOOKUP_CREATE, nd,
-                       open_flags, create_mode);
+       return __path_lookup_intent_open(dfd, name, lookup_flags|LOOKUP_CREATE,
+                       nd, open_flags, create_mode);
 }
 
 int __user_path_lookup_open(const char __user *name, unsigned int lookup_flags,
@@ -1156,7 +1196,7 @@ int __user_path_lookup_open(const char __user *name, unsigned int lookup_flags,
        int err = PTR_ERR(tmp);
 
        if (!IS_ERR(tmp)) {
-               err = __path_lookup_intent_open(tmp, lookup_flags, nd, open_flags, 0);
+               err = __path_lookup_intent_open(AT_FDCWD, tmp, lookup_flags, nd, open_flags, 0);
                putname(tmp);
        }
        return err;
@@ -1248,18 +1288,24 @@ access:
  * that namei follows links, while lnamei does not.
  * SMP-safe
  */
-int fastcall __user_walk(const char __user *name, unsigned flags, struct nameidata *nd)
+int fastcall __user_walk_fd(int dfd, const char __user *name, unsigned flags,
+                           struct nameidata *nd)
 {
        char *tmp = getname(name);
        int err = PTR_ERR(tmp);
 
        if (!IS_ERR(tmp)) {
-               err = path_lookup(tmp, flags, nd);
+               err = do_path_lookup(dfd, tmp, flags, nd);
                putname(tmp);
        }
        return err;
 }
 
+int fastcall __user_walk(const char __user *name, unsigned flags, struct nameidata *nd)
+{
+       return __user_walk_fd(AT_FDCWD, name, flags, nd);
+}
+
 /*
  * It's inline, so penalty for filesystems that don't use sticky bit is
  * minimal.
@@ -1518,7 +1564,8 @@ int may_open(struct nameidata *nd, int acc_mode, int flag)
  * for symlinks (where the permissions are checked later).
  * SMP-safe
  */
-int open_namei(const char * pathname, int flag, int mode, struct nameidata *nd)
+int open_namei(int dfd, const char *pathname, int flag,
+               int mode, struct nameidata *nd)
 {
        int acc_mode, error;
        struct path path;
@@ -1540,7 +1587,8 @@ int open_namei(const char * pathname, int flag, int mode, struct nameidata *nd)
         * The simplest case - just a plain lookup.
         */
        if (!(flag & O_CREAT)) {
-               error = path_lookup_open(pathname, lookup_flags(flag), nd, flag);
+               error = path_lookup_open(dfd, pathname, lookup_flags(flag),
+                                        nd, flag);
                if (error)
                        return error;
                goto ok;
@@ -1549,7 +1597,7 @@ int open_namei(const char * pathname, int flag, int mode, struct nameidata *nd)
        /*
         * Create - we need to know the parent.
         */
-       error = path_lookup_create(pathname, LOOKUP_PARENT, nd, flag, mode);
+       error = path_lookup_create(dfd,pathname,LOOKUP_PARENT,nd,flag,mode);
        if (error)
                return error;
 
@@ -1744,7 +1792,8 @@ int vfs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev)
        return error;
 }
 
-asmlinkage long sys_mknod(const char __user * filename, int mode, unsigned dev)
+asmlinkage long sys_mknodat(int dfd, const char __user *filename, int mode,
+                               unsigned dev)
 {
        int error = 0;
        char * tmp;
@@ -1757,7 +1806,7 @@ asmlinkage long sys_mknod(const char __user * filename, int mode, unsigned dev)
        if (IS_ERR(tmp))
                return PTR_ERR(tmp);
 
-       error = path_lookup(tmp, LOOKUP_PARENT, &nd);
+       error = do_path_lookup(dfd, tmp, LOOKUP_PARENT, &nd);
        if (error)
                goto out;
        dentry = lookup_create(&nd, 0);
@@ -1793,6 +1842,11 @@ out:
        return error;
 }
 
+asmlinkage long sys_mknod(const char __user *filename, int mode, unsigned dev)
+{
+       return sys_mknodat(AT_FDCWD, filename, mode, dev);
+}
+
 int vfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
 {
        int error = may_create(dir, dentry, NULL);
@@ -1815,7 +1869,7 @@ int vfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
        return error;
 }
 
-asmlinkage long sys_mkdir(const char __user * pathname, int mode)
+asmlinkage long sys_mkdirat(int dfd, const char __user *pathname, int mode)
 {
        int error = 0;
        char * tmp;
@@ -1826,7 +1880,7 @@ asmlinkage long sys_mkdir(const char __user * pathname, int mode)
                struct dentry *dentry;
                struct nameidata nd;
 
-               error = path_lookup(tmp, LOOKUP_PARENT, &nd);
+               error = do_path_lookup(dfd, tmp, LOOKUP_PARENT, &nd);
                if (error)
                        goto out;
                dentry = lookup_create(&nd, 1);
@@ -1846,6 +1900,11 @@ out:
        return error;
 }
 
+asmlinkage long sys_mkdir(const char __user *pathname, int mode)
+{
+       return sys_mkdirat(AT_FDCWD, pathname, mode);
+}
+
 /*
  * We try to drop the dentry early: we should have
  * a usage count of 2 if we're the only user of this
@@ -1907,7 +1966,7 @@ int vfs_rmdir(struct inode *dir, struct dentry *dentry)
        return error;
 }
 
-asmlinkage long sys_rmdir(const char __user * pathname)
+static long do_rmdir(int dfd, const char __user *pathname)
 {
        int error = 0;
        char * name;
@@ -1918,7 +1977,7 @@ asmlinkage long sys_rmdir(const char __user * pathname)
        if(IS_ERR(name))
                return PTR_ERR(name);
 
-       error = path_lookup(name, LOOKUP_PARENT, &nd);
+       error = do_path_lookup(dfd, name, LOOKUP_PARENT, &nd);
        if (error)
                goto exit;
 
@@ -1948,6 +2007,11 @@ exit:
        return error;
 }
 
+asmlinkage long sys_rmdir(const char __user *pathname)
+{
+       return do_rmdir(AT_FDCWD, pathname);
+}
+
 int vfs_unlink(struct inode *dir, struct dentry *dentry)
 {
        int error = may_delete(dir, dentry, 0);
@@ -1984,7 +2048,7 @@ int vfs_unlink(struct inode *dir, struct dentry *dentry)
  * writeout happening, and we don't want to prevent access to the directory
  * while waiting on the I/O.
  */
-asmlinkage long sys_unlink(const char __user * pathname)
+static long do_unlinkat(int dfd, const char __user *pathname)
 {
        int error = 0;
        char * name;
@@ -1996,7 +2060,7 @@ asmlinkage long sys_unlink(const char __user * pathname)
        if(IS_ERR(name))
                return PTR_ERR(name);
 
-       error = path_lookup(name, LOOKUP_PARENT, &nd);
+       error = do_path_lookup(dfd, name, LOOKUP_PARENT, &nd);
        if (error)
                goto exit;
        error = -EISDIR;
@@ -2031,6 +2095,22 @@ slashes:
        goto exit2;
 }
 
+asmlinkage long sys_unlinkat(int dfd, const char __user *pathname, int flag)
+{
+       if ((flag & ~AT_REMOVEDIR) != 0)
+               return -EINVAL;
+
+       if (flag & AT_REMOVEDIR)
+               return do_rmdir(dfd, pathname);
+
+       return do_unlinkat(dfd, pathname);
+}
+
+asmlinkage long sys_unlink(const char __user *pathname)
+{
+       return do_unlinkat(AT_FDCWD, pathname);
+}
+
 int vfs_symlink(struct inode *dir, struct dentry *dentry, const char *oldname, int mode)
 {
        int error = may_create(dir, dentry, NULL);
@@ -2052,7 +2132,8 @@ int vfs_symlink(struct inode *dir, struct dentry *dentry, const char *oldname, i
        return error;
 }
 
-asmlinkage long sys_symlink(const char __user * oldname, const char __user * newname)
+asmlinkage long sys_symlinkat(const char __user *oldname,
+                             int newdfd, const char __user *newname)
 {
        int error = 0;
        char * from;
@@ -2067,7 +2148,7 @@ asmlinkage long sys_symlink(const char __user * oldname, const char __user * new
                struct dentry *dentry;
                struct nameidata nd;
 
-               error = path_lookup(to, LOOKUP_PARENT, &nd);
+               error = do_path_lookup(newdfd, to, LOOKUP_PARENT, &nd);
                if (error)
                        goto out;
                dentry = lookup_create(&nd, 0);
@@ -2085,6 +2166,11 @@ out:
        return error;
 }
 
+asmlinkage long sys_symlink(const char __user *oldname, const char __user *newname)
+{
+       return sys_symlinkat(oldname, AT_FDCWD, newname);
+}
+
 int vfs_link(struct dentry *old_dentry, struct inode *dir, struct dentry *new_dentry)
 {
        struct inode *inode = old_dentry->d_inode;
@@ -2132,7 +2218,8 @@ int vfs_link(struct dentry *old_dentry, struct inode *dir, struct dentry *new_de
  * with linux 2.0, and to avoid hard-linking to directories
  * and other special files.  --ADM
  */
-asmlinkage long sys_link(const char __user * oldname, const char __user * newname)
+asmlinkage long sys_linkat(int olddfd, const char __user *oldname,
+                          int newdfd, const char __user *newname)
 {
        struct dentry *new_dentry;
        struct nameidata nd, old_nd;
@@ -2143,10 +2230,10 @@ asmlinkage long sys_link(const char __user * oldname, const char __user * newnam
        if (IS_ERR(to))
                return PTR_ERR(to);
 
-       error = __user_walk(oldname, 0, &old_nd);
+       error = __user_walk_fd(olddfd, oldname, 0, &old_nd);
        if (error)
                goto exit;
-       error = path_lookup(to, LOOKUP_PARENT, &nd);
+       error = do_path_lookup(newdfd, to, LOOKUP_PARENT, &nd);
        if (error)
                goto out;
        error = -EXDEV;
@@ -2169,6 +2256,11 @@ exit:
        return error;
 }
 
+asmlinkage long sys_link(const char __user *oldname, const char __user *newname)
+{
+       return sys_linkat(AT_FDCWD, oldname, AT_FDCWD, newname);
+}
+
 /*
  * The worst of all namespace operations - renaming directory. "Perverted"
  * doesn't even start to describe it. Somebody in UCB had a heck of a trip...
@@ -2315,7 +2407,8 @@ int vfs_rename(struct inode *old_dir, struct dentry *old_dentry,
        return error;
 }
 
-static int do_rename(const char * oldname, const char * newname)
+static int do_rename(int olddfd, const char *oldname,
+                       int newdfd, const char *newname)
 {
        int error = 0;
        struct dentry * old_dir, * new_dir;
@@ -2323,11 +2416,11 @@ static int do_rename(const char * oldname, const char * newname)
        struct dentry * trap;
        struct nameidata oldnd, newnd;
 
-       error = path_lookup(oldname, LOOKUP_PARENT, &oldnd);
+       error = do_path_lookup(olddfd, oldname, LOOKUP_PARENT, &oldnd);
        if (error)
                goto exit;
 
-       error = path_lookup(newname, LOOKUP_PARENT, &newnd);
+       error = do_path_lookup(newdfd, newname, LOOKUP_PARENT, &newnd);
        if (error)
                goto exit1;
 
@@ -2391,7 +2484,8 @@ exit:
        return error;
 }
 
-asmlinkage long sys_rename(const char __user * oldname, const char __user * newname)
+asmlinkage long sys_renameat(int olddfd, const char __user *oldname,
+                            int newdfd, const char __user *newname)
 {
        int error;
        char * from;
@@ -2403,13 +2497,18 @@ asmlinkage long sys_rename(const char __user * oldname, const char __user * newn
        to = getname(newname);
        error = PTR_ERR(to);
        if (!IS_ERR(to)) {
-               error = do_rename(from,to);
+               error = do_rename(olddfd, from, newdfd, to);
                putname(to);
        }
        putname(from);
        return error;
 }
 
+asmlinkage long sys_rename(const char __user *oldname, const char __user *newname)
+{
+       return sys_renameat(AT_FDCWD, oldname, AT_FDCWD, newname);
+}
+
 int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const char *link)
 {
        int len;
@@ -2553,6 +2652,7 @@ struct inode_operations page_symlink_inode_operations = {
 };
 
 EXPORT_SYMBOL(__user_walk);
+EXPORT_SYMBOL(__user_walk_fd);
 EXPORT_SYMBOL(follow_down);
 EXPORT_SYMBOL(follow_up);
 EXPORT_SYMBOL(get_write_access); /* binfmt_aout */
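
The hunks above funnel every lookup through do_path_lookup() with a directory
file descriptor and add the dfd-taking syscalls (mknodat, mkdirat, unlinkat,
symlinkat, linkat, renameat) as thin wrappers whose classic counterparts pass
AT_FDCWD. A minimal user-space sketch of the intended semantics; it is not
part of the patch and assumes a libc that already exposes the *at wrappers
and the AT_* constants:

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <sys/stat.h>
#include <unistd.h>

int main(void)
{
        /* Resolve names relative to this directory descriptor, independent
         * of the process's current working directory. */
        int dfd = open("/tmp", O_RDONLY | O_DIRECTORY);

        if (dfd < 0) {
                perror("open /tmp");
                return 1;
        }
        if (mkdirat(dfd, "demo", 0700) < 0)                 /* /tmp/demo */
                perror("mkdirat");
        if (symlinkat("demo", dfd, "demo.lnk") < 0)         /* /tmp/demo.lnk -> demo */
                perror("symlinkat");
        if (renameat(dfd, "demo.lnk", dfd, "demo.link") < 0)
                perror("renameat");
        if (unlinkat(dfd, "demo.link", 0) < 0)              /* plain unlink */
                perror("unlinkat");
        if (unlinkat(dfd, "demo", AT_REMOVEDIR) < 0)        /* rmdir via unlinkat */
                perror("unlinkat AT_REMOVEDIR");
        close(dfd);
        /* Passing AT_FDCWD instead of dfd gives the old cwd-relative
         * behaviour, which is exactly how sys_mkdir(), sys_unlink() and
         * friends are implemented above. */
        return 0;
}
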
index 0b14938b5b627ceac85590762fb50ec68f1c872f..0d4cf948606866f71d1807883da0afbf35968668 100644
@@ -5,6 +5,7 @@
  *
  */
 #include <linux/config.h>
+#include <linux/types.h>
 #include <linux/file.h>
 #include <linux/fs.h>
 #include <linux/sunrpc/svc.h>
index 361b4007d4a00867059e885855e26549ea702da3..a00fe86862935a5c053505baca0fd6d1cdbdad09 100644
@@ -192,6 +192,14 @@ nfsd4_open(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_open
        }
        if (status)
                goto out;
+
+       /* Openowner is now set, so sequence id will get bumped.  Now we need
+        * these checks before we do any creates: */
+       if (nfs4_in_grace() && open->op_claim_type != NFS4_OPEN_CLAIM_PREVIOUS)
+               return nfserr_grace;
+       if (!nfs4_in_grace() && open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS)
+               return nfserr_no_grace;
+
        switch (open->op_claim_type) {
                case NFS4_OPEN_CLAIM_DELEGATE_CUR:
                        status = nfserr_inval;
@@ -210,6 +218,7 @@ nfsd4_open(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_open
                                goto out;
                        break;
                case NFS4_OPEN_CLAIM_PREVIOUS:
+                       open->op_stateowner->so_confirmed = 1;
                        /*
                         * The CURRENT_FH is already set to the file being
                         * opened.  (1) set open->op_cinfo, (2) set
@@ -221,6 +230,7 @@ nfsd4_open(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_open
                                goto out;
                        break;
                case NFS4_OPEN_CLAIM_DELEGATE_PREV:
+                       open->op_stateowner->so_confirmed = 1;
                        printk("NFSD: unsupported OPEN claim type %d\n",
                                open->op_claim_type);
                        status = nfserr_notsupp;
@@ -584,31 +594,23 @@ nfsd4_setattr(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_se
 {
        int status = nfs_ok;
 
-       if (!current_fh->fh_dentry)
-               return nfserr_nofilehandle;
-
-       status = nfs_ok;
        if (setattr->sa_iattr.ia_valid & ATTR_SIZE) {
                nfs4_lock_state();
-               if ((status = nfs4_preprocess_stateid_op(current_fh,
-                                               &setattr->sa_stateid,
-                                               CHECK_FH | WR_STATE, NULL))) {
-                       dprintk("NFSD: nfsd4_setattr: couldn't process stateid!\n");
-                       goto out_unlock;
-               }
+               status = nfs4_preprocess_stateid_op(current_fh,
+                       &setattr->sa_stateid, CHECK_FH | WR_STATE, NULL);
                nfs4_unlock_state();
+               if (status) {
+                       dprintk("NFSD: nfsd4_setattr: couldn't process stateid!\n");
+                       return status;
+               }
        }
        status = nfs_ok;
        if (setattr->sa_acl != NULL)
                status = nfsd4_set_nfs4_acl(rqstp, current_fh, setattr->sa_acl);
        if (status)
-               goto out;
+               return status;
        status = nfsd_setattr(rqstp, current_fh, &setattr->sa_iattr,
                                0, (time_t)0);
-out:
-       return status;
-out_unlock:
-       nfs4_unlock_state();
        return status;
 }
 
@@ -626,15 +628,17 @@ nfsd4_write(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_writ
                return nfserr_inval;
 
        nfs4_lock_state();
-       if ((status = nfs4_preprocess_stateid_op(current_fh, stateid,
-                                       CHECK_FH | WR_STATE, &filp))) {
-               dprintk("NFSD: nfsd4_write: couldn't process stateid!\n");
-               goto out;
-       }
+       status = nfs4_preprocess_stateid_op(current_fh, stateid,
+                                       CHECK_FH | WR_STATE, &filp);
        if (filp)
                get_file(filp);
        nfs4_unlock_state();
 
+       if (status) {
+               dprintk("NFSD: nfsd4_write: couldn't process stateid!\n");
+               return status;
+       }
+
        write->wr_bytes_written = write->wr_buflen;
        write->wr_how_written = write->wr_stable_how;
        p = (u32 *)write->wr_verifier.data;
@@ -650,9 +654,6 @@ nfsd4_write(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_writ
        if (status == nfserr_symlink)
                status = nfserr_inval;
        return status;
-out:
-       nfs4_unlock_state();
-       return status;
 }
 
 /* This routine never returns NFS_OK!  If there are no other errors, it
@@ -768,6 +769,8 @@ nfsd4_proc_compound(struct svc_rqst *rqstp,
        while (!status && resp->opcnt < args->opcnt) {
                op = &args->ops[resp->opcnt++];
 
+               dprintk("nfsv4 compound op #%d: %d\n", resp->opcnt, op->opnum);
+
                /*
                 * The XDR decode routines may have pre-set op->status;
                 * for example, if there is a miscellaneous XDR error
@@ -792,17 +795,13 @@ nfsd4_proc_compound(struct svc_rqst *rqstp,
                /* All operations except RENEW, SETCLIENTID, RESTOREFH
                * SETCLIENTID_CONFIRM, PUTFH and PUTROOTFH
                * require a valid current filehandle
-               *
-               * SETATTR NOFILEHANDLE error handled in nfsd4_setattr
-               * due to required returned bitmap argument
                */
                if ((!current_fh->fh_dentry) &&
                   !((op->opnum == OP_PUTFH) || (op->opnum == OP_PUTROOTFH) ||
                   (op->opnum == OP_SETCLIENTID) ||
                   (op->opnum == OP_SETCLIENTID_CONFIRM) ||
                   (op->opnum == OP_RENEW) || (op->opnum == OP_RESTOREFH) ||
-                  (op->opnum == OP_RELEASE_LOCKOWNER) ||
-                  (op->opnum == OP_SETATTR))) {
+                  (op->opnum == OP_RELEASE_LOCKOWNER))) {
                        op->status = nfserr_nofilehandle;
                        goto encode_op;
                }
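
The two checks hoisted into nfsd4_open() above gate OPEN by claim type:
during the server's grace period only reclaims (NFS4_OPEN_CLAIM_PREVIOUS)
are admitted, and outside the grace period reclaims are refused. A
stand-alone restatement of that decision table; check_claim() and the enum
values are invented names for this sketch, not kernel symbols:

enum claim_gate { CLAIM_ALLOWED, CLAIM_ERR_GRACE, CLAIM_ERR_NO_GRACE };

/* in_grace models nfs4_in_grace(); is_reclaim means CLAIM_PREVIOUS */
static enum claim_gate check_claim(int in_grace, int is_reclaim)
{
        if (in_grace && !is_reclaim)
                return CLAIM_ERR_GRACE;         /* the nfserr_grace case */
        if (!in_grace && is_reclaim)
                return CLAIM_ERR_NO_GRACE;      /* the nfserr_no_grace case */
        return CLAIM_ALLOWED;
}
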
index be963a133aaafa9f2e7856d730ba0d79a988c76d..06da7506363cbcd1548c0e0a59302db497d7ca21 100644
@@ -222,8 +222,7 @@ nfsd4_list_rec_dir(struct dentry *dir, recdir_func *f)
 
        nfs4_save_user(&uid, &gid);
 
-       filp = dentry_open(dget(dir), mntget(rec_dir.mnt),
-                       O_RDWR);
+       filp = dentry_open(dget(dir), mntget(rec_dir.mnt), O_RDONLY);
        status = PTR_ERR(filp);
        if (IS_ERR(filp))
                goto out;
@@ -400,9 +399,10 @@ nfsd4_init_recdir(char *rec_dirname)
 
        nfs4_save_user(&uid, &gid);
 
-       status = path_lookup(rec_dirname, LOOKUP_FOLLOW, &rec_dir);
-       if (status == -ENOENT)
-               printk("NFSD: recovery directory %s doesn't exist\n",
+       status = path_lookup(rec_dirname, LOOKUP_FOLLOW | LOOKUP_DIRECTORY,
+                       &rec_dir);
+       if (status)
+               printk("NFSD: unable to find recovery directory %s\n",
                                rec_dirname);
 
        if (!status)
index 6bbefd06f10dea127ee0bf406eb1ef01fcfd4247..1143cfb6454900e73bb65a29e315e7a5810b9114 100644
@@ -1088,7 +1088,7 @@ alloc_init_open_stateowner(unsigned int strhashval, struct nfs4_client *clp, str
        sop->so_seqid = open->op_seqid;
        sop->so_confirmed = 0;
        rp = &sop->so_replay;
-       rp->rp_status = NFSERR_SERVERFAULT;
+       rp->rp_status = nfserr_serverfault;
        rp->rp_buflen = 0;
        rp->rp_buf = rp->rp_ibuf;
        return sop;
@@ -1178,7 +1178,6 @@ release_stateid(struct nfs4_stateid *stp, int flags)
                locks_remove_posix(filp, (fl_owner_t) stp->st_stateowner);
        put_nfs4_file(stp->st_file);
        kmem_cache_free(stateid_slab, stp);
-       stp = NULL;
 }
 
 static void
@@ -1191,22 +1190,6 @@ move_to_close_lru(struct nfs4_stateowner *sop)
        sop->so_time = get_seconds();
 }
 
-static void
-release_state_owner(struct nfs4_stateid *stp, int flag)
-{
-       struct nfs4_stateowner *sop = stp->st_stateowner;
-
-       dprintk("NFSD: release_state_owner\n");
-       release_stateid(stp, flag);
-
-       /* place unused nfs4_stateowners on so_close_lru list to be
-        * released by the laundromat service after the lease period
-        * to enable us to handle CLOSE replay
-        */
-       if (sop->so_confirmed && list_empty(&sop->so_stateids))
-               move_to_close_lru(sop);
-}
-
 static int
 cmp_owner_str(struct nfs4_stateowner *sop, struct xdr_netobj *owner, clientid_t *clid) {
        return ((sop->so_owner.len == owner->len) && 
@@ -1446,92 +1429,61 @@ static struct lock_manager_operations nfsd_lease_mng_ops = {
 };
 
 
-/*
- * nfsd4_process_open1()
- *     lookup stateowner.
- *             found:
- *                     check confirmed 
- *                             confirmed:
- *                                     check seqid
- *                             not confirmed:
- *                                     delete owner
- *                                     create new owner
- *             notfound:
- *                     verify clientid
- *                     create new owner
- *
- * called with nfs4_lock_state() held.
- */
 int
 nfsd4_process_open1(struct nfsd4_open *open)
 {
-       int status;
        clientid_t *clientid = &open->op_clientid;
        struct nfs4_client *clp = NULL;
        unsigned int strhashval;
        struct nfs4_stateowner *sop = NULL;
 
-       status = nfserr_inval;
        if (!check_name(open->op_owner))
-               goto out;
+               return nfserr_inval;
 
        if (STALE_CLIENTID(&open->op_clientid))
                return nfserr_stale_clientid;
 
        strhashval = ownerstr_hashval(clientid->cl_id, open->op_owner);
        sop = find_openstateowner_str(strhashval, open);
-       if (sop) {
-               open->op_stateowner = sop;
-               /* check for replay */
-               if (open->op_seqid == sop->so_seqid - 1){
-                       if (sop->so_replay.rp_buflen)
-                               return NFSERR_REPLAY_ME;
-                       else {
-                               /* The original OPEN failed so spectacularly
-                                * that we don't even have replay data saved!
-                                * Therefore, we have no choice but to continue
-                                * processing this OPEN; presumably, we'll
-                                * fail again for the same reason.
-                                */
-                               dprintk("nfsd4_process_open1:"
-                                       " replay with no replay cache\n");
-                               goto renew;
-                       }
-               } else if (sop->so_confirmed) {
-                       if (open->op_seqid == sop->so_seqid)
-                               goto renew;
-                       status = nfserr_bad_seqid;
-                       goto out;
-               } else {
-                       /* If we get here, we received an OPEN for an
-                        * unconfirmed nfs4_stateowner. Since the seqid's are
-                        * different, purge the existing nfs4_stateowner, and
-                        * instantiate a new one.
-                        */
-                       clp = sop->so_client;
-                       release_stateowner(sop);
-               }
-       } else {
-               /* nfs4_stateowner not found.
-                * Verify clientid and instantiate new nfs4_stateowner.
-                * If verify fails this is presumably the result of the
-                * client's lease expiring.
-                */
-               status = nfserr_expired;
+       open->op_stateowner = sop;
+       if (!sop) {
+               /* Make sure the client's lease hasn't expired. */
                clp = find_confirmed_client(clientid);
                if (clp == NULL)
-                       goto out;
+                       return nfserr_expired;
+               goto renew;
        }
-       status = nfserr_resource;
-       sop = alloc_init_open_stateowner(strhashval, clp, open);
-       if (sop == NULL)
-               goto out;
-       open->op_stateowner = sop;
+       if (!sop->so_confirmed) {
+               /* Replace unconfirmed owners without checking for replay. */
+               clp = sop->so_client;
+               release_stateowner(sop);
+               open->op_stateowner = NULL;
+               goto renew;
+       }
+       if (open->op_seqid == sop->so_seqid - 1) {
+               if (sop->so_replay.rp_buflen)
+                       return NFSERR_REPLAY_ME;
+               /* The original OPEN failed so spectacularly
+                * that we don't even have replay data saved!
+                * Therefore, we have no choice but to continue
+                * processing this OPEN; presumably, we'll
+                * fail again for the same reason.
+                */
+               dprintk("nfsd4_process_open1: replay with no replay cache\n");
+               goto renew;
+       }
+       if (open->op_seqid != sop->so_seqid)
+               return nfserr_bad_seqid;
 renew:
-       status = nfs_ok;
+       if (open->op_stateowner == NULL) {
+               sop = alloc_init_open_stateowner(strhashval, clp, open);
+               if (sop == NULL)
+                       return nfserr_resource;
+               open->op_stateowner = sop;
+       }
+       list_del_init(&sop->so_close_lru);
        renew_client(sop->so_client);
-out:
-       return status;
+       return nfs_ok;
 }
 
 static inline int
@@ -1648,7 +1600,7 @@ nfsd4_truncate(struct svc_rqst *rqstp, struct svc_fh *fh,
        if (!open->op_truncate)
                return 0;
        if (!(open->op_share_access & NFS4_SHARE_ACCESS_WRITE))
-               return -EINVAL;
+               return nfserr_inval;
        return nfsd_setattr(rqstp, fh, &iattr, 0, (time_t)0);
 }
 
@@ -1657,26 +1609,26 @@ nfs4_upgrade_open(struct svc_rqst *rqstp, struct svc_fh *cur_fh, struct nfs4_sta
 {
        struct file *filp = stp->st_vfs_file;
        struct inode *inode = filp->f_dentry->d_inode;
-       unsigned int share_access;
+       unsigned int share_access, new_writer;
        int status;
 
        set_access(&share_access, stp->st_access_bmap);
-       share_access = ~share_access;
-       share_access &= open->op_share_access;
-
-       if (!(share_access & NFS4_SHARE_ACCESS_WRITE))
-               return nfsd4_truncate(rqstp, cur_fh, open);
+       new_writer = (~share_access) & open->op_share_access
+                       & NFS4_SHARE_ACCESS_WRITE;
 
-       status = get_write_access(inode);
-       if (status)
-               return nfserrno(status);
+       if (new_writer) {
+               status = get_write_access(inode);
+               if (status)
+                       return nfserrno(status);
+       }
        status = nfsd4_truncate(rqstp, cur_fh, open);
        if (status) {
-               put_write_access(inode);
+               if (new_writer)
+                       put_write_access(inode);
                return status;
        }
        /* remember the open */
-       filp->f_mode = (filp->f_mode | FMODE_WRITE) & ~FMODE_READ;
+       filp->f_mode |= open->op_share_access;
        set_bit(open->op_share_access, &stp->st_access_bmap);
        set_bit(open->op_share_deny, &stp->st_deny_bmap);
 
@@ -1780,12 +1732,6 @@ nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nf
        struct nfs4_delegation *dp = NULL;
        int status;
 
-       if (nfs4_in_grace() && open->op_claim_type != NFS4_OPEN_CLAIM_PREVIOUS)
-               return nfserr_grace;
-
-       if (!nfs4_in_grace() && open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS)
-               return nfserr_no_grace;
-
        status = nfserr_inval;
        if (!TEST_ACCESS(open->op_share_access) || !TEST_DENY(open->op_share_deny))
                goto out;
@@ -2423,15 +2369,19 @@ nfsd4_close(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_clos
                                        CHECK_FH | OPEN_STATE | CLOSE_STATE,
                                        &close->cl_stateowner, &stp, NULL)))
                goto out; 
-       /*
-       *  Return success, but first update the stateid.
-       */
        status = nfs_ok;
        update_stateid(&stp->st_stateid);
        memcpy(&close->cl_stateid, &stp->st_stateid, sizeof(stateid_t));
 
-       /* release_state_owner() calls nfsd_close() if needed */
-       release_state_owner(stp, OPEN_STATE);
+       /* release_stateid() calls nfsd_close() if needed */
+       release_stateid(stp, OPEN_STATE);
+
+       /* place unused nfs4_stateowners on so_close_lru list to be
+        * released by the laundromat service after the lease period
+        * to enable us to handle CLOSE replay
+        */
+       if (list_empty(&close->cl_stateowner->so_stateids))
+               move_to_close_lru(close->cl_stateowner);
 out:
        if (close->cl_stateowner) {
                nfs4_get_stateowner(close->cl_stateowner);
@@ -2633,7 +2583,7 @@ alloc_init_lock_stateowner(unsigned int strhashval, struct nfs4_client *clp, str
        sop->so_seqid = lock->lk_new_lock_seqid + 1;
        sop->so_confirmed = 1;
        rp = &sop->so_replay;
-       rp->rp_status = NFSERR_SERVERFAULT;
+       rp->rp_status = nfserr_serverfault;
        rp->rp_buflen = 0;
        rp->rp_buf = rp->rp_ibuf;
        return sop;
@@ -2700,6 +2650,11 @@ nfsd4_lock(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_lock
        if (check_lock_length(lock->lk_offset, lock->lk_length))
                 return nfserr_inval;
 
+       if ((status = fh_verify(rqstp, current_fh, S_IFREG, MAY_LOCK))) {
+               dprintk("NFSD: nfsd4_lock: permission denied!\n");
+               return status;
+       }
+
        nfs4_lock_state();
 
        if (lock->lk_is_new) {
@@ -2720,11 +2675,11 @@ nfsd4_lock(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_lock
                                        lock->lk_new_open_seqid,
                                        &lock->lk_new_open_stateid,
                                        CHECK_FH | OPEN_STATE,
-                                       &lock->lk_stateowner, &open_stp,
+                                       &lock->lk_replay_owner, &open_stp,
                                        lock);
                if (status)
                        goto out;
-               open_sop = lock->lk_stateowner;
+               open_sop = lock->lk_replay_owner;
                /* create lockowner and lock stateid */
                fp = open_stp->st_file;
                strhashval = lock_ownerstr_hashval(fp->fi_inode, 
@@ -2739,29 +2694,22 @@ nfsd4_lock(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_lock
                if (lock_sop == NULL)
                        goto out;
                lock_stp = alloc_init_lock_stateid(lock_sop, fp, open_stp);
-               if (lock_stp == NULL) {
-                       release_stateowner(lock_sop);
+               if (lock_stp == NULL)
                        goto out;
-               }
        } else {
                /* lock (lock owner + lock stateid) already exists */
                status = nfs4_preprocess_seqid_op(current_fh,
                                       lock->lk_old_lock_seqid, 
                                       &lock->lk_old_lock_stateid, 
                                       CHECK_FH | LOCK_STATE, 
-                                      &lock->lk_stateowner, &lock_stp, lock);
+                                      &lock->lk_replay_owner, &lock_stp, lock);
                if (status)
                        goto out;
-               lock_sop = lock->lk_stateowner;
+               lock_sop = lock->lk_replay_owner;
        }
-       /* lock->lk_stateowner and lock_stp have been created or found */
+       /* lock->lk_replay_owner and lock_stp have been created or found */
        filp = lock_stp->st_vfs_file;
 
-       if ((status = fh_verify(rqstp, current_fh, S_IFREG, MAY_LOCK))) {
-               dprintk("NFSD: nfsd4_lock: permission denied!\n");
-               goto out;
-       }
-
        status = nfserr_grace;
        if (nfs4_in_grace() && !lock->lk_reclaim)
                goto out;
@@ -2802,8 +2750,6 @@ nfsd4_lock(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_lock
        */
 
        status = posix_lock_file(filp, &file_lock);
-       if (file_lock.fl_ops && file_lock.fl_ops->fl_release_private)
-               file_lock.fl_ops->fl_release_private(&file_lock);
        dprintk("NFSD: nfsd4_lock: posix_lock_file status %d\n",status);
        switch (-status) {
        case 0: /* success! */
@@ -2815,9 +2761,12 @@ nfsd4_lock(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_lock
                goto conflicting_lock;
        case (EDEADLK):
                status = nfserr_deadlock;
+               dprintk("NFSD: nfsd4_lock: posix_lock_file() failed! status %d\n",status);
+               goto out;
        default:        
+               status = nfserrno(status);
                dprintk("NFSD: nfsd4_lock: posix_lock_file() failed! status %d\n",status);
-               goto out_destroy_new_stateid;
+               goto out;
        }
 
 conflicting_lock:
@@ -2831,20 +2780,12 @@ conflicting_lock:
                goto out;
        }
        nfs4_set_lock_denied(conflock, &lock->lk_denied);
-
-out_destroy_new_stateid:
-       if (lock->lk_is_new) {
-               dprintk("NFSD: nfsd4_lock: destroy new stateid!\n");
-               /*
-                * An error encountered after instantiation of the new
-                * stateid has forced us to destroy it.
-                */
-               release_state_owner(lock_stp, LOCK_STATE);
-       }
 out:
-       if (lock->lk_stateowner) {
-               nfs4_get_stateowner(lock->lk_stateowner);
-               *replay_owner = lock->lk_stateowner;
+       if (status && lock->lk_is_new && lock_sop)
+               release_stateowner(lock_sop);
+       if (lock->lk_replay_owner) {
+               nfs4_get_stateowner(lock->lk_replay_owner);
+               *replay_owner = lock->lk_replay_owner;
        }
        nfs4_unlock_state();
        return status;
@@ -2977,8 +2918,6 @@ nfsd4_locku(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_lock
        *  Try to unlock the file in the VFS.
        */
        status = posix_lock_file(filp, &file_lock); 
-       if (file_lock.fl_ops && file_lock.fl_ops->fl_release_private)
-               file_lock.fl_ops->fl_release_private(&file_lock);
        if (status) {
                dprintk("NFSD: nfs4_locku: posix_lock_file failed!\n");
                goto out_nfserr;
@@ -3016,9 +2955,10 @@ check_for_locks(struct file *filp, struct nfs4_stateowner *lowner)
 
        lock_kernel();
        for (flpp = &inode->i_flock; *flpp != NULL; flpp = &(*flpp)->fl_next) {
-               if ((*flpp)->fl_owner == (fl_owner_t)lowner)
+               if ((*flpp)->fl_owner == (fl_owner_t)lowner) {
                        status = 1;
                        goto out;
+               }
        }
 out:
        unlock_kernel();
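
The rewritten nfsd4_process_open1() above replaces the old nested if/else
with early returns; for an already known open owner its seqid handling
reduces to a small decision function. The sketch below only restates that
logic; classify_open_seqid() and the enum are invented names, not kernel
symbols:

enum open1_verdict { OPEN1_PROCEED, OPEN1_REPLAY, OPEN1_BAD_SEQID };

/* confirmed: owner already confirmed; have_cache: a saved reply exists */
static enum open1_verdict classify_open_seqid(unsigned int op_seqid,
                                              unsigned int so_seqid,
                                              int confirmed, int have_cache)
{
        if (!confirmed)                         /* owner is simply replaced */
                return OPEN1_PROCEED;
        if (op_seqid == so_seqid - 1)           /* retransmitted OPEN */
                return have_cache ? OPEN1_REPLAY : OPEN1_PROCEED;
        if (op_seqid != so_seqid)
                return OPEN1_BAD_SEQID;
        return OPEN1_PROCEED;                   /* in-sequence OPEN */
}
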
index dcd673186944a22f48317c3ecf52e889d1357b86..69d3501173a8e79b2b0c01c167a45713f5867c2a 100644
@@ -528,7 +528,7 @@ nfsd4_decode_lock(struct nfsd4_compoundargs *argp, struct nfsd4_lock *lock)
 {
        DECODE_HEAD;
 
-       lock->lk_stateowner = NULL;
+       lock->lk_replay_owner = NULL;
        /*
        * type, reclaim(boolean), offset, length, new_lock_owner(boolean)
        */
@@ -1764,10 +1764,11 @@ nfsd4_encode_dirent(struct readdir_cd *ccd, const char *name, int namlen,
                 */
                if (!(cd->rd_bmval[0] & FATTR4_WORD0_RDATTR_ERROR))
                        goto fail;
-               nfserr = nfserr_toosmall;
                p = nfsd4_encode_rdattr_error(p, buflen, nfserr);
-               if (p == NULL)
+               if (p == NULL) {
+                       nfserr = nfserr_toosmall;
                        goto fail;
+               }
        }
        cd->buflen -= (p - cd->buffer);
        cd->buffer = p;
@@ -1895,7 +1896,6 @@ nfsd4_encode_lock_denied(struct nfsd4_compoundres *resp, struct nfsd4_lock_denie
 static void
 nfsd4_encode_lock(struct nfsd4_compoundres *resp, int nfserr, struct nfsd4_lock *lock)
 {
-
        ENCODE_SEQID_OP_HEAD;
 
        if (!nfserr) {
@@ -1906,7 +1906,7 @@ nfsd4_encode_lock(struct nfsd4_compoundres *resp, int nfserr, struct nfsd4_lock
        } else if (nfserr == nfserr_denied)
                nfsd4_encode_lock_denied(resp, &lock->lk_denied);
 
-       ENCODE_SEQID_OP_TAIL(lock->lk_stateowner);
+       ENCODE_SEQID_OP_TAIL(lock->lk_replay_owner);
 }
 
 static void
index 0aa1b9603d7f6282a00cbe2e7c7efafb89e44926..3e6b75cd90fd162093bceb4426f4f6bbdbf4e789 100644
@@ -36,6 +36,22 @@ nfsd_proc_null(struct svc_rqst *rqstp, void *argp, void *resp)
        return nfs_ok;
 }
 
+static int
+nfsd_return_attrs(int err, struct nfsd_attrstat *resp)
+{
+       if (err) return err;
+       return nfserrno(vfs_getattr(resp->fh.fh_export->ex_mnt,
+                                   resp->fh.fh_dentry,
+                                   &resp->stat));
+}
+static int
+nfsd_return_dirop(int err, struct nfsd_diropres *resp)
+{
+       if (err) return err;
+       return nfserrno(vfs_getattr(resp->fh.fh_export->ex_mnt,
+                                   resp->fh.fh_dentry,
+                                   &resp->stat));
+}
 /*
  * Get a file's attributes
  * N.B. After this call resp->fh needs an fh_put
@@ -44,10 +60,12 @@ static int
 nfsd_proc_getattr(struct svc_rqst *rqstp, struct nfsd_fhandle  *argp,
                                          struct nfsd_attrstat *resp)
 {
+       int nfserr;
        dprintk("nfsd: GETATTR  %s\n", SVCFH_fmt(&argp->fh));
 
        fh_copy(&resp->fh, &argp->fh);
-       return fh_verify(rqstp, &resp->fh, 0, MAY_NOP);
+       nfserr = fh_verify(rqstp, &resp->fh, 0, MAY_NOP);
+       return nfsd_return_attrs(nfserr, resp);
 }
 
 /*
@@ -58,12 +76,14 @@ static int
 nfsd_proc_setattr(struct svc_rqst *rqstp, struct nfsd_sattrargs *argp,
                                          struct nfsd_attrstat  *resp)
 {
+       int nfserr;
        dprintk("nfsd: SETATTR  %s, valid=%x, size=%ld\n",
                SVCFH_fmt(&argp->fh),
                argp->attrs.ia_valid, (long) argp->attrs.ia_size);
 
        fh_copy(&resp->fh, &argp->fh);
-       return nfsd_setattr(rqstp, &resp->fh, &argp->attrs,0, (time_t)0);
+       nfserr = nfsd_setattr(rqstp, &resp->fh, &argp->attrs,0, (time_t)0);
+       return nfsd_return_attrs(nfserr, resp);
 }
 
 /*
@@ -86,7 +106,7 @@ nfsd_proc_lookup(struct svc_rqst *rqstp, struct nfsd_diropargs *argp,
                                 &resp->fh);
 
        fh_put(&argp->fh);
-       return nfserr;
+       return nfsd_return_dirop(nfserr, resp);
 }
 
 /*
@@ -142,7 +162,10 @@ nfsd_proc_read(struct svc_rqst *rqstp, struct nfsd_readargs *argp,
                                  argp->vec, argp->vlen,
                                  &resp->count);
 
-       return nfserr;
+       if (nfserr) return nfserr;
+       return nfserrno(vfs_getattr(resp->fh.fh_export->ex_mnt,
+                                   resp->fh.fh_dentry,
+                                   &resp->stat));
 }
 
 /*
@@ -165,7 +188,7 @@ nfsd_proc_write(struct svc_rqst *rqstp, struct nfsd_writeargs *argp,
                                   argp->vec, argp->vlen,
                                   argp->len,
                                   &stable);
-       return nfserr;
+       return nfsd_return_attrs(nfserr, resp);
 }
 
 /*
@@ -322,7 +345,7 @@ out_unlock:
 
 done:
        fh_put(dirfhp);
-       return nfserr;
+       return nfsd_return_dirop(nfserr, resp);
 }
 
 static int
@@ -425,7 +448,7 @@ nfsd_proc_mkdir(struct svc_rqst *rqstp, struct nfsd_createargs *argp,
        nfserr = nfsd_create(rqstp, &argp->fh, argp->name, argp->len,
                                    &argp->attrs, S_IFDIR, 0, &resp->fh);
        fh_put(&argp->fh);
-       return nfserr;
+       return nfsd_return_dirop(nfserr, resp);
 }
 
 /*
index eef0576a77857577805e053d428ebe13bd6ce9a6..5320e5afaddbe0498c8cbde8e54cba835ab374a3 100644
@@ -710,14 +710,15 @@ static inline int nfsd_dosync(struct file *filp, struct dentry *dp,
 {
        struct inode *inode = dp->d_inode;
        int (*fsync) (struct file *, struct dentry *, int);
-       int err = nfs_ok;
+       int err;
 
-       filemap_fdatawrite(inode->i_mapping);
-       if (fop && (fsync = fop->fsync))
-               err=fsync(filp, dp, 0);
-       filemap_fdatawait(inode->i_mapping);
+       err = filemap_fdatawrite(inode->i_mapping);
+       if (err == 0 && fop && (fsync = fop->fsync))
+               err = fsync(filp, dp, 0);
+       if (err == 0)
+               err = filemap_fdatawait(inode->i_mapping);
 
-       return nfserrno(err);
+       return err;
 }
        
 
@@ -734,10 +735,10 @@ nfsd_sync(struct file *filp)
        return err;
 }
 
-void
+int
 nfsd_sync_dir(struct dentry *dp)
 {
-       nfsd_dosync(NULL, dp, dp->d_inode->i_fop);
+       return nfsd_dosync(NULL, dp, dp->d_inode->i_fop);
 }
 
 /*
@@ -814,7 +815,7 @@ nfsd_read_actor(read_descriptor_t *desc, struct page *page, unsigned long offset
        return size;
 }
 
-static inline int
+static int
 nfsd_vfs_read(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
               loff_t offset, struct kvec *vec, int vlen, unsigned long *count)
 {
@@ -878,7 +879,7 @@ static void kill_suid(struct dentry *dentry)
        mutex_unlock(&dentry->d_inode->i_mutex);
 }
 
-static inline int
+static int
 nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
                                loff_t offset, struct kvec *vec, int vlen,
                                unsigned long cnt, int *stablep)
@@ -890,9 +891,9 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
        int                     err = 0;
        int                     stable = *stablep;
 
+#ifdef MSNFS
        err = nfserr_perm;
 
-#ifdef MSNFS
        if ((fhp->fh_export->ex_flags & NFSEXP_MSNFS) &&
                (!lock_may_write(file->f_dentry->d_inode, offset, cnt)))
                goto out;
@@ -1064,7 +1065,7 @@ nfsd_commit(struct svc_rqst *rqstp, struct svc_fh *fhp,
                return err;
        if (EX_ISSYNC(fhp->fh_export)) {
                if (file->f_op && file->f_op->fsync) {
-                       err = nfsd_sync(file);
+                       err = nfserrno(nfsd_sync(file));
                } else {
                        err = nfserr_notsupp;
                }
@@ -1132,7 +1133,7 @@ nfsd_create(struct svc_rqst *rqstp, struct svc_fh *fhp,
                                "nfsd_create: parent %s/%s not locked!\n",
                                dentry->d_parent->d_name.name,
                                dentry->d_name.name);
-                       err = -EIO;
+                       err = nfserr_io;
                        goto out;
                }
        }
@@ -1175,7 +1176,7 @@ nfsd_create(struct svc_rqst *rqstp, struct svc_fh *fhp,
                goto out_nfserr;
 
        if (EX_ISSYNC(fhp->fh_export)) {
-               nfsd_sync_dir(dentry);
+               err = nfserrno(nfsd_sync_dir(dentry));
                write_inode_now(dchild->d_inode, 1);
        }
 
@@ -1185,9 +1186,11 @@ nfsd_create(struct svc_rqst *rqstp, struct svc_fh *fhp,
         * send along the gid when it tries to implement setgid
         * directories via NFS.
         */
-       err = 0;
-       if ((iap->ia_valid &= ~(ATTR_UID|ATTR_GID|ATTR_MODE)) != 0)
-               err = nfsd_setattr(rqstp, resfhp, iap, 0, (time_t)0);
+       if ((iap->ia_valid &= ~(ATTR_UID|ATTR_GID|ATTR_MODE)) != 0) {
+               int err2 = nfsd_setattr(rqstp, resfhp, iap, 0, (time_t)0);
+               if (err2)
+                       err = err2;
+       }
        /*
         * Update the file handle to get the new inode info.
         */
@@ -1306,17 +1309,10 @@ nfsd_create_v3(struct svc_rqst *rqstp, struct svc_fh *fhp,
                goto out_nfserr;
 
        if (EX_ISSYNC(fhp->fh_export)) {
-               nfsd_sync_dir(dentry);
+               err = nfserrno(nfsd_sync_dir(dentry));
                /* setattr will sync the child (or not) */
        }
 
-       /*
-        * Update the filehandle to get the new inode info.
-        */
-       err = fh_update(resfhp);
-       if (err)
-               goto out;
-
        if (createmode == NFS3_CREATE_EXCLUSIVE) {
                /* Cram the verifier into atime/mtime/mode */
                iap->ia_valid = ATTR_MTIME|ATTR_ATIME
@@ -1337,8 +1333,17 @@ nfsd_create_v3(struct svc_rqst *rqstp, struct svc_fh *fhp,
         * implement setgid directories via NFS. Clear out all that cruft.
         */
  set_attr:
-       if ((iap->ia_valid &= ~(ATTR_UID|ATTR_GID)) != 0)
-               err = nfsd_setattr(rqstp, resfhp, iap, 0, (time_t)0);
+       if ((iap->ia_valid &= ~(ATTR_UID|ATTR_GID)) != 0) {
+               int err2 = nfsd_setattr(rqstp, resfhp, iap, 0, (time_t)0);
+               if (err2)
+                       err = err2;
+       }
+
+       /*
+        * Update the filehandle to get the new inode info.
+        */
+       if (!err)
+               err = fh_update(resfhp);
 
  out:
        fh_unlock(fhp);
@@ -1447,10 +1452,10 @@ nfsd_symlink(struct svc_rqst *rqstp, struct svc_fh *fhp,
        } else
                err = vfs_symlink(dentry->d_inode, dnew, path, mode);
 
-       if (!err) {
+       if (!err)
                if (EX_ISSYNC(fhp->fh_export))
-                       nfsd_sync_dir(dentry);
-       } else
+                       err = nfsd_sync_dir(dentry);
+       if (err)
                err = nfserrno(err);
        fh_unlock(fhp);
 
@@ -1506,7 +1511,7 @@ nfsd_link(struct svc_rqst *rqstp, struct svc_fh *ffhp,
        err = vfs_link(dold, dirp, dnew);
        if (!err) {
                if (EX_ISSYNC(ffhp->fh_export)) {
-                       nfsd_sync_dir(ddir);
+                       err = nfserrno(nfsd_sync_dir(ddir));
                        write_inode_now(dest, 1);
                }
        } else {
@@ -1590,13 +1595,14 @@ nfsd_rename(struct svc_rqst *rqstp, struct svc_fh *ffhp, char *fname, int flen,
        if ((ffhp->fh_export->ex_flags & NFSEXP_MSNFS) &&
                ((atomic_read(&odentry->d_count) > 1)
                 || (atomic_read(&ndentry->d_count) > 1))) {
-                       err = nfserr_perm;
+                       err = -EPERM;
        } else
 #endif
        err = vfs_rename(fdir, odentry, tdir, ndentry);
        if (!err && EX_ISSYNC(tfhp->fh_export)) {
-               nfsd_sync_dir(tdentry);
-               nfsd_sync_dir(fdentry);
+               err = nfsd_sync_dir(tdentry);
+               if (!err)
+                       err = nfsd_sync_dir(fdentry);
        }
 
  out_dput_new:
@@ -1661,7 +1667,7 @@ nfsd_unlink(struct svc_rqst *rqstp, struct svc_fh *fhp, int type,
 #ifdef MSNFS
                if ((fhp->fh_export->ex_flags & NFSEXP_MSNFS) &&
                        (atomic_read(&rdentry->d_count) > 1)) {
-                       err = nfserr_perm;
+                       err = -EPERM;
                } else
 #endif
                err = vfs_unlink(dirp, rdentry);
@@ -1671,17 +1677,14 @@ nfsd_unlink(struct svc_rqst *rqstp, struct svc_fh *fhp, int type,
 
        dput(rdentry);
 
-       if (err)
-               goto out_nfserr;
-       if (EX_ISSYNC(fhp->fh_export)) 
-               nfsd_sync_dir(dentry);
-
-out:
-       return err;
+       if (err == 0 &&
+           EX_ISSYNC(fhp->fh_export))
+                       err = nfsd_sync_dir(dentry);
 
 out_nfserr:
        err = nfserrno(err);
-       goto out;
+out:
+       return err;
 }
 
 /*
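
A recurring theme in the vfs.c hunks above is that helpers such as
nfsd_dosync() and nfsd_sync_dir() now return 0 or a negative errno, and the
caller applies nfserrno() exactly once at the protocol boundary (the MSNFS
checks in nfsd_rename() and nfsd_unlink() likewise switch to -EPERM). The
toy model below restates that convention with stand-in status values; none
of these names are kernel symbols:

#include <errno.h>

enum { STATUS_OK = 0, STATUS_PERM, STATUS_IO, STATUS_SERVERFAULT };

static int to_wire(int err)                     /* plays the role of nfserrno() */
{
        switch (err) {
        case 0:      return STATUS_OK;
        case -EPERM: return STATUS_PERM;
        case -EIO:   return STATUS_IO;
        default:     return STATUS_SERVERFAULT;
        }
}

/* Helpers stay in errno space; only the boundary converts. */
static int sync_then_report(int (*sync_dir)(void))
{
        int err = sync_dir();                   /* 0 or -errno, like nfsd_sync_dir() now */

        return to_wire(err);
}
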
index 8e20c1f32563f19aeb7386c729e2fc11c3839cfa..70e0230d8e77b79a5becb1cecece91e95a062a2e 100644
--- a/fs/open.c
+++ b/fs/open.c
@@ -20,6 +20,7 @@
 #include <linux/security.h>
 #include <linux/mount.h>
 #include <linux/vfs.h>
+#include <linux/fcntl.h>
 #include <asm/uaccess.h>
 #include <linux/fs.h>
 #include <linux/personality.h>
@@ -383,7 +384,7 @@ asmlinkage long sys_utime(char __user * filename, struct utimbuf __user * times)
 
                error = get_user(newattrs.ia_atime.tv_sec, &times->actime);
                newattrs.ia_atime.tv_nsec = 0;
-               if (!error) 
+               if (!error)
                        error = get_user(newattrs.ia_mtime.tv_sec, &times->modtime);
                newattrs.ia_mtime.tv_nsec = 0;
                if (error)
@@ -414,14 +415,14 @@ out:
  * must be owner or have write permission.
  * Else, update from *times, must be owner or super user.
  */
-long do_utimes(char __user * filename, struct timeval * times)
+long do_utimes(int dfd, char __user *filename, struct timeval *times)
 {
        int error;
        struct nameidata nd;
        struct inode * inode;
        struct iattr newattrs;
 
-       error = user_path_walk(filename, &nd);
+       error = __user_walk_fd(dfd, filename, LOOKUP_FOLLOW, &nd);
 
        if (error)
                goto out;
@@ -461,13 +462,18 @@ out:
        return error;
 }
 
-asmlinkage long sys_utimes(char __user * filename, struct timeval __user * utimes)
+asmlinkage long sys_futimesat(int dfd, char __user *filename, struct timeval __user *utimes)
 {
        struct timeval times[2];
 
        if (utimes && copy_from_user(&times, utimes, sizeof(times)))
                return -EFAULT;
-       return do_utimes(filename, utimes ? times : NULL);
+       return do_utimes(dfd, filename, utimes ? times : NULL);
+}
+
+asmlinkage long sys_utimes(char __user *filename, struct timeval __user *utimes)
+{
+       return sys_futimesat(AT_FDCWD, filename, utimes);
 }
 
 
@@ -476,7 +482,7 @@ asmlinkage long sys_utimes(char __user * filename, struct timeval __user * utime
  * We do this by temporarily clearing all FS-related capabilities and
  * switching the fsuid/fsgid around to the real ones.
  */
-asmlinkage long sys_access(const char __user * filename, int mode)
+asmlinkage long sys_faccessat(int dfd, const char __user *filename, int mode)
 {
        struct nameidata nd;
        int old_fsuid, old_fsgid;
@@ -506,7 +512,7 @@ asmlinkage long sys_access(const char __user * filename, int mode)
        else
                current->cap_effective = current->cap_permitted;
 
-       res = __user_walk(filename, LOOKUP_FOLLOW|LOOKUP_ACCESS, &nd);
+       res = __user_walk_fd(dfd, filename, LOOKUP_FOLLOW|LOOKUP_ACCESS, &nd);
        if (!res) {
                res = vfs_permission(&nd, mode);
                /* SuS v2 requires we report a read only fs too */
@@ -523,6 +529,11 @@ asmlinkage long sys_access(const char __user * filename, int mode)
        return res;
 }
 
+asmlinkage long sys_access(const char __user *filename, int mode)
+{
+       return sys_faccessat(AT_FDCWD, filename, mode);
+}
+
 asmlinkage long sys_chdir(const char __user * filename)
 {
        struct nameidata nd;
@@ -635,14 +646,15 @@ out:
        return err;
 }
 
-asmlinkage long sys_chmod(const char __user * filename, mode_t mode)
+asmlinkage long sys_fchmodat(int dfd, const char __user *filename,
+                            mode_t mode)
 {
        struct nameidata nd;
        struct inode * inode;
        int error;
        struct iattr newattrs;
 
-       error = user_path_walk(filename, &nd);
+       error = __user_walk_fd(dfd, filename, LOOKUP_FOLLOW, &nd);
        if (error)
                goto out;
        inode = nd.dentry->d_inode;
@@ -669,6 +681,11 @@ out:
        return error;
 }
 
+asmlinkage long sys_chmod(const char __user *filename, mode_t mode)
+{
+       return sys_fchmodat(AT_FDCWD, filename, mode);
+}
+
 static int chown_common(struct dentry * dentry, uid_t user, gid_t group)
 {
        struct inode * inode;
@@ -717,6 +734,26 @@ asmlinkage long sys_chown(const char __user * filename, uid_t user, gid_t group)
        return error;
 }
 
+asmlinkage long sys_fchownat(int dfd, const char __user *filename, uid_t user,
+                            gid_t group, int flag)
+{
+       struct nameidata nd;
+       int error = -EINVAL;
+       int follow;
+
+       if ((flag & ~AT_SYMLINK_NOFOLLOW) != 0)
+               goto out;
+
+       follow = (flag & AT_SYMLINK_NOFOLLOW) ? 0 : LOOKUP_FOLLOW;
+       error = __user_walk_fd(dfd, filename, follow, &nd);
+       if (!error) {
+               error = chown_common(nd.dentry, user, group);
+               path_release(&nd);
+       }
+out:
+       return error;
+}
+
 asmlinkage long sys_lchown(const char __user * filename, uid_t user, gid_t group)
 {
        struct nameidata nd;
@@ -820,7 +857,8 @@ cleanup_file:
  * for the internal routines (ie open_namei()/follow_link() etc). 00 is
  * used by symlinks.
  */
-struct file *filp_open(const char * filename, int flags, int mode)
+static struct file *do_filp_open(int dfd, const char *filename, int flags,
+                                int mode)
 {
        int namei_flags, error;
        struct nameidata nd;
@@ -829,12 +867,17 @@ struct file *filp_open(const char * filename, int flags, int mode)
        if ((namei_flags+1) & O_ACCMODE)
                namei_flags++;
 
-       error = open_namei(filename, namei_flags, mode, &nd);
+       error = open_namei(dfd, filename, namei_flags, mode, &nd);
        if (!error)
                return nameidata_to_filp(&nd, flags);
 
        return ERR_PTR(error);
 }
+
+struct file *filp_open(const char *filename, int flags, int mode)
+{
+       return do_filp_open(AT_FDCWD, filename, flags, mode);
+}
 EXPORT_SYMBOL(filp_open);
 
 /**
@@ -991,7 +1034,7 @@ void fastcall put_unused_fd(unsigned int fd)
 EXPORT_SYMBOL(put_unused_fd);
 
 /*
- * Install a file pointer in the fd array.  
+ * Install a file pointer in the fd array.
  *
  * The VFS is full of places where we drop the files lock between
  * setting the open_fds bitmap and installing the file in the file
@@ -1016,7 +1059,7 @@ void fastcall fd_install(unsigned int fd, struct file * file)
 
 EXPORT_SYMBOL(fd_install);
 
-long do_sys_open(const char __user *filename, int flags, int mode)
+long do_sys_open(int dfd, const char __user *filename, int flags, int mode)
 {
        char *tmp = getname(filename);
        int fd = PTR_ERR(tmp);
@@ -1024,7 +1067,7 @@ long do_sys_open(const char __user *filename, int flags, int mode)
        if (!IS_ERR(tmp)) {
                fd = get_unused_fd();
                if (fd >= 0) {
-                       struct file *f = filp_open(tmp, flags, mode);
+                       struct file *f = do_filp_open(dfd, tmp, flags, mode);
                        if (IS_ERR(f)) {
                                put_unused_fd(fd);
                                fd = PTR_ERR(f);
@@ -1043,10 +1086,20 @@ asmlinkage long sys_open(const char __user *filename, int flags, int mode)
        if (force_o_largefile())
                flags |= O_LARGEFILE;
 
-       return do_sys_open(filename, flags, mode);
+       return do_sys_open(AT_FDCWD, filename, flags, mode);
 }
 EXPORT_SYMBOL_GPL(sys_open);
 
+asmlinkage long sys_openat(int dfd, const char __user *filename, int flags,
+                          int mode)
+{
+       if (force_o_largefile())
+               flags |= O_LARGEFILE;
+
+       return do_sys_open(dfd, filename, flags, mode);
+}
+EXPORT_SYMBOL_GPL(sys_openat);
+
 #ifndef __alpha__
 
 /*
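
With do_filp_open() and do_sys_open() taking a dfd, sys_openat() behaves
exactly like sys_open() when AT_FDCWD is passed and resolves relative paths
against dfd otherwise. A small user-space sketch of that equivalence, again
assuming a libc that exposes openat() and the AT_* constants; illustrative
only:

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        int dfd = open("/etc", O_RDONLY | O_DIRECTORY);
        int fd;

        if (dfd < 0) {
                perror("open /etc");
                return 1;
        }
        /* Resolved relative to dfd: opens /etc/hosts regardless of the cwd. */
        fd = openat(dfd, "hosts", O_RDONLY);
        if (fd < 0)
                perror("openat(dfd, hosts)");
        else
                close(fd);
        /* AT_FDCWD degenerates to the classic open("hosts", O_RDONLY). */
        fd = openat(AT_FDCWD, "hosts", O_RDONLY);
        if (fd >= 0)
                close(fd);
        /* The other new calls follow the same pattern, e.g.
         * fchownat(dfd, "hosts", uid, gid, AT_SYMLINK_NOFOLLOW) or
         * futimesat(dfd, "hosts", NULL). */
        close(dfd);
        return 0;
}
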
index f10a10317d5494e0eb4995d9c016113a093f34ee..c0f02d36c60e539c108d4b51fb2ce94762d5ce82 100644
@@ -179,12 +179,11 @@ get_max:
 #define POLLOUT_SET (POLLWRBAND | POLLWRNORM | POLLOUT | POLLERR)
 #define POLLEX_SET (POLLPRI)
 
-int do_select(int n, fd_set_bits *fds, long *timeout)
+int do_select(int n, fd_set_bits *fds, s64 *timeout)
 {
        struct poll_wqueues table;
        poll_table *wait;
        int retval, i;
-       long __timeout = *timeout;
 
        rcu_read_lock();
        retval = max_select_fd(n, fds);
@@ -196,11 +195,12 @@ int do_select(int n, fd_set_bits *fds, long *timeout)
 
        poll_initwait(&table);
        wait = &table.pt;
-       if (!__timeout)
+       if (!*timeout)
                wait = NULL;
        retval = 0;
        for (;;) {
                unsigned long *rinp, *routp, *rexp, *inp, *outp, *exp;
+               long __timeout;
 
                set_current_state(TASK_INTERRUPTIBLE);
 
@@ -255,22 +255,32 @@ int do_select(int n, fd_set_bits *fds, long *timeout)
                                *rexp = res_ex;
                }
                wait = NULL;
-               if (retval || !__timeout || signal_pending(current))
+               if (retval || !*timeout || signal_pending(current))
                        break;
                if(table.error) {
                        retval = table.error;
                        break;
                }
+
+               if (*timeout < 0) {
+                       /* Wait indefinitely */
+                       __timeout = MAX_SCHEDULE_TIMEOUT;
+               } else if (unlikely(*timeout >= (s64)MAX_SCHEDULE_TIMEOUT - 1)) {
+                       /* Wait for longer than MAX_SCHEDULE_TIMEOUT. Do it in a loop */
+                       __timeout = MAX_SCHEDULE_TIMEOUT - 1;
+                       *timeout -= __timeout;
+               } else {
+                       __timeout = *timeout;
+                       *timeout = 0;
+               }
                __timeout = schedule_timeout(__timeout);
+               if (*timeout >= 0)
+                       *timeout += __timeout;
        }
        __set_current_state(TASK_RUNNING);
 
        poll_freewait(&table);
 
-       /*
-        * Up-to-date the caller timeout.
-        */
-       *timeout = __timeout;
        return retval;
 }
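
do_select() above now carries the remaining timeout as a signed 64-bit
jiffies budget: a negative value means wait forever, and anything larger
than a single schedule_timeout() can handle is consumed in chunks of
MAX_SCHEDULE_TIMEOUT - 1, with whatever the sleep leaves unconsumed added
back afterwards. The helper below is an invented name that merely restates
the chunking step:

/* budget: remaining jiffies, < 0 means infinite; max_to: MAX_SCHEDULE_TIMEOUT */
static long next_chunk(long long *budget, long max_to)
{
        long chunk;

        if (*budget < 0)                        /* wait indefinitely */
                return max_to;
        if (*budget >= max_to - 1) {            /* too large for one sleep */
                chunk = max_to - 1;
                *budget -= chunk;
        } else {
                chunk = (long)*budget;          /* fits in a single sleep */
                *budget = 0;
        }
        return chunk;
}
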
 
@@ -295,36 +305,14 @@ static void select_bits_free(void *bits, int size)
 #define MAX_SELECT_SECONDS \
        ((unsigned long) (MAX_SCHEDULE_TIMEOUT / HZ)-1)
 
-asmlinkage long
-sys_select(int n, fd_set __user *inp, fd_set __user *outp, fd_set __user *exp, struct timeval __user *tvp)
+static int core_sys_select(int n, fd_set __user *inp, fd_set __user *outp,
+                          fd_set __user *exp, s64 *timeout)
 {
        fd_set_bits fds;
        char *bits;
-       long timeout;
        int ret, size, max_fdset;
        struct fdtable *fdt;
 
-       timeout = MAX_SCHEDULE_TIMEOUT;
-       if (tvp) {
-               time_t sec, usec;
-
-               if (!access_ok(VERIFY_READ, tvp, sizeof(*tvp))
-                   || __get_user(sec, &tvp->tv_sec)
-                   || __get_user(usec, &tvp->tv_usec)) {
-                       ret = -EFAULT;
-                       goto out_nofds;
-               }
-
-               ret = -EINVAL;
-               if (sec < 0 || usec < 0)
-                       goto out_nofds;
-
-               if ((unsigned long) sec < MAX_SELECT_SECONDS) {
-                       timeout = ROUND_UP(usec, 1000000/HZ);
-                       timeout += sec * (unsigned long) HZ;
-               }
-       }
-
        ret = -EINVAL;
        if (n < 0)
                goto out_nofds;
@@ -362,18 +350,7 @@ sys_select(int n, fd_set __user *inp, fd_set __user *outp, fd_set __user *exp, s
        zero_fd_set(n, fds.res_out);
        zero_fd_set(n, fds.res_ex);
 
-       ret = do_select(n, &fds, &timeout);
-
-       if (tvp && !(current->personality & STICKY_TIMEOUTS)) {
-               time_t sec = 0, usec = 0;
-               if (timeout) {
-                       sec = timeout / HZ;
-                       usec = timeout % HZ;
-                       usec *= (1000000/HZ);
-               }
-               put_user(sec, &tvp->tv_sec);
-               put_user(usec, &tvp->tv_usec);
-       }
+       ret = do_select(n, &fds, timeout);
 
        if (ret < 0)
                goto out;
@@ -395,6 +372,154 @@ out_nofds:
        return ret;
 }
 
+asmlinkage long sys_select(int n, fd_set __user *inp, fd_set __user *outp,
+                       fd_set __user *exp, struct timeval __user *tvp)
+{
+       s64 timeout = -1;
+       struct timeval tv;
+       int ret;
+
+       if (tvp) {
+               if (copy_from_user(&tv, tvp, sizeof(tv)))
+                       return -EFAULT;
+
+               if (tv.tv_sec < 0 || tv.tv_usec < 0)
+                       return -EINVAL;
+
+               /* Cast to u64 to make GCC stop complaining */
+               if ((u64)tv.tv_sec >= (u64)MAX_INT64_SECONDS)
+                       timeout = -1;   /* infinite */
+               else {
+                       timeout = ROUND_UP(tv.tv_usec, USEC_PER_SEC/HZ);
+                       timeout += tv.tv_sec * HZ;
+               }
+       }
+
+       ret = core_sys_select(n, inp, outp, exp, &timeout);
+
+       if (tvp) {
+               if (current->personality & STICKY_TIMEOUTS)
+                       goto sticky;
+               tv.tv_usec = jiffies_to_usecs(do_div((*(u64*)&timeout), HZ));
+               tv.tv_sec = timeout;
+               if (copy_to_user(tvp, &tv, sizeof(tv))) {
+sticky:
+                       /*
+                        * If an application puts its timeval in read-only
+                        * memory, we don't want the Linux-specific update to
+                        * the timeval to cause a fault after the select has
+                        * completed successfully. However, because we're not
+                        * updating the timeval, we can't restart the system
+                        * call.
+                        */
+                       if (ret == -ERESTARTNOHAND)
+                               ret = -EINTR;
+               }
+       }
+
+       return ret;
+}
+
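
sys_select() now converts the caller's struct timeval into a rounded-up jiffies budget, runs core_sys_select(), and (unless the task has the STICKY_TIMEOUTS personality) writes the unslept remainder back to userspace. A small illustration of why callers must reinitialise the timeval on Linux (standard select(2) usage, nothing kernel-specific assumed):

#include <stdio.h>
#include <sys/select.h>
#include <unistd.h>

int main(void)
{
        fd_set rfds;
        struct timeval tv = { .tv_sec = 2, .tv_usec = 500000 };
        int ret;

        FD_ZERO(&rfds);
        FD_SET(STDIN_FILENO, &rfds);

        ret = select(STDIN_FILENO + 1, &rfds, NULL, NULL, &tv);

        /* On Linux the kernel overwrites tv with the time *not* slept
         * (unless the task has the STICKY_TIMEOUTS personality), so it
         * must be reinitialised before the next call. */
        printf("ret=%d, remaining %ld.%06ld s\n",
               ret, (long)tv.tv_sec, (long)tv.tv_usec);
        return 0;
}
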
+#ifdef TIF_RESTORE_SIGMASK
+asmlinkage long sys_pselect7(int n, fd_set __user *inp, fd_set __user *outp,
+               fd_set __user *exp, struct timespec __user *tsp,
+               const sigset_t __user *sigmask, size_t sigsetsize)
+{
+       s64 timeout = MAX_SCHEDULE_TIMEOUT;
+       sigset_t ksigmask, sigsaved;
+       struct timespec ts;
+       int ret;
+
+       if (tsp) {
+               if (copy_from_user(&ts, tsp, sizeof(ts)))
+                       return -EFAULT;
+
+               if (ts.tv_sec < 0 || ts.tv_nsec < 0)
+                       return -EINVAL;
+
+               /* Cast to u64 to make GCC stop complaining */
+               if ((u64)ts.tv_sec >= (u64)MAX_INT64_SECONDS)
+                       timeout = -1;   /* infinite */
+               else {
+                       timeout = ROUND_UP(ts.tv_nsec, NSEC_PER_SEC/HZ);
+                       timeout += ts.tv_sec * HZ;
+               }
+       }
+
+       if (sigmask) {
+               /* XXX: Don't preclude handling different sized sigset_t's.  */
+               if (sigsetsize != sizeof(sigset_t))
+                       return -EINVAL;
+               if (copy_from_user(&ksigmask, sigmask, sizeof(ksigmask)))
+                       return -EFAULT;
+
+               sigdelsetmask(&ksigmask, sigmask(SIGKILL)|sigmask(SIGSTOP));
+               sigprocmask(SIG_SETMASK, &ksigmask, &sigsaved);
+       }
+
+       ret = core_sys_select(n, inp, outp, exp, &timeout);
+
+       if (tsp) {
+               if (current->personality & STICKY_TIMEOUTS)
+                       goto sticky;
+               ts.tv_nsec = jiffies_to_usecs(do_div((*(u64*)&timeout), HZ)) * 1000;
+               ts.tv_sec = timeout;
+               if (copy_to_user(tsp, &ts, sizeof(ts))) {
+sticky:
+                       /*
+                        * If an application puts its timespec in read-only
+                        * memory, we don't want the Linux-specific update to
+                        * the timespec to cause a fault after the select has
+                        * completed successfully. However, because we're not
+                        * updating the timespec, we can't restart the system
+                        * call.
+                        */
+                       if (ret == -ERESTARTNOHAND)
+                               ret = -EINTR;
+               }
+       }
+
+       if (ret == -ERESTARTNOHAND) {
+               /*
+                * Don't restore the signal mask yet. Let do_signal() deliver
+                * the signal on the way back to userspace, before the signal
+                * mask is restored.
+                */
+               if (sigmask) {
+                       memcpy(&current->saved_sigmask, &sigsaved,
+                                       sizeof(sigsaved));
+                       set_thread_flag(TIF_RESTORE_SIGMASK);
+               }
+       } else if (sigmask)
+               sigprocmask(SIG_SETMASK, &sigsaved, NULL);
+
+       return ret;
+}
+
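
The point of sys_pselect7() is to swap the caller's signal mask in and out atomically around the wait; TIF_RESTORE_SIGMASK defers restoring the old mask until the pending signal has actually been delivered, so the classic select()/signal race cannot occur. A hedged userspace sketch of the usage pattern this enables (assuming a libc pselect() wrapper is available):

#include <signal.h>
#include <stdio.h>
#include <sys/select.h>
#include <unistd.h>

static volatile sig_atomic_t got_sigint;

static void on_sigint(int sig)
{
        (void)sig;
        got_sigint = 1;
}

int main(void)
{
        sigset_t blocked, orig;
        fd_set rfds;
        int ret;

        signal(SIGINT, on_sigint);

        /* Keep SIGINT blocked everywhere except inside the wait itself. */
        sigemptyset(&blocked);
        sigaddset(&blocked, SIGINT);
        sigprocmask(SIG_BLOCK, &blocked, &orig);

        FD_ZERO(&rfds);
        FD_SET(STDIN_FILENO, &rfds);

        /* The kernel installs 'orig' atomically for the duration of the
         * wait, so a SIGINT arriving just before the call cannot be lost. */
        ret = pselect(STDIN_FILENO + 1, &rfds, NULL, NULL, NULL, &orig);
        if (ret < 0 && got_sigint)
                printf("interrupted by SIGINT\n");
        return 0;
}
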
+/*
+ * Most architectures can't handle 7-argument syscalls. So we provide a
+ * 6-argument version where the sixth argument is a pointer to a structure
+ * which has a pointer to the sigset_t itself followed by a size_t containing
+ * the sigset size.
+ */
+asmlinkage long sys_pselect6(int n, fd_set __user *inp, fd_set __user *outp,
+       fd_set __user *exp, struct timespec __user *tsp, void __user *sig)
+{
+       size_t sigsetsize = 0;
+       sigset_t __user *up = NULL;
+
+       if (sig) {
+               if (!access_ok(VERIFY_READ, sig, sizeof(void *)+sizeof(size_t))
+                   || __get_user(up, (sigset_t * __user *)sig)
+                   || __get_user(sigsetsize,
+                               (size_t * __user)(sig+sizeof(void *))))
+                       return -EFAULT;
+       }
+
+       return sys_pselect7(n, inp, outp, exp, tsp, up, sigsetsize);
+}
+#endif /* TIF_RESTORE_SIGMASK */
+
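
Because most architectures cap syscalls at six arguments, the sixth argument of sys_pselect6() is a pointer to a {sigset pointer, sigset size} pair which the kernel unpacks with __get_user(). A hypothetical libc-side packing sketch (the struct name, wrapper name, and the use of _NSIG/8 as the kernel-side sigset size are assumptions for illustration, not a documented header):

#include <signal.h>
#include <stddef.h>
#include <sys/select.h>
#include <sys/syscall.h>
#include <time.h>
#include <unistd.h>

/* Hypothetical packing of the sixth argument; the layout mirrors what
 * sys_pselect6() reads back with __get_user(). */
struct pselect6_sigarg {
        const sigset_t *ss;     /* pointer to the caller's mask */
        size_t          ss_len; /* size the kernel expects */
};

static long my_pselect6(int n, fd_set *in, fd_set *out, fd_set *ex,
                        struct timespec *ts, const sigset_t *mask)
{
        /* _NSIG / 8 is the kernel-side sigset size; glibc's sigset_t is
         * larger, so sizeof(sigset_t) would be rejected with -EINVAL. */
        struct pselect6_sigarg sa = { .ss = mask, .ss_len = _NSIG / 8 };

        return syscall(__NR_pselect6, n, in, out, ex, ts,
                       mask ? &sa : NULL);
}
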
 struct poll_list {
        struct poll_list *next;
        int len;
@@ -436,16 +561,19 @@ static void do_pollfd(unsigned int num, struct pollfd * fdpage,
 }
 
 static int do_poll(unsigned int nfds,  struct poll_list *list,
-                       struct poll_wqueues *wait, long timeout)
+                  struct poll_wqueues *wait, s64 *timeout)
 {
        int count = 0;
        poll_table* pt = &wait->pt;
 
-       if (!timeout)
+       /* Optimise the no-wait case */
+       if (!(*timeout))
                pt = NULL;
  
        for (;;) {
                struct poll_list *walk;
+               long __timeout;
+
                set_current_state(TASK_INTERRUPTIBLE);
                walk = list;
                while(walk != NULL) {
@@ -453,18 +581,36 @@ static int do_poll(unsigned int nfds,  struct poll_list *list,
                        walk = walk->next;
                }
                pt = NULL;
-               if (count || !timeout || signal_pending(current))
+               if (count || !*timeout || signal_pending(current))
                        break;
                count = wait->error;
                if (count)
                        break;
-               timeout = schedule_timeout(timeout);
+
+               if (*timeout < 0) {
+                       /* Wait indefinitely */
+                       __timeout = MAX_SCHEDULE_TIMEOUT;
+               } else if (unlikely(*timeout >= (s64)MAX_SCHEDULE_TIMEOUT-1)) {
+                       /*
+                        * Wait for longer than MAX_SCHEDULE_TIMEOUT. Do it in
+                        * a loop
+                        */
+                       __timeout = MAX_SCHEDULE_TIMEOUT - 1;
+                       *timeout -= __timeout;
+               } else {
+                       __timeout = *timeout;
+                       *timeout = 0;
+               }
+
+               __timeout = schedule_timeout(__timeout);
+               if (*timeout >= 0)
+                       *timeout += __timeout;
        }
        __set_current_state(TASK_RUNNING);
        return count;
 }
 
-asmlinkage long sys_poll(struct pollfd __user * ufds, unsigned int nfds, long timeout)
+int do_sys_poll(struct pollfd __user *ufds, unsigned int nfds, s64 *timeout)
 {
        struct poll_wqueues table;
        int fdcount, err;
@@ -482,14 +628,6 @@ asmlinkage long sys_poll(struct pollfd __user * ufds, unsigned int nfds, long ti
        if (nfds > max_fdset && nfds > OPEN_MAX)
                return -EINVAL;
 
-       if (timeout) {
-               /* Careful about overflow in the intermediate values */
-               if ((unsigned long) timeout < MAX_SCHEDULE_TIMEOUT / HZ)
-                       timeout = (unsigned long)(timeout*HZ+999)/1000+1;
-               else /* Negative or overflow */
-                       timeout = MAX_SCHEDULE_TIMEOUT;
-       }
-
        poll_initwait(&table);
 
        head = NULL;
@@ -519,6 +657,7 @@ asmlinkage long sys_poll(struct pollfd __user * ufds, unsigned int nfds, long ti
                }
                i -= pp->len;
        }
+
        fdcount = do_poll(nfds, head, &table, timeout);
 
        /* OK, now copy the revents fields back to user space. */
@@ -547,3 +686,98 @@ out_fds:
        poll_freewait(&table);
        return err;
 }
+
+asmlinkage long sys_poll(struct pollfd __user *ufds, unsigned int nfds,
+                       long timeout_msecs)
+{
+       s64 timeout_jiffies = 0;
+
+       if (timeout_msecs) {
+#if HZ > 1000
+               /* We can only overflow if HZ > 1000 */
+               if (timeout_msecs / 1000 > (s64)0x7fffffffffffffffULL / (s64)HZ)
+                       timeout_jiffies = -1;
+               else
+#endif
+                       timeout_jiffies = msecs_to_jiffies(timeout_msecs);
+       }
+
+       return do_sys_poll(ufds, nfds, &timeout_jiffies);
+}
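
sys_poll() keeps its millisecond interface but now converts to the signed 64-bit jiffies budget up front, clamping anything that could overflow to "wait forever". A simplified standalone stand-in for that conversion (the overflow check here is written directly against the multiply rather than the exact HZ > 1000 test above; names are illustrative):

#include <stdint.h>

static int64_t msecs_to_ticks(int64_t ms, int64_t my_hz)
{
        /* Clamp anything that would overflow the multiply to "infinite". */
        if (ms < 0 || ms > (INT64_MAX - 999) / my_hz)
                return -1;
        return (ms * my_hz + 999) / 1000;       /* round up to a full tick */
}
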
+
+#ifdef TIF_RESTORE_SIGMASK
+asmlinkage long sys_ppoll(struct pollfd __user *ufds, unsigned int nfds,
+       struct timespec __user *tsp, const sigset_t __user *sigmask,
+       size_t sigsetsize)
+{
+       sigset_t ksigmask, sigsaved;
+       struct timespec ts;
+       s64 timeout = -1;
+       int ret;
+
+       if (tsp) {
+               if (copy_from_user(&ts, tsp, sizeof(ts)))
+                       return -EFAULT;
+
+               /* Cast to u64 to make GCC stop complaining */
+               if ((u64)ts.tv_sec >= (u64)MAX_INT64_SECONDS)
+                       timeout = -1;   /* infinite */
+               else {
+                       timeout = ROUND_UP(ts.tv_nsec, NSEC_PER_SEC/HZ);
+                       timeout += ts.tv_sec * HZ;
+               }
+       }
+
+       if (sigmask) {
+               /* XXX: Don't preclude handling different sized sigset_t's.  */
+               if (sigsetsize != sizeof(sigset_t))
+                       return -EINVAL;
+               if (copy_from_user(&ksigmask, sigmask, sizeof(ksigmask)))
+                       return -EFAULT;
+
+               sigdelsetmask(&ksigmask, sigmask(SIGKILL)|sigmask(SIGSTOP));
+               sigprocmask(SIG_SETMASK, &ksigmask, &sigsaved);
+       }
+
+       ret = do_sys_poll(ufds, nfds, &timeout);
+
+       /* We can restart this syscall, usually */
+       if (ret == -EINTR) {
+               /*
+                * Don't restore the signal mask yet. Let do_signal() deliver
+                * the signal on the way back to userspace, before the signal
+                * mask is restored.
+                */
+               if (sigmask) {
+                       memcpy(&current->saved_sigmask, &sigsaved,
+                                       sizeof(sigsaved));
+                       set_thread_flag(TIF_RESTORE_SIGMASK);
+               }
+               ret = -ERESTARTNOHAND;
+       } else if (sigmask)
+               sigprocmask(SIG_SETMASK, &sigsaved, NULL);
+
+       if (tsp && timeout >= 0) {
+               if (current->personality & STICKY_TIMEOUTS)
+                       goto sticky;
+               /* Yes, we know it's actually an s64, but it's also positive. */
+               ts.tv_nsec = jiffies_to_usecs(do_div((*(u64*)&timeout), HZ)) * 1000;
+               ts.tv_sec = timeout;
+               if (copy_to_user(tsp, &ts, sizeof(ts))) {
+               sticky:
+                       /*
+                        * If an application puts its timespec in read-only
+                        * memory, we don't want the Linux-specific update to
+                        * the timespec to cause a fault after the poll has
+                        * completed successfully. However, because we're not
+                        * updating the timespec, we can't restart the system
+                        * call.
+                        */
+                       if (ret == -ERESTARTNOHAND && timeout >= 0)
+                               ret = -EINTR;
+               }
+       }
+
+       return ret;
+}
+#endif /* TIF_RESTORE_SIGMASK */
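
sys_ppoll() mirrors sys_pselect7(): a nanosecond-resolution timeout plus an atomically installed signal mask, restored via TIF_RESTORE_SIGMASK after signal delivery. A brief userspace sketch (assuming a libc ppoll() wrapper; older libcs may need a syscall() shim):

#define _GNU_SOURCE
#include <poll.h>
#include <signal.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

int main(void)
{
        struct pollfd pfd = { .fd = STDIN_FILENO, .events = POLLIN };
        struct timespec ts = { .tv_sec = 5, .tv_nsec = 0 };
        sigset_t none;
        int ret;

        /* Signal mask installed only for the duration of the wait. */
        sigemptyset(&none);

        ret = ppoll(&pfd, 1, &ts, &none);
        printf("ppoll returned %d, revents=0x%x\n", ret,
               (unsigned)pfd.revents);
        return 0;
}
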
index b8a0e5110ab26f90bf9a9acf336489179d162763..24211b030f393e8ba18aea474efbd4bb4a71031b 100644 (file)
--- a/fs/stat.c
+++ b/fs/stat.c
@@ -63,12 +63,12 @@ int vfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
 
 EXPORT_SYMBOL(vfs_getattr);
 
-int vfs_stat(char __user *name, struct kstat *stat)
+int vfs_stat_fd(int dfd, char __user *name, struct kstat *stat)
 {
        struct nameidata nd;
        int error;
 
-       error = user_path_walk(name, &nd);
+       error = __user_walk_fd(dfd, name, LOOKUP_FOLLOW, &nd);
        if (!error) {
                error = vfs_getattr(nd.mnt, nd.dentry, stat);
                path_release(&nd);
@@ -76,14 +76,19 @@ int vfs_stat(char __user *name, struct kstat *stat)
        return error;
 }
 
+int vfs_stat(char __user *name, struct kstat *stat)
+{
+       return vfs_stat_fd(AT_FDCWD, name, stat);
+}
+
 EXPORT_SYMBOL(vfs_stat);
 
-int vfs_lstat(char __user *name, struct kstat *stat)
+int vfs_lstat_fd(int dfd, char __user *name, struct kstat *stat)
 {
        struct nameidata nd;
        int error;
 
-       error = user_path_walk_link(name, &nd);
+       error = __user_walk_fd(dfd, name, 0, &nd);
        if (!error) {
                error = vfs_getattr(nd.mnt, nd.dentry, stat);
                path_release(&nd);
@@ -91,6 +96,11 @@ int vfs_lstat(char __user *name, struct kstat *stat)
        return error;
 }
 
+int vfs_lstat(char __user *name, struct kstat *stat)
+{
+       return vfs_lstat_fd(AT_FDCWD, name, stat);
+}
+
 EXPORT_SYMBOL(vfs_lstat);
 
 int vfs_fstat(unsigned int fd, struct kstat *stat)
@@ -151,7 +161,7 @@ static int cp_old_stat(struct kstat *stat, struct __old_kernel_stat __user * sta
 asmlinkage long sys_stat(char __user * filename, struct __old_kernel_stat __user * statbuf)
 {
        struct kstat stat;
-       int error = vfs_stat(filename, &stat);
+       int error = vfs_stat_fd(AT_FDCWD, filename, &stat);
 
        if (!error)
                error = cp_old_stat(&stat, statbuf);
@@ -161,7 +171,7 @@ asmlinkage long sys_stat(char __user * filename, struct __old_kernel_stat __user
 asmlinkage long sys_lstat(char __user * filename, struct __old_kernel_stat __user * statbuf)
 {
        struct kstat stat;
-       int error = vfs_lstat(filename, &stat);
+       int error = vfs_lstat_fd(AT_FDCWD, filename, &stat);
 
        if (!error)
                error = cp_old_stat(&stat, statbuf);
@@ -229,27 +239,50 @@ static int cp_new_stat(struct kstat *stat, struct stat __user *statbuf)
        return copy_to_user(statbuf,&tmp,sizeof(tmp)) ? -EFAULT : 0;
 }
 
-asmlinkage long sys_newstat(char __user * filename, struct stat __user * statbuf)
+asmlinkage long sys_newstat(char __user *filename, struct stat __user *statbuf)
 {
        struct kstat stat;
-       int error = vfs_stat(filename, &stat);
+       int error = vfs_stat_fd(AT_FDCWD, filename, &stat);
 
        if (!error)
                error = cp_new_stat(&stat, statbuf);
 
        return error;
 }
-asmlinkage long sys_newlstat(char __user * filename, struct stat __user * statbuf)
+
+asmlinkage long sys_newlstat(char __user *filename, struct stat __user *statbuf)
 {
        struct kstat stat;
-       int error = vfs_lstat(filename, &stat);
+       int error = vfs_lstat_fd(AT_FDCWD, filename, &stat);
 
        if (!error)
                error = cp_new_stat(&stat, statbuf);
 
        return error;
 }
-asmlinkage long sys_newfstat(unsigned int fd, struct stat __user * statbuf)
+
+asmlinkage long sys_newfstatat(int dfd, char __user *filename,
+                               struct stat __user *statbuf, int flag)
+{
+       struct kstat stat;
+       int error = -EINVAL;
+
+       if ((flag & ~AT_SYMLINK_NOFOLLOW) != 0)
+               goto out;
+
+       if (flag & AT_SYMLINK_NOFOLLOW)
+               error = vfs_lstat_fd(dfd, filename, &stat);
+       else
+               error = vfs_stat_fd(dfd, filename, &stat);
+
+       if (!error)
+               error = cp_new_stat(&stat, statbuf);
+
+out:
+       return error;
+}
+
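
sys_newfstatat() simply dispatches to the dfd-aware stat or lstat helper depending on AT_SYMLINK_NOFOLLOW, rejecting any other flag bits. A minimal userspace sketch (assuming an fstatat() wrapper, the usual libc name for this syscall; /etc/mtab is only an example of a path that is typically a symlink):

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <sys/stat.h>

int main(void)
{
        struct stat st;

        /* Follows symlinks: the vfs_stat_fd() path */
        if (fstatat(AT_FDCWD, "/etc/mtab", &st, 0) == 0)
                printf("target size: %lld\n", (long long)st.st_size);

        /* Stats the link itself: the vfs_lstat_fd() path */
        if (fstatat(AT_FDCWD, "/etc/mtab", &st, AT_SYMLINK_NOFOLLOW) == 0)
                printf("link size:   %lld\n", (long long)st.st_size);

        return 0;
}
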
+asmlinkage long sys_newfstat(unsigned int fd, struct stat __user *statbuf)
 {
        struct kstat stat;
        int error = vfs_fstat(fd, &stat);
@@ -260,7 +293,8 @@ asmlinkage long sys_newfstat(unsigned int fd, struct stat __user * statbuf)
        return error;
 }
 
-asmlinkage long sys_readlink(const char __user * path, char __user * buf, int bufsiz)
+asmlinkage long sys_readlinkat(int dfd, const char __user *path,
+                               char __user *buf, int bufsiz)
 {
        struct nameidata nd;
        int error;
@@ -268,7 +302,7 @@ asmlinkage long sys_readlink(const char __user * path, char __user * buf, int bu
        if (bufsiz <= 0)
                return -EINVAL;
 
-       error = user_path_walk_link(path, &nd);
+       error = __user_walk_fd(dfd, path, 0, &nd);
        if (!error) {
                struct inode * inode = nd.dentry->d_inode;
 
@@ -285,6 +319,12 @@ asmlinkage long sys_readlink(const char __user * path, char __user * buf, int bu
        return error;
 }
 
+asmlinkage long sys_readlink(const char __user *path, char __user *buf,
+                               int bufsiz)
+{
+       return sys_readlinkat(AT_FDCWD, path, buf, bufsiz);
+}
+
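
sys_readlink() is now a thin wrapper around sys_readlinkat() with AT_FDCWD. A short userspace sketch of the new dfd-relative form (assuming a readlinkat() wrapper is available):

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        char buf[256];
        int dirfd = open("/proc/self", O_RDONLY | O_DIRECTORY);
        ssize_t n;

        if (dirfd < 0)
                return 1;

        n = readlinkat(dirfd, "exe", buf, sizeof(buf) - 1);
        if (n >= 0) {
                buf[n] = '\0';          /* readlinkat() does not NUL-terminate */
                printf("%s\n", buf);
        }
        close(dirfd);
        return 0;
}
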
 
 /* ---------- LFS-64 ----------- */
 #ifdef __ARCH_WANT_STAT64
index d1db8c17a74e8eb7b5ccd0a0a971808d1960606f..120626789406e836708b5050b6c14cce85341cbe 100644 (file)
@@ -336,24 +336,47 @@ static inline int bio_add_buffer(struct bio *bio, struct buffer_head *bh)
 }
 
 /*
- * Submit all of the bios for all of the ioends we have saved up,
- * covering the initial writepage page and also any probed pages.
+ * Submit all of the bios for all of the ioends we have saved up, covering the
+ * initial writepage page and also any probed pages.
+ *
+ * Because we may have multiple ioends spanning a page, we need to start
+ * writeback on all the buffers before we submit them for I/O. If we mark the
+ * buffers as we go, we can end up with a page that only has some of its
+ * buffers marked async write, and I/O completion can occur before we mark
+ * the remaining buffers async write.
+ *
+ * The end result is that we trip a bug in end_page_writeback(), because we
+ * call it twice for the same page: the code in end_buffer_async_write()
+ * assumes that all buffers on the page are started at the same time.
+ *
+ * The fix is two passes across the ioend list - one to start writeback on
+ * the bufferheads, and then a second to submit them for I/O.
  */
 STATIC void
 xfs_submit_ioend(
        xfs_ioend_t             *ioend)
 {
+       xfs_ioend_t             *head = ioend;
        xfs_ioend_t             *next;
        struct buffer_head      *bh;
        struct bio              *bio;
        sector_t                lastblock = 0;
 
+       /* Pass 1 - start writeback */
+       do {
+               next = ioend->io_list;
+               for (bh = ioend->io_buffer_head; bh; bh = bh->b_private) {
+                       xfs_start_buffer_writeback(bh);
+               }
+       } while ((ioend = next) != NULL);
+
+       /* Pass 2 - submit I/O */
+       ioend = head;
        do {
                next = ioend->io_list;
                bio = NULL;
 
                for (bh = ioend->io_buffer_head; bh; bh = bh->b_private) {
-                       xfs_start_buffer_writeback(bh);
 
                        if (!bio) {
  retry:
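
The same two-pass shape, stripped of the XFS types, looks roughly like this (purely illustrative; struct item and submit_all() are made-up names, and the printf stands in for bio submission):

#include <stdio.h>

struct item {
        struct item *next;
        int in_flight;
        int id;
};

static void submit_all(struct item *head)
{
        struct item *i;

        for (i = head; i; i = i->next)          /* pass 1: mark everything */
                i->in_flight = 1;

        for (i = head; i; i = i->next)          /* pass 2: kick off the "I/O" */
                printf("submitting item %d (in_flight=%d)\n",
                       i->id, i->in_flight);
}

int main(void)
{
        struct item c = { NULL, 0, 3 }, b = { &c, 0, 2 }, a = { &b, 0, 1 };

        submit_all(&a);
        return 0;
}
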
index 740c297eb11c1c4f568e72afe8f2cf56a526b74c..46a0402696de913ec357e79548a34ac37ee3446d 100644 (file)
@@ -38,8 +38,6 @@ struct clk {
 struct clk_functions {
        int             (*clk_enable)(struct clk *clk);
        void            (*clk_disable)(struct clk *clk);
-       int             (*clk_use)(struct clk *clk);
-       void            (*clk_unuse)(struct clk *clk);
        long            (*clk_round_rate)(struct clk *clk, unsigned long rate);
        int             (*clk_set_rate)(struct clk *clk, unsigned long rate);
        int             (*clk_set_parent)(struct clk *clk, struct clk *parent);
index dae138b9cac5033470efe6284a3fa6d51b34db1b..1409c5bd703f0ae30911bfde802b59255a49739a 100644 (file)
 #define DCSR_STARTINTR (1 << 1)        /* Start Interrupt (read / write) */
 #define DCSR_BUSERR    (1 << 0)        /* Bus Error Interrupt (read / write) */
 
+#define DALGN          __REG(0x400000a0)  /* DMA Alignment Register */
 #define DINT           __REG(0x400000f0)  /* DMA Interrupt Register */
 
 #define DRCMR(n)       __REG2(0x40000100, (n)<<2)
 #define SSCR0_National (0x2 << 4)      /* National Microwire */
 #define SSCR0_ECS      (1 << 6)        /* External clock select */
 #define SSCR0_SSE      (1 << 7)        /* Synchronous Serial Port Enable */
+#if defined(CONFIG_PXA25x)
 #define SSCR0_SCR      (0x0000ff00)    /* Serial Clock Rate (mask) */
 #define SSCR0_SerClkDiv(x) ((((x) - 2)/2) << 8) /* Divisor [2..512] */
+#elif defined(CONFIG_PXA27x)
+#define SSCR0_SCR      (0x000fff00)    /* Serial Clock Rate (mask) */
+#define SSCR0_SerClkDiv(x) (((x) - 1) << 8) /* Divisor [1..4096] */
+#define SSCR0_EDSS     (1 << 20)       /* Extended data size select */
+#define SSCR0_NCS      (1 << 21)       /* Network clock select */
+#define SSCR0_RIM      (1 << 22)       /* Receive FIFO overrun interrupt mask */
+#define SSCR0_TUM      (1 << 23)       /* Transmit FIFO underrun interrupt mask */
+#define SSCR0_FRDC     (0x07000000)    /* Frame rate divider control (mask) */
+#define SSCR0_SlotsPerFrm(x) ((x) - 1) /* Time slots per frame [1..8] */
+#define SSCR0_ADC      (1 << 30)       /* Audio clock select */
+#define SSCR0_MOD      (1 << 31)       /* Mode (normal or network) */
+#endif
 
 #define SSCR1_RIE      (1 << 0)        /* Receive FIFO Interrupt Enable */
 #define SSCR1_TIE      (1 << 1)        /* Transmit FIFO Interrupt Enable */
index abfbe45cd17c9ff5c64b2e871bd00cc2682eb309..5f8223e700d353f22a75c94c3e173f14ad609b78 100644 (file)
@@ -25,7 +25,7 @@
                .macro addruart, rx
                mrc     p15, 0, \rx, c1, c0
                tst     \rx, #1
-               ldreq   \rx, = S3C2410_PA_UART
+               ldreq   \rx, = S3C24XX_PA_UART
                ldrne   \rx, = S3C24XX_VA_UART
 #if CONFIG_DEBUG_S3C2410_UART != 0
                add     \rx, \rx, #(S3C2410_UART1_OFF * CONFIG_DEBUG_S3C2410_UART)
@@ -44,7 +44,7 @@
 1003:
                mrc     p15, 0, \rd, c1, c0
                tst     \rd, #1
-               addeq   \rd, \rx, #(S3C2410_PA_GPIO - S3C2410_PA_UART)
+               addeq   \rd, \rx, #(S3C24XX_PA_GPIO - S3C24XX_PA_UART)
                addne   \rd, \rx, #(S3C24XX_VA_GPIO - S3C24XX_VA_UART)
                bic     \rd, \rd, #0xff000
                ldr     \rd, [ \rd, # S3C2410_GSTATUS1 - S3C2410_GPIOREG(0) ]
@@ -75,7 +75,7 @@
 1003:
                mrc     p15, 0, \rd, c1, c0
                tst     \rd, #1
-               addeq   \rd, \rx, #(S3C2410_PA_GPIO - S3C2410_PA_UART)
+               addeq   \rd, \rx, #(S3C24XX_PA_GPIO - S3C24XX_PA_UART)
                addne   \rd, \rx, #(S3C24XX_VA_GPIO - S3C24XX_VA_UART)
                bic     \rd, \rd, #0xff000
                ldr     \rd, [ \rd, # S3C2410_GSTATUS1 - S3C2410_GPIOREG(0) ]
index 1833ea5c4220b34d2594189e61caa4ab444ca84d..c380d264a8479137fe2a979b91d9969fc64cac03 100644 (file)
@@ -14,6 +14,7 @@
  *  06-Jan-2003 BJD   Linux 2.6.0 version, moved bast specifics out
  *  10-Feb-2005 BJD   Added CAMIF definition from guillaume.gourat@nexvision.tv
  *  10-Mar-2005 LCVR  Added support to S3C2400, changed {VA,SZ} names
+ *  15-Jan-2006 LCVR  Added S3C24XX_PA macros for common S3C24XX resources
 */
 
 #ifndef __ASM_ARCH_MAP_H
 
 #define S3C2400_SDRAM_PA    (S3C2400_CS6)
 
+/* Use a single interface for common resources between S3C24XX cpus */
+
+#ifdef CONFIG_CPU_S3C2400
+#define S3C24XX_PA_IRQ      S3C2400_PA_IRQ
+#define S3C24XX_PA_MEMCTRL  S3C2400_PA_MEMCTRL
+#define S3C24XX_PA_USBHOST  S3C2400_PA_USBHOST
+#define S3C24XX_PA_DMA      S3C2400_PA_DMA
+#define S3C24XX_PA_CLKPWR   S3C2400_PA_CLKPWR
+#define S3C24XX_PA_LCD      S3C2400_PA_LCD
+#define S3C24XX_PA_UART     S3C2400_PA_UART
+#define S3C24XX_PA_TIMER    S3C2400_PA_TIMER
+#define S3C24XX_PA_USBDEV   S3C2400_PA_USBDEV
+#define S3C24XX_PA_WATCHDOG S3C2400_PA_WATCHDOG
+#define S3C24XX_PA_IIC      S3C2400_PA_IIC
+#define S3C24XX_PA_IIS      S3C2400_PA_IIS
+#define S3C24XX_PA_GPIO     S3C2400_PA_GPIO
+#define S3C24XX_PA_RTC      S3C2400_PA_RTC
+#define S3C24XX_PA_ADC      S3C2400_PA_ADC
+#define S3C24XX_PA_SPI      S3C2400_PA_SPI
+#else
+#define S3C24XX_PA_IRQ      S3C2410_PA_IRQ
+#define S3C24XX_PA_MEMCTRL  S3C2410_PA_MEMCTRL
+#define S3C24XX_PA_USBHOST  S3C2410_PA_USBHOST
+#define S3C24XX_PA_DMA      S3C2410_PA_DMA
+#define S3C24XX_PA_CLKPWR   S3C2410_PA_CLKPWR
+#define S3C24XX_PA_LCD      S3C2410_PA_LCD
+#define S3C24XX_PA_UART     S3C2410_PA_UART
+#define S3C24XX_PA_TIMER    S3C2410_PA_TIMER
+#define S3C24XX_PA_USBDEV   S3C2410_PA_USBDEV
+#define S3C24XX_PA_WATCHDOG S3C2410_PA_WATCHDOG
+#define S3C24XX_PA_IIC      S3C2410_PA_IIC
+#define S3C24XX_PA_IIS      S3C2410_PA_IIS
+#define S3C24XX_PA_GPIO     S3C2410_PA_GPIO
+#define S3C24XX_PA_RTC      S3C2410_PA_RTC
+#define S3C24XX_PA_ADC      S3C2410_PA_ADC
+#define S3C24XX_PA_SPI      S3C2410_PA_SPI
+#endif
 
 #endif /* __ASM_ARCH_MAP_H */
index ce1bbbaad6d36a6876fdb2534ebfb2103970b90b..83b01254c4ac7374421cbad670debe9424ef42cc 100644 (file)
@@ -39,9 +39,9 @@
 #define S3C24XX_VA_UART1      (S3C24XX_VA_UART + 0x4000 )
 #define S3C24XX_VA_UART2      (S3C24XX_VA_UART + 0x8000 )
 
-#define S3C2410_PA_UART0      (S3C2410_PA_UART)
-#define S3C2410_PA_UART1      (S3C2410_PA_UART + 0x4000 )
-#define S3C2410_PA_UART2      (S3C2410_PA_UART + 0x8000 )
+#define S3C2410_PA_UART0      (S3C24XX_PA_UART)
+#define S3C2410_PA_UART1      (S3C24XX_PA_UART + 0x4000 )
+#define S3C2410_PA_UART2      (S3C24XX_PA_UART + 0x8000 )
 
 #define S3C2410_URXH     (0x24)
 #define S3C2410_UTXH     (0x20)
index ddd1578a7ee0a40d2877434176f7718797c9b1a4..4367ec054b51aeb277040bbee9c5f47bb99b5762 100644 (file)
 #undef S3C2410_GPIOREG
 #undef S3C2410_WDOGREG
 
-#define S3C2410_GPIOREG(x) ((S3C2410_PA_GPIO + (x)))
-#define S3C2410_WDOGREG(x) ((S3C2410_PA_WATCHDOG + (x)))
+#define S3C2410_GPIOREG(x) ((S3C24XX_PA_GPIO + (x)))
+#define S3C2410_WDOGREG(x) ((S3C24XX_PA_WATCHDOG + (x)))
 
 /* how many bytes we allow into the FIFO at a time in FIFO mode */
 #define FIFO_MAX        (14)
 
-#define uart_base S3C2410_PA_UART + (0x4000*CONFIG_S3C2410_LOWLEVEL_UART_PORT)
+#define uart_base S3C24XX_PA_UART + (0x4000*CONFIG_S3C2410_LOWLEVEL_UART_PORT)
 
 static __inline__ void
 uart_wr(unsigned int reg, unsigned int val)
index 3351b77fab36e93bc86ef94c74a45a7b6b4295a4..e8ea67c97c73d3526b3c7025dbc9a468e1ef6331 100644 (file)
@@ -26,6 +26,7 @@ struct meminfo;
 #define MT_MEMORY              5
 #define MT_ROM                 6
 #define MT_IXP2000_DEVICE      7
+#define MT_NONSHARED_DEVICE    8
 
 extern void create_memmap_holes(struct meminfo *);
 extern void memtable_init(struct meminfo *);
index 5a0d19b466b0cd2242bbf6f1fca244ec90d12617..70e00d08345ec93d6b989fc47b2a5099272375d6 100644 (file)
@@ -168,6 +168,7 @@ extern void __pgd_error(const char *file, int line, unsigned long val);
 #define PMD_SECT_WB            (PMD_SECT_CACHEABLE | PMD_SECT_BUFFERABLE)
 #define PMD_SECT_MINICACHE     (PMD_SECT_TEX(1) | PMD_SECT_CACHEABLE)
 #define PMD_SECT_WBWA          (PMD_SECT_TEX(1) | PMD_SECT_CACHEABLE | PMD_SECT_BUFFERABLE)
+#define PMD_SECT_NONSHARED_DEV (PMD_SECT_TEX(2))
 
 /*
  *   - coarse table (not used)
index a5576e02dd1d08a8bb4d4970e641751b17e05a87..ea426abf01d39449d58d3356ab53d4e5dec9ed3c 100644 (file)
@@ -129,6 +129,7 @@ register struct thread_info *__current_thread_info asm("gr15");
 #define TIF_NEED_RESCHED       3       /* rescheduling necessary */
 #define TIF_SINGLESTEP         4       /* restore singlestep on return to user mode */
 #define TIF_IRET               5       /* return with iret */
+#define TIF_RESTORE_SIGMASK    6       /* restore signal mask in do_signal() */
 #define TIF_POLLING_NRFLAG     16      /* true if poll_idle() is polling TIF_NEED_RESCHED */
 #define TIF_MEMDIE             17      /* OOM killer killed process */
 
@@ -138,6 +139,7 @@ register struct thread_info *__current_thread_info asm("gr15");
 #define _TIF_NEED_RESCHED      (1 << TIF_NEED_RESCHED)
 #define _TIF_SINGLESTEP                (1 << TIF_SINGLESTEP)
 #define _TIF_IRET              (1 << TIF_IRET)
+#define _TIF_RESTORE_SIGMASK   (1 << TIF_RESTORE_SIGMASK)
 #define _TIF_POLLING_NRFLAG    (1 << TIF_POLLING_NRFLAG)
 
 #define _TIF_WORK_MASK         0x0000FFFE      /* work to do on interrupt/exception return */
index cde376a7a85733e57dda51e8569bd16389ad6b6b..4d994d2e99e30e50a7ed68da17914abfa939cefa 100644 (file)
@@ -486,6 +486,7 @@ static inline pid_t wait(int * wait_stat)
 /* #define __ARCH_WANT_SYS_SIGPENDING */
 #define __ARCH_WANT_SYS_SIGPROCMASK
 #define __ARCH_WANT_SYS_RT_SIGACTION
+#define __ARCH_WANT_SYS_RT_SIGSUSPEND
 #endif
 
 /*
diff --git a/include/asm-i386/edac.h b/include/asm-i386/edac.h
new file mode 100644 (file)
index 0000000..3e7dd0a
--- /dev/null
@@ -0,0 +1,18 @@
+#ifndef ASM_EDAC_H
+#define ASM_EDAC_H
+
+/* ECC atomic, DMA, SMP and interrupt safe scrub function */
+
+static __inline__ void atomic_scrub(void *va, u32 size)
+{
+       unsigned long *virt_addr = va;
+       u32 i;
+
+       for (i = 0; i < size / 4; i++, virt_addr++)
+               /* Very carefully read and write to memory atomically
+                * so we are interrupt, DMA and SMP safe.
+                */
+               __asm__ __volatile__("lock; addl $0, %0"::"m"(*virt_addr));
+}
+
+#endif
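
atomic_scrub() walks the region a 32-bit word at a time and rewrites each word with a locked "add $0": the value is unchanged, but the full read-modify-write forces the memory controller to regenerate (and thereby correct) the ECC bits. A standalone x86 demonstration of the same trick (a userspace toy, not the EDAC driver path; it uses a "+m" constraint where the kernel version passes the word as an input-only operand):

#include <stdint.h>
#include <stdio.h>

static void scrub(void *va, uint32_t size)
{
        uint32_t *p = va;
        uint32_t i;

        /* Locked add of zero: value unchanged, but the word is read and
         * written back atomically, which rewrites the ECC check bits. */
        for (i = 0; i < size / 4; i++, p++)
                __asm__ __volatile__("lock; addl $0, %0" : "+m" (*p));
}

int main(void)
{
        uint32_t buf[16] = { 0 };

        scrub(buf, sizeof(buf));
        printf("scrubbed %zu bytes\n", sizeof(buf));
        return 0;
}
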
index e7a271d393099b3909cc329bf9fa90bbd2bb7e04..44b9db806474b9faabf19d9b6323b23279b4d0c6 100644 (file)
@@ -61,7 +61,7 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
        if (op == FUTEX_OP_SET)
                __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
        else {
-#if !defined(CONFIG_X86_BSWAP) && !defined(CONFIG_UML)
+#ifndef CONFIG_X86_BSWAP
                if (boot_cpu_data.x86 == 3)
                        ret = -ENOSYS;
                else
index 76524b4052ac02b071bb40889728c97af91cea41..026fd231488dc9729302a7b02de7d463fa07c35d 100644 (file)
@@ -218,7 +218,6 @@ static __inline__ int sigfindinword(unsigned long word)
 }
 
 struct pt_regs;
-extern int FASTCALL(do_signal(struct pt_regs *regs, sigset_t *oldset));
 
 #define ptrace_signal_deliver(regs, cookie)            \
        do {                                            \
index 2493e77e8c3052da1da84626d187cc5a312f922f..e20e99551d710e73f634664731ea48be50bc6ab9 100644 (file)
@@ -140,6 +140,7 @@ register unsigned long current_stack_pointer asm("esp") __attribute_used__;
 #define TIF_SYSCALL_EMU                6       /* syscall emulation active */
 #define TIF_SYSCALL_AUDIT      7       /* syscall auditing active */
 #define TIF_SECCOMP            8       /* secure computing */
+#define TIF_RESTORE_SIGMASK    9       /* restore signal mask in do_signal() */
 #define TIF_POLLING_NRFLAG     16      /* true if poll_idle() is polling TIF_NEED_RESCHED */
 #define TIF_MEMDIE             17
 
@@ -152,6 +153,7 @@ register unsigned long current_stack_pointer asm("esp") __attribute_used__;
 #define _TIF_SYSCALL_EMU       (1<<TIF_SYSCALL_EMU)
 #define _TIF_SYSCALL_AUDIT     (1<<TIF_SYSCALL_AUDIT)
 #define _TIF_SECCOMP           (1<<TIF_SECCOMP)
+#define _TIF_RESTORE_SIGMASK   (1<<TIF_RESTORE_SIGMASK)
 #define _TIF_POLLING_NRFLAG    (1<<TIF_POLLING_NRFLAG)
 
 /* work to do on interrupt/exception return */
index 481c3c0ea720d32c1570a3588ef31da4ac81247b..597496ed2aeea58efd01ee6f62b98cac6183747d 100644 (file)
 #define __NR_inotify_add_watch 292
 #define __NR_inotify_rm_watch  293
 #define __NR_migrate_pages     294
+#define __NR_openat            295
+#define __NR_mkdirat           296
+#define __NR_mknodat           297
+#define __NR_fchownat          298
+#define __NR_futimesat         299
+#define __NR_newfstatat                300
+#define __NR_unlinkat          301
+#define __NR_renameat          302
+#define __NR_linkat            303
+#define __NR_symlinkat         304
+#define __NR_readlinkat                305
+#define __NR_fchmodat          306
+#define __NR_faccessat         307
+#define __NR_pselect6          308
+#define __NR_ppoll             309
 
-#define NR_syscalls 295
+#define NR_syscalls 310
 
 /*
  * user-visible error numbers are in the range -1 - -128: see
@@ -417,6 +432,7 @@ __syscall_return(type,__res); \
 #define __ARCH_WANT_SYS_SIGPENDING
 #define __ARCH_WANT_SYS_SIGPROCMASK
 #define __ARCH_WANT_SYS_RT_SIGACTION
+#define __ARCH_WANT_SYS_RT_SIGSUSPEND
 #endif
 
 #ifdef __KERNEL_SYSCALLS__
index bb8906285fab24104df8acf9c7f3ba1c35733ccf..f483eeb95dd1dabfd248803492bb0a6a7561b6ec 100644 (file)
@@ -61,7 +61,7 @@ static inline void
 down (struct semaphore *sem)
 {
        might_sleep();
-       if (atomic_dec_return(&sem->count) < 0)
+       if (ia64_fetchadd(-1, &sem->count.counter, acq) < 1)
                __down(sem);
 }
 
@@ -75,7 +75,7 @@ down_interruptible (struct semaphore * sem)
        int ret = 0;
 
        might_sleep();
-       if (atomic_dec_return(&sem->count) < 0)
+       if (ia64_fetchadd(-1, &sem->count.counter, acq) < 1)
                ret = __down_interruptible(sem);
        return ret;
 }
@@ -85,7 +85,7 @@ down_trylock (struct semaphore *sem)
 {
        int ret = 0;
 
-       if (atomic_dec_return(&sem->count) < 0)
+       if (ia64_fetchadd(-1, &sem->count.counter, acq) < 1)
                ret = __down_trylock(sem);
        return ret;
 }
@@ -93,7 +93,7 @@ down_trylock (struct semaphore *sem)
 static inline void
 up (struct semaphore * sem)
 {
-       if (atomic_inc_return(&sem->count) <= 0)
+       if (ia64_fetchadd(1, &sem->count.counter, rel) <= -1)
                __up(sem);
 }
 
index e68a80853d5dfaf81beb54d45727e495ba8f013e..9ca642cad33878a0a3de28c74ae639998099ea86 100644 (file)
@@ -8,7 +8,7 @@
  * License.  See the file "COPYING" in the main directory of this archive
  * for more details.
  *
- * Copyright (c) 2005 Silicon Graphics, Inc.  All rights reserved.
+ * Copyright (c) 2005-2006 Silicon Graphics, Inc.  All rights reserved.
  */
 
 
@@ -27,14 +27,12 @@ extern int sn_prom_feature_available(int id);
  * "false" for new features.
  *
  * Use:
- *             if (sn_prom_feature_available(PRF_FEATURE_XXX))
+ *             if (sn_prom_feature_available(PRF_XXX))
  *                     ...
  */
 
-/*
- * Example: feature XXX
- */
-#define PRF_FEATURE_XXX                0
+#define PRF_PAL_CACHE_FLUSH_SAFE       0
+#define PRF_DEVICE_FLUSH_LIST          1
 
 
 
@@ -51,7 +49,7 @@ extern int sn_prom_feature_available(int id);
  *
  * By default, features are disabled unless explicitly enabled.
  */
-#define  OSF_MCA_SLV_TO_OS_INIT_SLV            0
-#define  OSF_FEAT_LOG_SBES                     1
+#define  OSF_MCA_SLV_TO_OS_INIT_SLV    0
+#define  OSF_FEAT_LOG_SBES             1
 
 #endif /* _ASM_IA64_SN_FEATURE_SETS_H */
index 203945ae034e1c70e5a8aee672a6b40e22bbfdc1..9bd2f9bf329b39538f5d762a391a0baf98eba980 100644 (file)
@@ -18,6 +18,7 @@
 
 #include <linux/cache.h>
 #include <linux/hardirq.h>
+#include <linux/mutex.h>
 #include <asm/sn/types.h>
 #include <asm/sn/bte.h>
 
@@ -359,7 +360,7 @@ typedef void (*xpc_notify_func)(enum xpc_retval reason, partid_t partid,
  * the channel.
  */
 struct xpc_registration {
-       struct semaphore sema;
+       struct mutex mutex;
        xpc_channel_func func;          /* function to call */
        void *key;                      /* pointer to user's key */
        u16 nentries;                   /* #of msg entries in local msg queue */
index 87e9cd5885108249657471cdf3a5535f91f1c549..0c36928ffd8b56c1e67ff1090124b8b902be126c 100644 (file)
@@ -19,6 +19,8 @@
 #include <linux/interrupt.h>
 #include <linux/sysctl.h>
 #include <linux/device.h>
+#include <linux/mutex.h>
+#include <linux/completion.h>
 #include <asm/pgtable.h>
 #include <asm/processor.h>
 #include <asm/sn/bte.h>
@@ -335,8 +337,7 @@ struct xpc_openclose_args {
  * and consumed by the intended recipient.
  */
 struct xpc_notify {
-       struct semaphore sema;          /* notify semaphore */
-       volatile u8 type;                       /* type of notification */
+       volatile u8 type;               /* type of notification */
 
        /* the following two fields are only used if type == XPC_N_CALL */
        xpc_notify_func func;           /* user's notify function */
@@ -465,8 +466,8 @@ struct xpc_channel {
        xpc_channel_func func;          /* user's channel function */
        void *key;                      /* pointer to user's key */
 
-       struct semaphore msg_to_pull_sema; /* next msg to pull serialization */
-       struct semaphore wdisconnect_sema; /* wait for channel disconnect */
+       struct mutex msg_to_pull_mutex; /* next msg to pull serialization */
+       struct completion wdisconnect_wait; /* wait for channel disconnect */
 
        struct xpc_openclose_args *local_openclose_args; /* args passed on */
                                        /* opening or closing of channel */
index d8aae4da3978945adae91064c3ebbe8d0b29f01b..412ef8e493a8cc2644ad79294f9907ddd8e4566c 100644 (file)
 #include <asm/smp.h>
 
 #ifdef CONFIG_NUMA
+
+/* Nodes w/o CPUs are preferred for memory allocations, see build_zonelists */
+#define PENALTY_FOR_NODE_WITH_CPUS 255
+
 /*
  * Returns the number of the node containing CPU 'cpu'
  */
index cd9f11f1ef14b7ac8f44d2868f5a35b6c7548eca..4dc514aabfe7e8170c3820b587e260b3d6dd5131 100644 (file)
@@ -31,7 +31,7 @@
 
 /* The Hypervisor barfs if the lppaca crosses a page boundary.  A 1k
  * alignment is sufficient to prevent this */
-struct __attribute__((__aligned__(0x400))) lppaca {
+struct lppaca {
 //=============================================================================
 // CACHE_LINE_1 0x0000 - 0x007F Contains read-only data
 // NOTE: The xDynXyz fields are fields that will be dynamically changed by
@@ -129,7 +129,7 @@ struct __attribute__((__aligned__(0x400))) lppaca {
 // CACHE_LINE_4-5 0x0100 - 0x01FF Contains PMC interrupt data
 //=============================================================================
        u8      pmc_save_area[256];     // PMC interrupt Area           x00-xFF
-};
+} __attribute__((__aligned__(0x400)));
 
 extern struct lppaca lppaca[];
 
index 7e09d7cda933c2919d16c51adcec0d30f24d39ad..67cdaf3ae9fc6270a5b558ee23e9be13762506d5 100644 (file)
@@ -122,6 +122,7 @@ static inline struct thread_info *current_thread_info(void)
 #define TIF_RESTOREALL         12      /* Restore all regs (implies NOERROR) */
 #define TIF_SAVE_NVGPRS                13      /* Save r14-r31 in signal frame */
 #define TIF_NOERROR            14      /* Force successful syscall return */
+#define TIF_RESTORE_SIGMASK    15      /* Restore signal mask in do_signal */
 
 /* as above, but as bit values */
 #define _TIF_SYSCALL_TRACE     (1<<TIF_SYSCALL_TRACE)
@@ -138,10 +139,12 @@ static inline struct thread_info *current_thread_info(void)
 #define _TIF_RESTOREALL                (1<<TIF_RESTOREALL)
 #define _TIF_SAVE_NVGPRS       (1<<TIF_SAVE_NVGPRS)
 #define _TIF_NOERROR           (1<<TIF_NOERROR)
+#define _TIF_RESTORE_SIGMASK   (1<<TIF_RESTORE_SIGMASK)
 #define _TIF_SYSCALL_T_OR_A    (_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SECCOMP)
 
 #define _TIF_USER_WORK_MASK    (_TIF_NOTIFY_RESUME | _TIF_SIGPENDING | \
-                                _TIF_NEED_RESCHED | _TIF_RESTOREALL)
+                                _TIF_NEED_RESCHED | _TIF_RESTOREALL | \
+                                _TIF_RESTORE_SIGMASK)
 #define _TIF_PERSYSCALL_MASK   (_TIF_RESTOREALL|_TIF_NOERROR|_TIF_SAVE_NVGPRS)
 
 #endif /* __KERNEL__ */
index 19eaac3fbbf9ecc6bad44f00def47608f0f13c94..a40cdff21a88e1d335474e5895012dbafadaec3f 100644 (file)
 #define __NR_inotify_rm_watch  277
 #define __NR_spu_run           278
 #define __NR_spu_create                279
+#define __NR_pselect6          280
+#define __NR_ppoll             281
 
-#define __NR_syscalls          280
+#define __NR_syscalls          282
 
 #ifdef __KERNEL__
 #define __NR__exit __NR_exit
@@ -444,11 +446,13 @@ type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, type5 arg5, type6 arg6
 #define __ARCH_WANT_SYS_SIGPENDING
 #define __ARCH_WANT_SYS_SIGPROCMASK
 #define __ARCH_WANT_SYS_RT_SIGACTION
+#define __ARCH_WANT_SYS_RT_SIGSUSPEND
 #ifdef CONFIG_PPC32
 #define __ARCH_WANT_OLD_STAT
 #endif
 #ifdef CONFIG_PPC64
 #define __ARCH_WANT_COMPAT_SYS_TIME
+#define __ARCH_WANT_COMPAT_SYS_RT_SIGSUSPEND
 #endif
 
 /*
index 95944556d8b6cf536e2ce0d4655754aa7f3f0413..d0d76b30eb4cc993a1aa39e313bebe90fffef2fc 100644 (file)
@@ -164,6 +164,7 @@ enum prom_input_device {
        PROMDEV_IKBD,                   /* input from keyboard */
        PROMDEV_ITTYA,                  /* input from ttya */
        PROMDEV_ITTYB,                  /* input from ttyb */
+       PROMDEV_IRSC,                   /* input from rsc */
        PROMDEV_I_UNK,
 };
 
@@ -175,6 +176,7 @@ enum prom_output_device {
        PROMDEV_OSCREEN,                /* to screen */
        PROMDEV_OTTYA,                  /* to ttya */
        PROMDEV_OTTYB,                  /* to ttyb */
+       PROMDEV_ORSC,                   /* to rsc */
        PROMDEV_O_UNK,
 };
 
index 65f060b040abdabab9c9a1efa12001ba60ff804d..91b9f5888c855defa112d38ec20761a6664f1875 100644 (file)
@@ -128,9 +128,10 @@ BTFIXUPDEF_CALL(void, free_thread_info, struct thread_info *)
  * thread information flag bit numbers
  */
 #define TIF_SYSCALL_TRACE      0       /* syscall trace active */
-#define TIF_NOTIFY_RESUME      1       /* resumption notification requested */
+/* flag bit 1 is available */
 #define TIF_SIGPENDING         2       /* signal pending */
 #define TIF_NEED_RESCHED       3       /* rescheduling necessary */
+#define TIF_RESTORE_SIGMASK    4       /* restore signal mask in do_signal() */
 #define TIF_USEDFPU            8       /* FPU was used by this task
                                         * this quantum (SMP) */
 #define TIF_POLLING_NRFLAG     9       /* true if poll_idle() is polling
@@ -139,9 +140,9 @@ BTFIXUPDEF_CALL(void, free_thread_info, struct thread_info *)
 
 /* as above, but as bit values */
 #define _TIF_SYSCALL_TRACE     (1<<TIF_SYSCALL_TRACE)
-#define _TIF_NOTIFY_RESUME     (1<<TIF_NOTIFY_RESUME)
 #define _TIF_SIGPENDING                (1<<TIF_SIGPENDING)
 #define _TIF_NEED_RESCHED      (1<<TIF_NEED_RESCHED)
+#define _TIF_RESTORE_SIGMASK   (1<<TIF_RESTORE_SIGMASK)
 #define _TIF_USEDFPU           (1<<TIF_USEDFPU)
 #define _TIF_POLLING_NRFLAG    (1<<TIF_POLLING_NRFLAG)
 
index 58dba518239e712e3ba1daf2cc314500513cc8ca..2ac64e65e336531c8fcd918e52e0edde710f495d 100644 (file)
 #define __NR_add_key           281
 #define __NR_request_key       282
 #define __NR_keyctl            283
+#define __NR_openat            284
+#define __NR_mkdirat           285
+#define __NR_mknodat           286
+#define __NR_fchownat          287
+#define __NR_futimesat         288
+#define __NR_newfstatat                289
+#define __NR_unlinkat          290
+#define __NR_renameat          291
+#define __NR_linkat            292
+#define __NR_symlinkat         293
+#define __NR_readlinkat                294
+#define __NR_fchmodat          295
+#define __NR_faccessat         296
+#define __NR_pselect6          297
+#define __NR_ppoll             298
 
-/* WARNING: You MAY NOT add syscall numbers larger than 283, since
+/* WARNING: You MAY NOT add syscall numbers larger than 298, since
  *          all of the syscall tables in the Sparc kernel are
- *          sized to have 283 entries (starting at zero).  Therefore
- *          find a free slot in the 0-282 range.
+ *          sized to have 298 entries (starting at zero).  Therefore
+ *          find a free slot in the 0-298 range.
  */
 
 #define _syscall0(type,name) \
@@ -458,6 +473,7 @@ return -1; \
 #define __ARCH_WANT_SYS_OLDUMOUNT
 #define __ARCH_WANT_SYS_SIGPENDING
 #define __ARCH_WANT_SYS_SIGPROCMASK
+#define __ARCH_WANT_SYS_RT_SIGSUSPEND
 #endif
 
 #ifdef __KERNEL_SYSCALLS__
index d02f1e8ae1a6cd9bbbe8c46a47be25ae9aa2ccaa..3c59b2693fb9639df36b711fc057642a342d2e40 100644 (file)
@@ -163,6 +163,7 @@ enum prom_input_device {
        PROMDEV_IKBD,                   /* input from keyboard */
        PROMDEV_ITTYA,                  /* input from ttya */
        PROMDEV_ITTYB,                  /* input from ttyb */
+       PROMDEV_IRSC,                   /* input from rsc */
        PROMDEV_I_UNK,
 };
 
@@ -174,6 +175,7 @@ enum prom_output_device {
        PROMDEV_OSCREEN,                /* to screen */
        PROMDEV_OTTYA,                  /* to ttya */
        PROMDEV_OTTYB,                  /* to ttyb */
+       PROMDEV_ORSC,                   /* to rsc */
        PROMDEV_O_UNK,
 };
 
index ec85d12d73b98a353e0fdcaf346489b09f80839a..508c416e9d6a8b1423ab3bb88eef8ee27d67ec9a 100644 (file)
@@ -131,6 +131,28 @@ static void inline __read_lock(raw_rwlock_t *lock)
        : "memory");
 }
 
+static int inline __read_trylock(raw_rwlock_t *lock)
+{
+       int tmp1, tmp2;
+
+       __asm__ __volatile__ (
+"1:    ldsw            [%2], %0\n"
+"      brlz,a,pn       %0, 2f\n"
+"       mov            0, %0\n"
+"      add             %0, 1, %1\n"
+"      cas             [%2], %0, %1\n"
+"      cmp             %0, %1\n"
+"      membar          #StoreLoad | #StoreStore\n"
+"      bne,pn          %%icc, 1b\n"
+"       mov            1, %0\n"
+"2:"
+       : "=&r" (tmp1), "=&r" (tmp2)
+       : "r" (lock)
+       : "memory");
+
+       return tmp1;
+}
+
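
The new sparc64 __read_trylock() attempts a single compare-and-swap bump of the reader count, failing immediately if the lock word is negative (a writer holds it) and retrying only if the cas itself lost a race. A C-level rendering of the same loop (illustrative, with a GCC builtin standing in for the cas instruction):

#include <stdio.h>

static int read_trylock_c(int *lock)
{
        int old;

        do {
                old = *lock;
                if (old < 0)
                        return 0;       /* writer active: give up */
        } while (!__sync_bool_compare_and_swap(lock, old, old + 1));

        return 1;                       /* reader count bumped by one */
}

int main(void)
{
        int lock = 0;

        printf("first try:  %d (lock=%d)\n", read_trylock_c(&lock), lock);
        lock = -1;                      /* simulate a writer holding it */
        printf("second try: %d (lock=%d)\n", read_trylock_c(&lock), lock);
        return 0;
}
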
 static void inline __read_unlock(raw_rwlock_t *lock)
 {
        unsigned long tmp1, tmp2;
@@ -211,12 +233,12 @@ static int inline __write_trylock(raw_rwlock_t *lock)
 }
 
 #define __raw_read_lock(p)     __read_lock(p)
+#define __raw_read_trylock(p)  __read_trylock(p)
 #define __raw_read_unlock(p)   __read_unlock(p)
 #define __raw_write_lock(p)    __write_lock(p)
 #define __raw_write_unlock(p)  __write_unlock(p)
 #define __raw_write_trylock(p) __write_trylock(p)
 
-#define __raw_read_trylock(lock)       generic__raw_read_trylock(lock)
 #define __raw_read_can_lock(rw)                (!((rw)->lock & 0x80000000UL))
 #define __raw_write_can_lock(rw)       (!(rw)->lock)
 
index c94d8b3991bdde0cf93be99e258f8798298db7e3..ac9d068aab4f43d9beab4e6df2847616f09788a0 100644 (file)
@@ -221,7 +221,7 @@ register struct thread_info *current_thread_info_reg asm("g6");
  *      nop
  */
 #define TIF_SYSCALL_TRACE      0       /* syscall trace active */
-#define TIF_NOTIFY_RESUME      1       /* resumption notification requested */
+#define TIF_RESTORE_SIGMASK    1       /* restore signal mask in do_signal() */
 #define TIF_SIGPENDING         2       /* signal pending */
 #define TIF_NEED_RESCHED       3       /* rescheduling necessary */
 #define TIF_PERFCTR            4       /* performance counters active */
@@ -241,7 +241,6 @@ register struct thread_info *current_thread_info_reg asm("g6");
 #define TIF_POLLING_NRFLAG     14
 
 #define _TIF_SYSCALL_TRACE     (1<<TIF_SYSCALL_TRACE)
-#define _TIF_NOTIFY_RESUME     (1<<TIF_NOTIFY_RESUME)
 #define _TIF_SIGPENDING                (1<<TIF_SIGPENDING)
 #define _TIF_NEED_RESCHED      (1<<TIF_NEED_RESCHED)
 #define _TIF_PERFCTR           (1<<TIF_PERFCTR)
@@ -250,11 +249,12 @@ register struct thread_info *current_thread_info_reg asm("g6");
 #define _TIF_32BIT             (1<<TIF_32BIT)
 #define _TIF_SECCOMP           (1<<TIF_SECCOMP)
 #define _TIF_SYSCALL_AUDIT     (1<<TIF_SYSCALL_AUDIT)
+#define _TIF_RESTORE_SIGMASK   (1<<TIF_RESTORE_SIGMASK)
 #define _TIF_ABI_PENDING       (1<<TIF_ABI_PENDING)
 #define _TIF_POLLING_NRFLAG    (1<<TIF_POLLING_NRFLAG)
 
 #define _TIF_USER_WORK_MASK    ((0xff << TI_FLAG_WSAVED_SHIFT) | \
-                                (_TIF_NOTIFY_RESUME | _TIF_SIGPENDING | \
+                                (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK | \
                                  _TIF_NEED_RESCHED | _TIF_PERFCTR))
 
 #endif /* __KERNEL__ */
index 51ec2879b881f7e363e7969d1d527a30aaa171df..84ac2bdb09025abbfd0747aec763bf453541c6ea 100644 (file)
 #define __NR_add_key           281
 #define __NR_request_key       282
 #define __NR_keyctl            283
+#define __NR_openat            284
+#define __NR_mkdirat           285
+#define __NR_mknodat           286
+#define __NR_fchownat          287
+#define __NR_futimesat         288
+#define __NR_newfstatat                289
+#define __NR_unlinkat          290
+#define __NR_renameat          291
+#define __NR_linkat            292
+#define __NR_symlinkat         293
+#define __NR_readlinkat                294
+#define __NR_fchmodat          295
+#define __NR_faccessat         296
+#define __NR_pselect6          297
+#define __NR_ppoll             298
 
-/* WARNING: You MAY NOT add syscall numbers larger than 283, since
+/* WARNING: You MAY NOT add syscall numbers larger than 298, since
  *          all of the syscall tables in the Sparc kernel are
- *          sized to have 283 entries (starting at zero).  Therefore
- *          find a free slot in the 0-282 range.
+ *          sized to have 298 entries (starting at zero).  Therefore
+ *          find a free slot in the 0-298 range.
  */
 
 #define _syscall0(type,name) \
@@ -501,6 +516,8 @@ asmlinkage long sys_rt_sigaction(int sig,
 #define __ARCH_WANT_SYS_OLDUMOUNT
 #define __ARCH_WANT_SYS_SIGPENDING
 #define __ARCH_WANT_SYS_SIGPROCMASK
+#define __ARCH_WANT_SYS_RT_SIGSUSPEND
+#define __ARCH_WANT_COMPAT_SYS_RT_SIGSUSPEND
 #endif
 
 /*
index 90674056dcef4592fd65ab25375531884a2f6e4a..1934d9340e2cd6d5c0ca4c58f4ce1869edf97453 100644 (file)
@@ -33,4 +33,20 @@ static inline void * phys_to_virt(unsigned long address)
  */
 #define xlate_dev_kmem_ptr(p)  p
 
+static inline void writeb(unsigned char b, volatile void __iomem *addr)
+{
+       *(volatile unsigned char __force *) addr = b;
+}
+static inline void writew(unsigned short b, volatile void __iomem *addr)
+{
+       *(volatile unsigned short __force *) addr = b;
+}
+static inline void writel(unsigned int b, volatile void __iomem *addr)
+{
+       *(volatile unsigned int __force *) addr = b;
+}
+#define __raw_writeb writeb
+#define __raw_writew writew
+#define __raw_writel writel
+
 #endif
index 705c71972c326ed5057e1683cc2e02a9e0ca0262..17b6b07c4332f01a577981ab8bae819a01a29bd2 100644 (file)
@@ -69,6 +69,7 @@ static inline struct thread_info *current_thread_info(void)
 #define TIF_RESTART_BLOCK      4
 #define TIF_MEMDIE             5
 #define TIF_SYSCALL_AUDIT      6
+#define TIF_RESTORE_SIGMASK    7
 
 #define _TIF_SYSCALL_TRACE     (1 << TIF_SYSCALL_TRACE)
 #define _TIF_SIGPENDING                (1 << TIF_SIGPENDING)
@@ -76,16 +77,6 @@ static inline struct thread_info *current_thread_info(void)
 #define _TIF_POLLING_NRFLAG     (1 << TIF_POLLING_NRFLAG)
 #define _TIF_MEMDIE            (1 << TIF_MEMDIE)
 #define _TIF_SYSCALL_AUDIT     (1 << TIF_SYSCALL_AUDIT)
+#define _TIF_RESTORE_SIGMASK   (1 << TIF_RESTORE_SIGMASK)
 
 #endif
-
-/*
- * Overrides for Emacs so that we follow Linus's tabbing style.
- * Emacs will notice this stuff at the end of the file and automatically
- * adjust the settings for this buffer only.  This must remain at the end
- * of the file.
- * ---------------------------------------------------------------------------
- * Local variables:
- * c-file-style: "linux"
- * End:
- */
index 6fdde45cc0536ca486224c94dfdc99edebe60d1a..afccfcaa9ea9228d0c744d371815266d7afe0233 100644 (file)
@@ -34,6 +34,7 @@ extern int um_execve(const char *file, char *const argv[], char *const env[]);
 #define __ARCH_WANT_SYS_SIGPENDING
 #define __ARCH_WANT_SYS_SIGPROCMASK
 #define __ARCH_WANT_SYS_RT_SIGACTION
+#define __ARCH_WANT_SYS_RT_SIGSUSPEND
 #endif
 
 #ifdef __KERNEL_SYSCALLS__
diff --git a/include/asm-x86_64/edac.h b/include/asm-x86_64/edac.h
new file mode 100644 (file)
index 0000000..cad1cd4
--- /dev/null
@@ -0,0 +1,18 @@
+#ifndef ASM_EDAC_H
+#define ASM_EDAC_H
+
+/* ECC atomic, DMA, SMP and interrupt safe scrub function */
+
+static __inline__ void atomic_scrub(void *va, u32 size)
+{
+       unsigned int *virt_addr = va;
+       u32 i;
+
+       for (i = 0; i < size / 4; i++, virt_addr++)
+               /* Very carefully read and write to memory atomically
+                * so we are interrupt, DMA and SMP safe.
+                */
+               __asm__ __volatile__("lock; addl $0, %0"::"m"(*virt_addr));
+}
+
+#endif
index e8843362a6ccda6011d5ca607ebde4193eab540d..e87cd83a0e86dbbce4e9eb21fa09787400b6d6fb 100644 (file)
 #define __NR_ia32_inotify_add_watch    292
 #define __NR_ia32_inotify_rm_watch     293
 #define __NR_ia32_migrate_pages                294
+#define __NR_ia32_openat               295
+#define __NR_ia32_mkdirat              296
+#define __NR_ia32_mknodat              297
+#define __NR_ia32_fchownat             298
+#define __NR_ia32_futimesat            299
+#define __NR_ia32_newfstatat           300
+#define __NR_ia32_unlinkat             301
+#define __NR_ia32_renameat             302
+#define __NR_ia32_linkat               303
+#define __NR_ia32_symlinkat            304
+#define __NR_ia32_readlinkat           305
+#define __NR_ia32_fchmodat             306
+#define __NR_ia32_faccessat            307
 
-#define IA32_NR_syscalls 295   /* must be > than biggest syscall! */
+#define IA32_NR_syscalls 308   /* must be > the biggest syscall number! */
 
 #endif /* _ASM_X86_64_IA32_UNISTD_H_ */
index e6f896161c1193d60043db9ff6ffebb372e63385..436d099b5b6b162e21aee103525e03222176bab3 100644 (file)
@@ -573,8 +573,35 @@ __SYSCALL(__NR_inotify_add_watch, sys_inotify_add_watch)
 __SYSCALL(__NR_inotify_rm_watch, sys_inotify_rm_watch)
 #define __NR_migrate_pages     256
 __SYSCALL(__NR_migrate_pages, sys_migrate_pages)
+#define __NR_openat            257
+__SYSCALL(__NR_openat, sys_openat)
+#define __NR_mkdirat           258
+__SYSCALL(__NR_mkdirat, sys_mkdirat)
+#define __NR_mknodat           259
+__SYSCALL(__NR_mknodat, sys_mknodat)
+#define __NR_fchownat          260
+__SYSCALL(__NR_fchownat, sys_fchownat)
+#define __NR_futimesat         261
+__SYSCALL(__NR_futimesat, sys_futimesat)
+#define __NR_newfstatat                262
+__SYSCALL(__NR_newfstatat, sys_newfstatat)
+#define __NR_unlinkat          263
+__SYSCALL(__NR_unlinkat, sys_unlinkat)
+#define __NR_renameat          264
+__SYSCALL(__NR_renameat, sys_renameat)
+#define __NR_linkat            265
+__SYSCALL(__NR_linkat, sys_linkat)
+#define __NR_symlinkat         266
+__SYSCALL(__NR_symlinkat, sys_symlinkat)
+#define __NR_readlinkat                267
+__SYSCALL(__NR_readlinkat, sys_readlinkat)
+#define __NR_fchmodat          268
+__SYSCALL(__NR_fchmodat, sys_fchmodat)
+#define __NR_faccessat         269
+__SYSCALL(__NR_faccessat, sys_faccessat)
+
+#define __NR_syscall_max __NR_faccessat
 
-#define __NR_syscall_max __NR_migrate_pages
 #ifndef __NO_STUBS
 
 /* user-visible error numbers are in the range -1 - -4095 */
index 02a585faa62cfd689510b943717cc6f79101184b..860e7a485a5f08d2aaa10129d65c6339c128a008 100644 (file)
@@ -392,8 +392,8 @@ struct request_queue
        unsigned int            nr_congestion_off;
        unsigned int            nr_batching;
 
-       unsigned short          max_sectors;
-       unsigned short          max_hw_sectors;
+       unsigned int            max_sectors;
+       unsigned int            max_hw_sectors;
        unsigned short          max_phys_segments;
        unsigned short          max_hw_segments;
        unsigned short          hardsect_size;
@@ -697,7 +697,7 @@ extern request_queue_t *blk_init_queue(request_fn_proc *, spinlock_t *);
 extern void blk_cleanup_queue(request_queue_t *);
 extern void blk_queue_make_request(request_queue_t *, make_request_fn *);
 extern void blk_queue_bounce_limit(request_queue_t *, u64);
-extern void blk_queue_max_sectors(request_queue_t *, unsigned short);
+extern void blk_queue_max_sectors(request_queue_t *, unsigned int);
 extern void blk_queue_max_phys_segments(request_queue_t *, unsigned short);
 extern void blk_queue_max_hw_segments(request_queue_t *, unsigned short);
 extern void blk_queue_max_segment_size(request_queue_t *, unsigned int);
index c31650df92412de9c6514e81bc5955e8f8a02dcb..17866d7e2b71ad08e12fc9340d5b77448032da48 100644 (file)
@@ -14,6 +14,7 @@
 #ifndef _LINUX_CPUFREQ_H
 #define _LINUX_CPUFREQ_H
 
+#include <linux/mutex.h>
 #include <linux/config.h>
 #include <linux/notifier.h>
 #include <linux/threads.h>
@@ -82,7 +83,7 @@ struct cpufreq_policy {
         unsigned int           policy; /* see above */
        struct cpufreq_governor *governor; /* see below */
 
-       struct semaphore        lock;   /* CPU ->setpolicy or ->target may
+       struct mutex            lock;   /* CPU ->setpolicy or ->target may
                                           only be called once a time */
 
        struct work_struct      update; /* if update_policy() needs to be
index 8a7c82151de96388fd53c7f06296783bd247b14d..c52a63755fdd5b69e9bd63353b13e6aa82c738c9 100644 (file)
 #define DN_ATTRIB      0x00000020      /* File changed attributes */
 #define DN_MULTISHOT   0x80000000      /* Don't remove notifier */
 
+#define AT_FDCWD               -100    /* Special value used to indicate
+                                           openat should use the current
+                                           working directory. */
+#define AT_SYMLINK_NOFOLLOW    0x100   /* Do not follow symbolic links.  */
+#define AT_REMOVEDIR           0x200   /* Remove directory instead of
+                                           unlinking file.  */
+
 #ifdef __KERNEL__
 
 #ifndef force_o_largefile
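
These AT_* values anchor the new *at() system calls wired up elsewhere in this merge: a directory file descriptor, or AT_FDCWD, names the starting point of the path walk. A hedged user-space sketch, assuming a libc that already exposes openat() and unlinkat(); the file name is made up:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        /* Open "example.txt" relative to the current working directory,
         * exactly as a plain open("example.txt", ...) would. */
        int fd = openat(AT_FDCWD, "example.txt",
                        O_WRONLY | O_CREAT | O_TRUNC, 0644);
        if (fd < 0) {
                perror("openat");
                return 1;
        }
        if (write(fd, "hello\n", 6) != 6)
                perror("write");
        close(fd);

        /* Remove the file again, relative to the same anchor. */
        if (unlinkat(AT_FDCWD, "example.txt", 0) < 0)
                perror("unlinkat");
        return 0;
}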
index b77f2608eef92de529977e94d5e379b69c2a4c60..84bb449b9b01734dd864df59008724805de550af 100644 (file)
@@ -1340,7 +1340,8 @@ static inline int break_lease(struct inode *inode, unsigned int mode)
 
 extern int do_truncate(struct dentry *, loff_t start, unsigned int time_attrs,
                       struct file *filp);
-extern long do_sys_open(const char __user *filename, int flags, int mode);
+extern long do_sys_open(int fdf, const char __user *filename, int flags,
+                       int mode);
 extern struct file *filp_open(const char *, int, int);
 extern struct file * dentry_open(struct dentry *, struct vfsmount *, int);
 extern int filp_close(struct file *, fl_owner_t id);
@@ -1479,7 +1480,7 @@ static inline void allow_write_access(struct file *file)
 }
 extern int do_pipe(int *);
 
-extern int open_namei(const char *, int, int, struct nameidata *);
+extern int open_namei(int dfd, const char *, int, int, struct nameidata *);
 extern int may_open(struct nameidata *, int, int);
 
 extern int kernel_read(struct file *, unsigned long, char *, unsigned long);
@@ -1677,6 +1678,8 @@ extern int vfs_readdir(struct file *, filldir_t, void *);
 
 extern int vfs_stat(char __user *, struct kstat *);
 extern int vfs_lstat(char __user *, struct kstat *);
+extern int vfs_stat_fd(int dfd, char __user *, struct kstat *);
+extern int vfs_lstat_fd(int dfd, char __user *, struct kstat *);
 extern int vfs_fstat(unsigned int, struct kstat *);
 
 extern int vfs_ioctl(struct file *, unsigned int, unsigned int, unsigned long);
index fe26d431de8704710d3ffabd9faa7d2d7fa35539..7a92c1ce1457c6dfbcb7a05fd0c06310df575429 100644 (file)
@@ -72,6 +72,7 @@
                                         * over Ethernet
                                         */
 #define ETH_P_AOE      0x88A2          /* ATA over Ethernet            */
+#define ETH_P_TIPC     0x88CA          /* TIPC                         */
 
 /*
  *     Non DIX types. Won't clash for 1500 types.
index 323924edb26a5f686361bfa6cb188f9b5b4d9641..a5363324cf959bcdf8ff21188aee90040e29622a 100644 (file)
@@ -228,6 +228,7 @@ extern void dump_stack(void);
        ntohs((addr).s6_addr16[6]), \
        ntohs((addr).s6_addr16[7])
 #define NIP6_FMT "%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x"
+#define NIP6_SEQFMT "%04x%04x%04x%04x%04x%04x%04x%04x"
 
 #if defined(__LITTLE_ENDIAN)
 #define HIPQUAD(addr) \
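
NIP6_SEQFMT is the colon-less twin of NIP6_FMT, intended for fixed-width seq_file/proc output. The sketch below is illustrative only: it re-declares the kernel macros locally, assumes glibc's s6_addr16 alias, and formats a made-up address both ways:

#define _DEFAULT_SOURCE 1       /* expose s6_addr16 in glibc headers */
#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdio.h>

/* Local copies of the kernel macros, for illustration only. */
#define NIP6(addr) \
        ntohs((addr).s6_addr16[0]), ntohs((addr).s6_addr16[1]), \
        ntohs((addr).s6_addr16[2]), ntohs((addr).s6_addr16[3]), \
        ntohs((addr).s6_addr16[4]), ntohs((addr).s6_addr16[5]), \
        ntohs((addr).s6_addr16[6]), ntohs((addr).s6_addr16[7])
#define NIP6_FMT    "%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x"
#define NIP6_SEQFMT "%04x%04x%04x%04x%04x%04x%04x%04x"

int main(void)
{
        struct in6_addr a;

        inet_pton(AF_INET6, "2001:db8::1", &a);
        printf(NIP6_FMT "\n", NIP6(a));         /* human-readable */
        printf(NIP6_SEQFMT "\n", NIP6(a));      /* fixed-width, no colons */
        return 0;
}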
index a43c95f8f968b8e7be0f648ea38a2475b71d3d07..9e5db2949c588c551fa608d801e7cc627bb39844 100644 (file)
@@ -126,16 +126,19 @@ enum {
 
        ATA_FLAG_SUSPENDED      = (1 << 12), /* port is suspended */
 
+       ATA_FLAG_PIO_LBA48      = (1 << 13), /* Host DMA engine is LBA28 only */
+       ATA_FLAG_IRQ_MASK       = (1 << 14), /* Mask IRQ in PIO xfers */
+
        ATA_QCFLAG_ACTIVE       = (1 << 1), /* cmd not yet ack'd to scsi layer */
        ATA_QCFLAG_SG           = (1 << 3), /* have s/g table? */
        ATA_QCFLAG_SINGLE       = (1 << 4), /* no s/g, just a single buffer */
        ATA_QCFLAG_DMAMAP       = ATA_QCFLAG_SG | ATA_QCFLAG_SINGLE,
 
        /* various lengths of time */
-       ATA_TMOUT_EDD           = 5 * HZ,       /* hueristic */
+       ATA_TMOUT_EDD           = 5 * HZ,       /* heuristic */
        ATA_TMOUT_PIO           = 30 * HZ,
-       ATA_TMOUT_BOOT          = 30 * HZ,      /* hueristic */
-       ATA_TMOUT_BOOT_QUICK    = 7 * HZ,       /* hueristic */
+       ATA_TMOUT_BOOT          = 30 * HZ,      /* heuristic */
+       ATA_TMOUT_BOOT_QUICK    = 7 * HZ,       /* heuristic */
        ATA_TMOUT_CDB           = 30 * HZ,
        ATA_TMOUT_CDB_QUICK     = 5 * HZ,
        ATA_TMOUT_INTERNAL      = 30 * HZ,
@@ -499,6 +502,8 @@ extern int ata_scsi_slave_config(struct scsi_device *sdev);
 /*
  * Timing helpers
  */
+
+extern unsigned int ata_pio_need_iordy(const struct ata_device *);
 extern int ata_timing_compute(struct ata_device *, unsigned short,
                              struct ata_timing *, int, int);
 extern void ata_timing_merge(const struct ata_timing *,
index d6a53ed6ab6c530c490ee43d5fd6a67faa70f48a..bbd2221923c3a274dae0d13d1425942108831d6d 100644 (file)
@@ -159,6 +159,7 @@ extern void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new);
 extern struct mempolicy default_policy;
 extern struct zonelist *huge_zonelist(struct vm_area_struct *vma,
                unsigned long addr);
+extern unsigned slab_node(struct mempolicy *policy);
 
 extern int policy_zone;
 
index 49cc68af01f8e6d971d870fd952d229d31810628..8ac854f7f190aa16cefbb52d4f9fa60fb6c3de64 100644 (file)
@@ -39,24 +39,3 @@ del_page_from_lru(struct zone *zone, struct page *page)
        }
 }
 
-/*
- * Isolate one page from the LRU lists.
- *
- * - zone->lru_lock must be held
- */
-static inline int __isolate_lru_page(struct page *page)
-{
-       if (unlikely(!TestClearPageLRU(page)))
-               return 0;
-
-       if (get_page_testone(page)) {
-               /*
-                * It is being freed elsewhere
-                */
-               __put_page(page);
-               SetPageLRU(page);
-               return -ENOENT;
-       }
-
-       return 1;
-}
index 34cbefd2ebded8437ab2cb82d6613264593df9a5..93a849f742dbbb47ae5ee00464f328b757d597fe 100644 (file)
@@ -149,14 +149,16 @@ struct zone {
        unsigned long           pages_scanned;     /* since last reclaim */
        int                     all_unreclaimable; /* All pages pinned */
 
-       /*
-        * Does the allocator try to reclaim pages from the zone as soon
-        * as it fails a watermark_ok() in __alloc_pages?
-        */
-       int                     reclaim_pages;
        /* A count of how many reclaimers are scanning this zone */
        atomic_t                reclaim_in_progress;
 
+       /*
+        * timestamp (in jiffies) of the last zone reclaim that did not
+        * result in freeing of pages. This is used to avoid repeated scans
+        * if all memory in the zone is in use.
+        */
+       unsigned long           last_unsuccessful_zone_reclaim;
+
        /*
         * prev_priority holds the scanning priority for this zone.  It is
         * defined as the scanning priority at which we achieved our reclaim
index b699e427c00cc1c6ef8d2dbb331040ba81b4ff4c..e6698013e4d070fd3b38376f9425eab70f9ee644 100644 (file)
@@ -56,10 +56,11 @@ enum {LAST_NORM, LAST_ROOT, LAST_DOT, LAST_DOTDOT, LAST_BIND};
 #define LOOKUP_ACCESS          (0x0400)
 
 extern int FASTCALL(__user_walk(const char __user *, unsigned, struct nameidata *));
+extern int FASTCALL(__user_walk_fd(int dfd, const char __user *, unsigned, struct nameidata *));
 #define user_path_walk(name,nd) \
-       __user_walk(name, LOOKUP_FOLLOW, nd)
+       __user_walk_fd(AT_FDCWD, name, LOOKUP_FOLLOW, nd)
 #define user_path_walk_link(name,nd) \
-       __user_walk(name, 0, nd)
+       __user_walk_fd(AT_FDCWD, name, 0, nd)
 extern int FASTCALL(path_lookup(const char *, unsigned, struct nameidata *));
 extern int FASTCALL(path_walk(const char *, struct nameidata *));
 extern int FASTCALL(link_path_walk(const char *, struct nameidata *));
@@ -67,7 +68,7 @@ extern void path_release(struct nameidata *);
 extern void path_release_on_umount(struct nameidata *);
 
 extern int __user_path_lookup_open(const char __user *, unsigned lookup_flags, struct nameidata *nd, int open_flags);
-extern int path_lookup_open(const char *, unsigned lookup_flags, struct nameidata *, int open_flags);
+extern int path_lookup_open(int dfd, const char *name, unsigned lookup_flags, struct nameidata *, int open_flags);
 extern struct file *lookup_instantiate_filp(struct nameidata *nd, struct dentry *dentry,
                int (*open)(struct inode *, struct file *));
 extern struct file *nameidata_to_filp(struct nameidata *nd, int flags);
index 472f04834809e9a9ec8f28a6038725fa9e50e4b3..6500d4e59d46c5c6f60bfba239aad63f4639b7bf 100644 (file)
@@ -19,7 +19,21 @@ struct xt_get_revision
 /* For standard target */
 #define XT_RETURN (-NF_REPEAT - 1)
 
-#define XT_ALIGN(s) (((s) + (__alignof__(void *)-1)) & ~(__alignof__(void *)-1))
+/* this is a dummy structure to find out the alignment requirement for a struct
+ * containing all the fundamental data types that are used in ipt_entry,
+ * ip6t_entry and arpt_entry.  This sucks, and it is a hack.  It will be my
+ * personal pleasure to remove it -HW
+ */
+struct _xt_align
+{
+       u_int8_t u8;
+       u_int16_t u16;
+       u_int32_t u32;
+       u_int64_t u64;
+};
+
+#define XT_ALIGN(s) (((s) + (__alignof__(struct _xt_align)-1))         \
+                       & ~(__alignof__(struct _xt_align)-1))
 
 /* Standard return verdict, or do jump. */
 #define XT_STANDARD_TARGET ""
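
The replacement macro rounds a size up to the strictest alignment of any fundamental type used in the entry structures, rather than to pointer alignment, which under-counts on 32-bit ABIs where u64 needs 8-byte alignment but void * only needs 4. A stand-alone sketch of the rounding, with the kernel definitions copied locally for illustration:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Local copies of the kernel definitions, for illustration only. */
struct _xt_align {
        uint8_t  u8;
        uint16_t u16;
        uint32_t u32;
        uint64_t u64;
};

#define XT_ALIGN(s) (((s) + (__alignof__(struct _xt_align) - 1)) \
                        & ~(__alignof__(struct _xt_align) - 1))

int main(void)
{
        size_t sizes[] = { 1, 7, 8, 13, 24 };
        size_t i;

        printf("alignof(struct _xt_align) = %zu\n",
               (size_t)__alignof__(struct _xt_align));
        for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
                printf("XT_ALIGN(%2zu) = %2zu\n",
                       sizes[i], (size_t)XT_ALIGN(sizes[i]));
        return 0;
}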
index c4f0793a0a98b5b1ae40154dcb51a5a8ad65da37..8531879eb4645424712e8c8893ea30102b2bf2a1 100644 (file)
@@ -18,13 +18,4 @@ struct ip6t_ah
 #define IP6T_AH_INV_LEN                0x02    /* Invert the sense of length. */
 #define IP6T_AH_INV_MASK       0x03    /* All possible flags. */
 
-#define MASK_HOPOPTS    128
-#define MASK_DSTOPTS    64
-#define MASK_ROUTING    32
-#define MASK_FRAGMENT   16
-#define MASK_AH         8
-#define MASK_ESP        4
-#define MASK_NONE       2
-#define MASK_PROTO      1
-
 #endif /*_IP6T_AH_H*/
index 01142b98a231444a6f45072032b377bf6c1c7406..a91b6abc8079e37a56d5713af4a9fc5cedce8c38 100644 (file)
@@ -7,15 +7,6 @@ struct ip6t_esp
        u_int8_t  invflags;                     /* Inverse flags */
 };
 
-#define MASK_HOPOPTS    128
-#define MASK_DSTOPTS    64
-#define MASK_ROUTING    32
-#define MASK_FRAGMENT   16
-#define MASK_AH         8
-#define MASK_ESP        4
-#define MASK_NONE       2
-#define MASK_PROTO      1
-
 /* Values for "invflags" field in struct ip6t_esp. */
 #define IP6T_ESP_INV_SPI               0x01    /* Invert the sense of spi. */
 #define IP6T_ESP_INV_MASK      0x01    /* All possible flags. */
index 449a57eca7ddedb51af8d166a37ac83b26bbb867..66070a0d6dfce1a9ccf78b3dfc1eda9ee92a8b7a 100644 (file)
@@ -21,13 +21,4 @@ struct ip6t_frag
 #define IP6T_FRAG_INV_LEN      0x02    /* Invert the sense of length. */
 #define IP6T_FRAG_INV_MASK     0x03    /* All possible flags. */
 
-#define MASK_HOPOPTS    128
-#define MASK_DSTOPTS    64
-#define MASK_ROUTING    32
-#define MASK_FRAGMENT   16
-#define MASK_AH         8
-#define MASK_ESP        4
-#define MASK_NONE       2
-#define MASK_PROTO      1
-
 #endif /*_IP6T_FRAG_H*/
index e259b6275bd291df878a44fb7913007ea2c55c20..a07e36380ae8268492d2bca37f92edfe5a1ad488 100644 (file)
@@ -20,13 +20,4 @@ struct ip6t_opts
 #define IP6T_OPTS_INV_LEN      0x01    /* Invert the sense of length. */
 #define IP6T_OPTS_INV_MASK     0x01    /* All possible flags. */
 
-#define MASK_HOPOPTS    128
-#define MASK_DSTOPTS    64
-#define MASK_ROUTING    32
-#define MASK_FRAGMENT   16
-#define MASK_AH         8
-#define MASK_ESP        4
-#define MASK_NONE       2
-#define MASK_PROTO      1
-
 #endif /*_IP6T_OPTS_H*/
index f1070fbf2757070e41d60bf0639b1684e07201b2..52156023e8dbe7915e090ab05c8d826a4ae25022 100644 (file)
@@ -30,13 +30,4 @@ struct ip6t_rt
 #define IP6T_RT_INV_LEN                0x04    /* Invert the sense of length. */
 #define IP6T_RT_INV_MASK       0x07    /* All possible flags. */
 
-#define MASK_HOPOPTS    128
-#define MASK_DSTOPTS    64
-#define MASK_ROUTING    32
-#define MASK_FRAGMENT   16
-#define MASK_AH         8
-#define MASK_ESP        4
-#define MASK_NONE       2
-#define MASK_PROTO      1
-
 #endif /*_IP6T_RT_H*/
index 51c231a1e5a669457ef5a4aef34207ad74a78b37..ec7c2e872d728486e851dd14d79d053a41f09124 100644 (file)
@@ -124,7 +124,7 @@ int         nfsd_statfs(struct svc_rqst *, struct svc_fh *,
 
 int            nfsd_notify_change(struct inode *, struct iattr *);
 int            nfsd_permission(struct svc_export *, struct dentry *, int);
-void           nfsd_sync_dir(struct dentry *dp);
+int            nfsd_sync_dir(struct dentry *dp);
 
 #if defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL)
 #ifdef CONFIG_NFSD_V2_ACL
index 8903688890cea97f94659c63e453bfb127b393e5..77adba7d228105fa1fd0f7fde49720814d92ca55 100644 (file)
@@ -145,8 +145,9 @@ struct nfsd4_lock {
                } ok;
                struct nfsd4_lock_denied        denied;
        } u;
-
-       struct nfs4_stateowner *lk_stateowner;
+       /* The lk_replay_owner is the open owner in the open_to_lock_owner
+        * case and the lock owner otherwise: */
+       struct nfs4_stateowner *lk_replay_owner;
 };
 #define lk_new_open_seqid       v.new.open_seqid
 #define lk_new_open_stateid     v.new.open_stateid
index 5403257ae3e7173218186453cb0141a3b87d71ca..ecc1fc1f0f0442f644269f921ac8563ea8c92642 100644 (file)
 #define PCI_VENDOR_ID_DCI              0x6666
 #define PCI_DEVICE_ID_DCI_PCCOM4       0x0001
 #define PCI_DEVICE_ID_DCI_PCCOM8       0x0002
+#define PCI_DEVICE_ID_DCI_PCCOM2       0x0004
 
 #define PCI_VENDOR_ID_INTEL            0x8086
 #define PCI_DEVICE_ID_INTEL_EESSC      0x0008
index f6da702088f413b3d73c381f2b758c84f487691d..8e8f6098508aca2992e6402f12e83087d3846757 100644 (file)
@@ -92,7 +92,11 @@ void zero_fd_set(unsigned long nr, unsigned long *fdset)
        memset(fdset, 0, FDS_BYTES(nr));
 }
 
-extern int do_select(int n, fd_set_bits *fds, long *timeout);
+#define MAX_INT64_SECONDS (((s64)(~((u64)0)>>1)/HZ)-1)
+
+extern int do_select(int n, fd_set_bits *fds, s64 *timeout);
+extern int do_sys_poll(struct pollfd __user * ufds, unsigned int nfds,
+                      s64 *timeout);
 
 #endif /* KERNEL */
 
index 2df1a1a2fee5215deaa6fbbfcc8b4e37cbeff3bc..0cfcd1c7865ed391c2e655a0d18dc9a74514c627 100644 (file)
@@ -809,6 +809,7 @@ struct task_struct {
        struct sighand_struct *sighand;
 
        sigset_t blocked, real_blocked;
+       sigset_t saved_sigmask;         /* To be restored with TIF_RESTORE_SIGMASK */
        struct sigpending pending;
 
        unsigned long sas_ss_sp;
index cee302aefdb71b3efcbd8027af9a68b79189ddee..73b464f0926a861438c73cbcf27e996a9ccabf0f 100644 (file)
@@ -26,7 +26,7 @@ struct plat_serial8250_port {
        unsigned char   regshift;       /* register shift */
        unsigned char   iotype;         /* UPIO_* */
        unsigned char   hub6;
-       unsigned int    flags;          /* UPF_* flags */
+       upf_t           flags;          /* UPF_* flags */
 };
 
 /*
index ec351005bf9d9ccb5c521a9048dfdcad7c2db57b..4041122dabfcf66cd2eca416a227569f474b5c84 100644 (file)
@@ -203,6 +203,8 @@ struct uart_icount {
        __u32   buf_overrun;
 };
 
+typedef unsigned int __bitwise__ upf_t;
+
 struct uart_port {
        spinlock_t              lock;                   /* port lock */
        unsigned int            iobase;                 /* in/out[bwl] */
@@ -230,36 +232,34 @@ struct uart_port {
        unsigned long           sysrq;                  /* sysrq timeout */
 #endif
 
-       unsigned int            flags;
-
-#define UPF_FOURPORT           (1 << 1)
-#define UPF_SAK                        (1 << 2)
-#define UPF_SPD_MASK           (0x1030)
-#define UPF_SPD_HI             (0x0010)
-#define UPF_SPD_VHI            (0x0020)
-#define UPF_SPD_CUST           (0x0030)
-#define UPF_SPD_SHI            (0x1000)
-#define UPF_SPD_WARP           (0x1010)
-#define UPF_SKIP_TEST          (1 << 6)
-#define UPF_AUTO_IRQ           (1 << 7)
-#define UPF_HARDPPS_CD         (1 << 11)
-#define UPF_LOW_LATENCY                (1 << 13)
-#define UPF_BUGGY_UART         (1 << 14)
-#define UPF_AUTOPROBE          (1 << 15)
-#define UPF_MAGIC_MULTIPLIER   (1 << 16)
-#define UPF_BOOT_ONLYMCA       (1 << 22)
-#define UPF_CONS_FLOW          (1 << 23)
-#define UPF_SHARE_IRQ          (1 << 24)
-#define UPF_BOOT_AUTOCONF      (1 << 28)
-#define UPF_IOREMAP            (1 << 31)
-
-#define UPF_CHANGE_MASK                (0x17fff)
-#define UPF_USR_MASK           (UPF_SPD_MASK|UPF_LOW_LATENCY)
+       upf_t                   flags;
+
+#define UPF_FOURPORT           ((__force upf_t) (1 << 1))
+#define UPF_SAK                        ((__force upf_t) (1 << 2))
+#define UPF_SPD_MASK           ((__force upf_t) (0x1030))
+#define UPF_SPD_HI             ((__force upf_t) (0x0010))
+#define UPF_SPD_VHI            ((__force upf_t) (0x0020))
+#define UPF_SPD_CUST           ((__force upf_t) (0x0030))
+#define UPF_SPD_SHI            ((__force upf_t) (0x1000))
+#define UPF_SPD_WARP           ((__force upf_t) (0x1010))
+#define UPF_SKIP_TEST          ((__force upf_t) (1 << 6))
+#define UPF_AUTO_IRQ           ((__force upf_t) (1 << 7))
+#define UPF_HARDPPS_CD         ((__force upf_t) (1 << 11))
+#define UPF_LOW_LATENCY                ((__force upf_t) (1 << 13))
+#define UPF_BUGGY_UART         ((__force upf_t) (1 << 14))
+#define UPF_MAGIC_MULTIPLIER   ((__force upf_t) (1 << 16))
+#define UPF_CONS_FLOW          ((__force upf_t) (1 << 23))
+#define UPF_SHARE_IRQ          ((__force upf_t) (1 << 24))
+#define UPF_BOOT_AUTOCONF      ((__force upf_t) (1 << 28))
+#define UPF_IOREMAP            ((__force upf_t) (1 << 31))
+
+#define UPF_CHANGE_MASK                ((__force upf_t) (0x17fff))
+#define UPF_USR_MASK           ((__force upf_t) (UPF_SPD_MASK|UPF_LOW_LATENCY))
 
        unsigned int            mctrl;                  /* current modem ctrl settings */
        unsigned int            timeout;                /* character-based timeout */
        unsigned int            type;                   /* port type */
-       struct uart_ops         *ops;
+       const struct uart_ops   *ops;
        unsigned int            custom_divisor;
        unsigned int            line;                   /* port index */
        unsigned long           mapbase;                /* for ioremap */
@@ -289,6 +289,9 @@ struct uart_state {
 };
 
 #define UART_XMIT_SIZE PAGE_SIZE
+
+typedef unsigned int __bitwise__ uif_t;
+
 /*
  * This is the state information which is only valid when the port
  * is open; it may be freed by the core driver once the device has
@@ -298,17 +301,16 @@ struct uart_state {
 struct uart_info {
        struct tty_struct       *tty;
        struct circ_buf         xmit;
-       unsigned int            flags;
+       uif_t                   flags;
 
 /*
- * These are the flags that specific to info->flags, and reflect our
- * internal state.  They can not be accessed via port->flags.  Low
- * level drivers must not change these, but may query them instead.
+ * Definitions for info->flags.  These are _private_ to serial_core, and
+ * are specific to this structure.  They may be queried by low level drivers.
  */
-#define UIF_CHECK_CD           (1 << 25)
-#define UIF_CTS_FLOW           (1 << 26)
-#define UIF_NORMAL_ACTIVE      (1 << 29)
-#define UIF_INITIALIZED                (1 << 31)
+#define UIF_CHECK_CD           ((__force uif_t) (1 << 25))
+#define UIF_CTS_FLOW           ((__force uif_t) (1 << 26))
+#define UIF_NORMAL_ACTIVE      ((__force uif_t) (1 << 29))
+#define UIF_INITIALIZED                ((__force uif_t) (1 << 31))
 
        int                     blocked_open;
 
@@ -430,7 +432,7 @@ static inline int uart_handle_break(struct uart_port *port)
                port->sysrq = 0;
        }
 #endif
-       if (info->flags & UPF_SAK)
+       if (port->flags & UPF_SAK)
                do_SAK(info->tty);
        return 0;
 }
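
Turning the flag words into __bitwise typedefs lets sparse catch exactly the kind of mix-up fixed in uart_handle_break() above, where a UPF_* port flag was tested against info->flags. A minimal sketch of the pattern with the sparse fall-backs spelled out; the names below are illustrative, not the kernel's:

#include <stdio.h>

/* Under sparse (__CHECKER__) these attributes make each typedef a
 * distinct type; under plain GCC they compile away to nothing. */
#ifdef __CHECKER__
#define __bitwise__     __attribute__((bitwise))
#define __force         __attribute__((force))
#else
#define __bitwise__
#define __force
#endif

typedef unsigned int __bitwise__ upf_t;         /* port flags */
typedef unsigned int __bitwise__ uif_t;         /* info flags */

#define UPF_SAK         ((__force upf_t) (1 << 2))
#define UIF_CTS_FLOW    ((__force uif_t) (1 << 26))

static void check_port(upf_t port_flags)
{
        /* Correct: UPF_SAK is a upf_t, the same flavour as port_flags. */
        if (port_flags & UPF_SAK)
                printf("SAK enabled\n");

        /* Testing port_flags & UIF_CTS_FLOW here would still compile
         * with GCC, but sparse would warn about mixing upf_t and uif_t. */
}

int main(void)
{
        check_port(UPF_SAK);
        return 0;
}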
index e5fd66c5650b53fd1354aab69591ede2d6ea8794..ad7cc22bd424a8bb21c3055efd55e0a79861a5a9 100644 (file)
@@ -926,7 +926,7 @@ static inline int skb_tailroom(const struct sk_buff *skb)
  *     Increase the headroom of an empty &sk_buff by reducing the tail
  *     room. This is only allowed for an empty buffer.
  */
-static inline void skb_reserve(struct sk_buff *skb, unsigned int len)
+static inline void skb_reserve(struct sk_buff *skb, int len)
 {
        skb->data += len;
        skb->tail += len;
index e4086ec8b952a3a87b5f16bd654df3c70a05066a..50cab2a09f28caebad19282c2b8a6b610ef13175 100644 (file)
@@ -246,6 +246,7 @@ struct svc_deferred_req {
        u32                     prot;   /* protocol (UDP or TCP) */
        struct sockaddr_in      addr;
        struct svc_sock         *svsk;  /* where reply must go */
+       u32                     daddr;  /* where reply must come from */
        struct cache_deferred_req handle;
        int                     argslen;
        u32                     args[0];
index e92054d6530bd2d3ba770d35aa9f9a907ca3219e..4a99e4a7fbf31edf83a86c5c35bfd0b4adbe9baa 100644 (file)
@@ -167,6 +167,7 @@ extern void FASTCALL(lru_cache_add_active(struct page *));
 extern void FASTCALL(activate_page(struct page *));
 extern void FASTCALL(mark_page_accessed(struct page *));
 extern void lru_add_drain(void);
+extern int lru_add_drain_all(void);
 extern int rotate_reclaimable_page(struct page *page);
 extern void swap_setup(void);
 
@@ -175,6 +176,17 @@ extern int try_to_free_pages(struct zone **, gfp_t);
 extern int shrink_all_memory(int);
 extern int vm_swappiness;
 
+#ifdef CONFIG_NUMA
+extern int zone_reclaim_mode;
+extern int zone_reclaim(struct zone *, gfp_t, unsigned int);
+#else
+#define zone_reclaim_mode 0
+static inline int zone_reclaim(struct zone *z, gfp_t mask, unsigned int order)
+{
+       return 0;
+}
+#endif
+
 #ifdef CONFIG_MIGRATION
 extern int isolate_lru_page(struct page *p);
 extern int putback_lru_pages(struct list_head *l);
index 3eed47347013357c01f852222a07c5746fbdec5f..e666d6070569021b62fb6e22e1d15e08fbe0419e 100644 (file)
@@ -510,9 +510,24 @@ asmlinkage long sys_keyctl(int cmd, unsigned long arg2, unsigned long arg3,
 asmlinkage long sys_ioprio_set(int which, int who, int ioprio);
 asmlinkage long sys_ioprio_get(int which, int who);
 asmlinkage long sys_set_mempolicy(int mode, unsigned long __user *nmask,
-                                       unsigned long maxnode);
+                               unsigned long maxnode);
 asmlinkage long sys_migrate_pages(pid_t pid, unsigned long maxnode,
-                       const unsigned long __user *from, const unsigned long __user *to);
+                               const unsigned long __user *from,
+                               const unsigned long __user *to);
+asmlinkage long sys_mbind(unsigned long start, unsigned long len,
+                               unsigned long mode,
+                               unsigned long __user *nmask,
+                               unsigned long maxnode,
+                               unsigned flags);
+asmlinkage long sys_get_mempolicy(int __user *policy,
+                               unsigned long __user *nmask,
+                               unsigned long maxnode,
+                               unsigned long addr, unsigned long flags);
+
+asmlinkage long sys_inotify_init(void);
+asmlinkage long sys_inotify_add_watch(int fd, const char __user *path,
+                                       u32 mask);
+asmlinkage long sys_inotify_rm_watch(int fd, u32 wd);
 
 asmlinkage long sys_spu_run(int fd, __u32 __user *unpc,
                                 __u32 __user *ustatus);
index 7f472127b7b59d2d3a85e99b15ffea69f670e071..8352a7ce58951673caff4da3f1a7468d77445fc2 100644 (file)
@@ -182,6 +182,7 @@ enum
        VM_SWAP_TOKEN_TIMEOUT=28, /* default time for token time out */
        VM_DROP_PAGECACHE=29,   /* int: nuke lots of pagecache */
        VM_PERCPU_PAGELIST_FRACTION=30,/* int: fraction of pages in each percpu_pagelist */
+       VM_ZONE_RECLAIM_MODE=31,/* reclaim local zone memory before going off node */
 };
 
 
index f2aca7ec63257e67cffba81dc93c6e04a019ba88..614dd8465839c215bebf815e1baaf11d6e399939 100644 (file)
@@ -74,7 +74,7 @@ extern void do_gettimeofday(struct timeval *tv);
 extern int do_settimeofday(struct timespec *tv);
 extern int do_sys_settimeofday(struct timespec *tv, struct timezone *tz);
 #define do_posix_clock_monotonic_gettime(ts) ktime_get_ts(ts)
-extern long do_utimes(char __user *filename, struct timeval *times);
+extern long do_utimes(int dfd, char __user *filename, struct timeval *times);
 struct itimerval;
 extern int do_setitimer(int which, struct itimerval *value,
                        struct itimerval *ovalue);
index a52c8c64a5a3af1f23d0f36f72491f9d4a7a2855..33a653913d942fa35c263edf1d019f36f4e0f5b1 100644 (file)
 #define TIPC_MAX_LINK_NAME     60      /* format = Z.C.N:interface-Z.C.N:interface */
 
 /*
- * Link priority limits (range from 0 to # priorities - 1)
+ * Link priority limits (min, default, max, media default)
  */
 
-#define TIPC_NUM_LINK_PRI 32
+#define TIPC_MIN_LINK_PRI      0
+#define TIPC_DEF_LINK_PRI      10
+#define TIPC_MAX_LINK_PRI      31
+#define TIPC_MEDIA_LINK_PRI    (TIPC_MAX_LINK_PRI + 1)
 
 /*
  * Link tolerance limits (min, default, max), in ms
index 315a5163d6a01a7f891251550feaf89716219803..e8eb0040ce3a241928476e46f8c1e6ed673e158f 100644 (file)
 #define REMOTE_DISTANCE                20
 #define node_distance(from,to) ((from) == (to) ? LOCAL_DISTANCE : REMOTE_DISTANCE)
 #endif
+#ifndef RECLAIM_DISTANCE
+/*
+ * If the distance between nodes in a system is larger than RECLAIM_DISTANCE
+ * (in whatever arch specific measurement units returned by node_distance())
+ * then switch on zone reclaim on boot.
+ */
+#define RECLAIM_DISTANCE 20
+#endif
 #ifndef PENALTY_FOR_NODE_WITH_CPUS
 #define PENALTY_FOR_NODE_WITH_CPUS     (1)
 #endif
index df05f468fa5c312c028f45c9dc1ad81cbd1a2336..9a92aef8b0b29090696711728683d6caa8ac9942 100644 (file)
@@ -803,9 +803,9 @@ enum ieee80211_state {
 #define IEEE80211_24GHZ_MAX_CHANNEL 14
 #define IEEE80211_24GHZ_CHANNELS    14
 
-#define IEEE80211_52GHZ_MIN_CHANNEL 36
+#define IEEE80211_52GHZ_MIN_CHANNEL 34
 #define IEEE80211_52GHZ_MAX_CHANNEL 165
-#define IEEE80211_52GHZ_CHANNELS    32
+#define IEEE80211_52GHZ_CHANNELS    131
 
 enum {
        IEEE80211_CH_PASSIVE_ONLY = (1 << 0),
index 03b766afdc395cab12fe65f083714667627b4b78..cd82c3e998e42860698716feadf6bad275e3ac76 100644 (file)
@@ -25,6 +25,7 @@
 
 #include <linux/types.h>
 #include <linux/list.h>
+#include <net/ieee80211.h>
 #include <asm/atomic.h>
 
 enum {
index d67c8393a343711ad367108e595a48136995c9f8..a2c5e0b88422af2e7e92a98e0b925a0d20d28869 100644 (file)
@@ -327,7 +327,7 @@ struct iw_handler_def
        __u16                   num_private_args;
 
        /* Array of handlers for standard ioctls
-        * We will call dev->wireless_handlers->standard[ioctl - SIOCSIWNAME]
+        * We will call dev->wireless_handlers->standard[ioctl - SIOCSIWCOMMIT]
         */
        const iw_handler *      standard;
 
index a553f39f6aee66ec66e56c920fa46569251911e3..e673b2c984e931c9a80d7c5e677bfdc1984fa4c3 100644 (file)
@@ -175,6 +175,8 @@ void sctp_icmp_frag_needed(struct sock *, struct sctp_association *,
 void sctp_icmp_proto_unreachable(struct sock *sk,
                                 struct sctp_association *asoc,
                                 struct sctp_transport *t);
+void sctp_backlog_migrate(struct sctp_association *assoc,
+                         struct sock *oldsk, struct sock *newsk);
 
 /*
  *  Section:  Macros, externs, and inlines
index f5c22d77feab60e030c826bcc6abe92eee1d2d26..8c522ae031bbf5a3011212e00f16289b06e6648c 100644 (file)
@@ -127,9 +127,9 @@ extern struct sctp_globals {
         * RTO.Alpha                - 1/8  (3 when converted to right shifts.)
         * RTO.Beta                 - 1/4  (2 when converted to right shifts.)
         */
-       __u32 rto_initial;
-       __u32 rto_min;
-       __u32 rto_max;
+       unsigned long rto_initial;
+       unsigned long rto_min;
+       unsigned long rto_max;
 
        /* Note: rto_alpha and rto_beta are really defined as inverse
         * powers of two to facilitate integer operations.
@@ -140,12 +140,18 @@ extern struct sctp_globals {
        /* Max.Burst                - 4 */
        int max_burst;
 
-       /* Valid.Cookie.Life        - 60  seconds  */
-       int valid_cookie_life;
-
        /* Whether Cookie Preservative is enabled(1) or not(0) */
        int cookie_preserve_enable;
 
+       /* Valid.Cookie.Life        - 60  seconds  */
+       unsigned long valid_cookie_life;
+
+       /* Delayed SACK timeout  200ms default*/
+       unsigned long sack_timeout;
+
+       /* HB.interval              - 30 seconds  */
+       unsigned long hb_interval;
+
        /* Association.Max.Retrans  - 10 attempts
         * Path.Max.Retrans         - 5  attempts (per destination address)
         * Max.Init.Retransmits     - 8  attempts
@@ -168,12 +174,6 @@ extern struct sctp_globals {
         */
        int rcvbuf_policy;
 
-       /* Delayed SACK timeout  200ms default*/
-       int sack_timeout;
-
-       /* HB.interval              - 30 seconds  */
-       int hb_interval;
-
        /* The following variables are implementation specific.  */
 
        /* Default initialization values to be applied to new associations. */
@@ -405,8 +405,9 @@ struct sctp_cookie {
 /* The format of our cookie that we send to our peer. */
 struct sctp_signed_cookie {
        __u8 signature[SCTP_SECRET_SIZE];
+       __u32 __pad;            /* force sctp_cookie alignment to 64 bits */
        struct sctp_cookie c;
-};
+} __attribute__((packed));
 
 /* This is another convenience type to allocate memory for address
  * params for the maximum size and pass such structures around
@@ -827,7 +828,7 @@ struct sctp_transport {
        __u32 rtt;              /* This is the most recent RTT.  */
 
        /* RTO         : The current retransmission timeout value.  */
-       __u32 rto;
+       unsigned long rto;
 
        /* RTTVAR      : The current RTT variation.  */
        __u32 rttvar;
@@ -877,22 +878,10 @@ struct sctp_transport {
        /* Heartbeat interval: The endpoint sends out a Heartbeat chunk to
         * the destination address every heartbeat interval.
         */
-       __u32 hbinterval;
-
-       /* This is the max_retrans value for the transport and will
-        * be initialized from the assocs value.  This can be changed
-        * using SCTP_SET_PEER_ADDR_PARAMS socket option.
-        */
-       __u16 pathmaxrxt;
-
-       /* PMTU       : The current known path MTU.  */
-       __u32 pathmtu;
+       unsigned long hbinterval;
 
        /* SACK delay timeout */
-       __u32 sackdelay;
-
-       /* Flags controlling Heartbeat, SACK delay, and Path MTU Discovery. */
-       __u32 param_flags;
+       unsigned long sackdelay;
 
        /* When was the last time (in jiffies) that we heard from this
         * transport?  We use this to pick new active and retran paths.
@@ -904,6 +893,18 @@ struct sctp_transport {
         */
        unsigned long last_time_ecne_reduced;
 
+       /* This is the max_retrans value for the transport and will
+        * be initialized from the assocs value.  This can be changed
+        * using SCTP_SET_PEER_ADDR_PARAMS socket option.
+        */
+       __u16 pathmaxrxt;
+
+       /* PMTU       : The current known path MTU.  */
+       __u32 pathmtu;
+
+       /* Flags controlling Heartbeat, SACK delay, and Path MTU Discovery. */
+       __u32 param_flags;
+
        /* The number of times INIT has been sent on this transport. */
        int init_sent_count;
 
@@ -1249,6 +1250,14 @@ struct sctp_endpoint {
        int last_key;
        int key_changed_at;
 
+       /* digest:  This is a digest of the sctp cookie.  This field is
+        *          only used on the receive path when we try to validate
+        *          that the cookie has not been tampered with.  We put
+        *          this here so we pre-allocate this once and can re-use
+        *          on every receive.
+        */
+       __u8 digest[SCTP_SIGNATURE_SIZE];
        /* sendbuf acct. policy.        */
        __u32 sndbuf_policy;
 
@@ -1499,9 +1508,9 @@ struct sctp_association {
         * These values will be initialized by system defaults, but can
         * be modified via the SCTP_RTOINFO socket option.
         */
-       __u32 rto_initial;
-       __u32 rto_max;
-       __u32 rto_min;
+       unsigned long rto_initial;
+       unsigned long rto_max;
+       unsigned long rto_min;
 
        /* Maximum number of new data packets that can be sent in a burst.  */
        int max_burst;
@@ -1519,13 +1528,13 @@ struct sctp_association {
        __u16 init_retries;
 
        /* The largest timeout or RTO value to use in attempting an INIT */
-       __u16 max_init_timeo;
+       unsigned long max_init_timeo;
 
        /* Heartbeat interval: The endpoint sends out a Heartbeat chunk to
         * the destination address every heartbeat interval. This value
         * will be inherited by all new transports.
         */
-       __u32 hbinterval;
+       unsigned long hbinterval;
 
        /* This is the max_retrans value for new transports in the
         * association.
@@ -1537,13 +1546,14 @@ struct sctp_association {
         */
        __u32 pathmtu;
 
-       /* SACK delay timeout */
-       __u32 sackdelay;
-
        /* Flags controlling Heartbeat, SACK delay, and Path MTU Discovery. */
        __u32 param_flags;
 
-       int timeouts[SCTP_NUM_TIMEOUT_TYPES];
+       /* SACK delay timeout */
+       unsigned long sackdelay;
+
+
+       unsigned long timeouts[SCTP_NUM_TIMEOUT_TYPES];
        struct timer_list timers[SCTP_NUM_TIMEOUT_TYPES];
 
        /* Transport to which SHUTDOWN chunk was last sent.  */
@@ -1648,7 +1658,10 @@ struct sctp_association {
        /* How many duplicated TSNs have we seen?  */
        int numduptsns;
 
-       /* Number of seconds of idle time before an association is closed.  */
+       /* Number of seconds of idle time before an association is closed.
+        * In the association context, this is really used as a boolean
+        * since the real timeout is stored in the timeouts array
+        */
        __u32 autoclose;
 
        /* These are to support
index e94ca4d360358bb535224bca0766af0f706cef61..290e3b4d2aec040c46f1fc2f0ff8b9a8b3398472 100644 (file)
@@ -275,7 +275,7 @@ extern int scsi_execute_req(struct scsi_device *sdev, const unsigned char *cmd,
                            int data_direction, void *buffer, unsigned bufflen,
                            struct scsi_sense_hdr *, int timeout, int retries);
 extern int scsi_execute_async(struct scsi_device *sdev,
-                             const unsigned char *cmd, int data_direction,
+                             const unsigned char *cmd, int cmd_len, int data_direction,
                              void *buffer, unsigned bufflen, int use_sg,
                              int timeout, int retries, void *privdata,
                              void (*done)(void *, char *, int, int),
index 467274a764d172487c2717f0aa7eac2eb5f81b0d..827992949c4bfe2b94635262483a4863aa0b2550 100644 (file)
@@ -554,7 +554,6 @@ struct Scsi_Host {
        /*
         * ordered write support
         */
-       unsigned ordered_flush:1;
        unsigned ordered_tag:1;
 
        /*
index 2b5930ba69ec465b43947e9e47493530448bb871..fb5a2ffae9394c15b9eaebccb91b9896bf67b54a 100644 (file)
@@ -22,6 +22,7 @@
 
 #include <linux/config.h>
 #include <linux/transport_class.h>
+#include <linux/mutex.h>
 
 struct scsi_transport_template;
 struct scsi_target;
index d13ab7d2d8994f38fb9bf422d873152fd4417057..0a813d2883e58089563bd0d266a99e5c029ece3b 100644 (file)
@@ -42,8 +42,8 @@
  */
 
 #include <linux/init.h>
-#include <asm/atomic.h>
 #include <asm/types.h>
+#include <asm/atomic.h>
 #include <linux/mm.h>
 #include <linux/module.h>
 #include <linux/err.h>
index d8a68509e7299df13233c98134c9431e994faa41..685c25175d96374a601d33460dae6e7a35ff0f07 100644 (file)
@@ -30,8 +30,8 @@
  */
 
 #include <linux/init.h>
-#include <asm/atomic.h>
 #include <asm/types.h>
+#include <asm/atomic.h>
 #include <linux/mm.h>
 #include <linux/module.h>
 #include <linux/mount.h>
index 256e5d9f06470480283a04cfc8f5f15e8113b30a..1867290c37e3c164c916e0471c64c0c6e5528a97 100644 (file)
@@ -871,3 +871,31 @@ asmlinkage long compat_sys_stime(compat_time_t __user *tptr)
 }
 
 #endif /* __ARCH_WANT_COMPAT_SYS_TIME */
+
+#ifdef __ARCH_WANT_COMPAT_SYS_RT_SIGSUSPEND
+asmlinkage long compat_sys_rt_sigsuspend(compat_sigset_t __user *unewset, compat_size_t sigsetsize)
+{
+       sigset_t newset;
+       compat_sigset_t newset32;
+
+       /* XXX: Don't preclude handling different sized sigset_t's.  */
+       if (sigsetsize != sizeof(sigset_t))
+               return -EINVAL;
+
+       if (copy_from_user(&newset32, unewset, sizeof(compat_sigset_t)))
+               return -EFAULT;
+       sigset_from_compat(&newset, &newset32);
+       sigdelsetmask(&newset, sigmask(SIGKILL)|sigmask(SIGSTOP));
+
+       spin_lock_irq(&current->sighand->siglock);
+       current->saved_sigmask = current->blocked;
+       current->blocked = newset;
+       recalc_sigpending();
+       spin_unlock_irq(&current->sighand->siglock);
+
+       current->state = TASK_INTERRUPTIBLE;
+       schedule();
+       set_thread_flag(TIF_RESTORE_SIGMASK);
+       return -ERESTARTNOHAND;
+}
+#endif /* __ARCH_WANT_COMPAT_SYS_RT_SIGSUSPEND */
index 773219907dd8a96698f6c0390778538071e2e890..7712912dbc8488d54c7be1c73947a285005ba8a7 100644 (file)
@@ -114,16 +114,16 @@ rcu_torture_alloc(void)
 {
        struct list_head *p;
 
-       spin_lock(&rcu_torture_lock);
+       spin_lock_bh(&rcu_torture_lock);
        if (list_empty(&rcu_torture_freelist)) {
                atomic_inc(&n_rcu_torture_alloc_fail);
-               spin_unlock(&rcu_torture_lock);
+               spin_unlock_bh(&rcu_torture_lock);
                return NULL;
        }
        atomic_inc(&n_rcu_torture_alloc);
        p = rcu_torture_freelist.next;
        list_del_init(p);
-       spin_unlock(&rcu_torture_lock);
+       spin_unlock_bh(&rcu_torture_lock);
        return container_of(p, struct rcu_torture, rtort_free);
 }
 
@@ -134,9 +134,9 @@ static void
 rcu_torture_free(struct rcu_torture *p)
 {
        atomic_inc(&n_rcu_torture_free);
-       spin_lock(&rcu_torture_lock);
+       spin_lock_bh(&rcu_torture_lock);
        list_add_tail(&p->rtort_free, &rcu_torture_freelist);
-       spin_unlock(&rcu_torture_lock);
+       spin_unlock_bh(&rcu_torture_lock);
 }
 
 static void
index 788ecce1e0e4d98701432bc6575cab35c3db3ad5..ec7fd9cee30665e7626ec7d3fb92f9ee6bd57731 100644 (file)
@@ -3850,6 +3850,10 @@ do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
 asmlinkage long sys_sched_setscheduler(pid_t pid, int policy,
                                       struct sched_param __user *param)
 {
+       /* negative values for policy are not valid */
+       if (policy < 0)
+               return -EINVAL;
+
        return do_sched_setscheduler(pid, policy, param);
 }
 
@@ -5137,7 +5141,7 @@ static void init_sched_build_groups(struct sched_group groups[], cpumask_t span,
 #define SEARCH_SCOPE           2
 #define MIN_CACHE_SIZE         (64*1024U)
 #define DEFAULT_CACHE_SIZE     (5*1024*1024U)
-#define ITERATIONS             2
+#define ITERATIONS             1
 #define SIZE_THRESH            130
 #define COST_THRESH            130
 
@@ -5476,9 +5480,9 @@ static unsigned long long measure_migration_cost(int cpu1, int cpu2)
                                break;
                        }
                /*
-                * Increase the cachesize in 5% steps:
+                * Increase the cachesize in 10% steps:
                 */
-               size = size * 20 / 19;
+               size = size * 10 / 9;
        }
 
        if (migration_debug)
index 5dafbd36d62e0e1a7245dfa35befcfa28876994c..d3efafd8109a3e4597bf89213d8eaa8fa1852f33 100644 (file)
@@ -2721,6 +2721,32 @@ sys_pause(void)
 
 #endif
 
+#ifdef __ARCH_WANT_SYS_RT_SIGSUSPEND
+asmlinkage long sys_rt_sigsuspend(sigset_t __user *unewset, size_t sigsetsize)
+{
+       sigset_t newset;
+
+       /* XXX: Don't preclude handling different sized sigset_t's.  */
+       if (sigsetsize != sizeof(sigset_t))
+               return -EINVAL;
+
+       if (copy_from_user(&newset, unewset, sizeof(newset)))
+               return -EFAULT;
+       sigdelsetmask(&newset, sigmask(SIGKILL)|sigmask(SIGSTOP));
+
+       spin_lock_irq(&current->sighand->siglock);
+       current->saved_sigmask = current->blocked;
+       current->blocked = newset;
+       recalc_sigpending();
+       spin_unlock_irq(&current->sighand->siglock);
+
+       current->state = TASK_INTERRUPTIBLE;
+       schedule();
+       set_thread_flag(TIF_RESTORE_SIGMASK);
+       return -ERESTARTNOHAND;
+}
+#endif /* __ARCH_WANT_SYS_RT_SIGSUSPEND */
+
 void __init signals_init(void)
 {
        sigqueue_cachep =
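
saved_sigmask plus TIF_RESTORE_SIGMASK keep the temporary mask in force until the handler's frame has been built, which is what makes sigsuspend() (and the pselect()/ppoll() variants this series enables) race-free. A hedged user-space sketch of the pattern that guarantee supports:

#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static volatile sig_atomic_t got_usr1;

static void on_usr1(int sig)
{
        (void)sig;
        got_usr1 = 1;
}

int main(void)
{
        sigset_t block, old;
        struct sigaction sa;

        memset(&sa, 0, sizeof(sa));
        sa.sa_handler = on_usr1;
        sigemptyset(&sa.sa_mask);
        sigaction(SIGUSR1, &sa, NULL);

        /* Block SIGUSR1 while we test the "do I still need to wait?" flag. */
        sigemptyset(&block);
        sigaddset(&block, SIGUSR1);
        sigprocmask(SIG_BLOCK, &block, &old);

        printf("waiting; send SIGUSR1 to pid %d\n", (int)getpid());

        /* sigsuspend() installs 'old' and sleeps in one step; the kernel
         * keeps the caller's mask (saved_sigmask) and restores it after
         * the handler returns, so the signal cannot slip through between
         * the flag check and the wait. */
        while (!got_usr1)
                sigsuspend(&old);

        printf("got SIGUSR1\n");
        return 0;
}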
index f5d69b6e29f50fb051b8ecf1add5964865c105f4..cb99a42f8b37c60e991332ce33b4fba1dacb9dc6 100644 (file)
@@ -869,6 +869,17 @@ static ctl_table vm_table[] = {
                .proc_handler   = &proc_dointvec_jiffies,
                .strategy       = &sysctl_jiffies,
        },
+#endif
+#ifdef CONFIG_NUMA
+       {
+               .ctl_name       = VM_ZONE_RECLAIM_MODE,
+               .procname       = "zone_reclaim_mode",
+               .data           = &zone_reclaim_mode,
+               .maxlen         = sizeof(zone_reclaim_mode),
+               .mode           = 0644,
+               .proc_handler   = &proc_dointvec,
+               .strategy       = &zero,
+       },
 #endif
        { .ctl_name = 0 }
 };
index 7477b1d2079e32a614ea82e6ae21fde3abe48373..1f23e683d6aa01da497a86a8cd5ac6804ef5d6fe 100644 (file)
@@ -155,7 +155,7 @@ int do_sys_settimeofday(struct timespec *tv, struct timezone *tz)
        static int firsttime = 1;
        int error = 0;
 
-       if (!timespec_valid(tv))
+       if (tv && !timespec_valid(tv))
                return -EINVAL;
 
        error = security_settime(tv, tz);
index 89e562feb1b10829ec3d689eb969f670868be686..d1ae2349347e1776f99f572bb355a0a8a6435443 100644 (file)
@@ -13,6 +13,7 @@
 #include <linux/slab.h>
 #include <linux/bitops.h>
 #include <linux/key.h>
+#include <linux/interrupt.h>
 
 /*
  * UID task count cache, to get fast user lookup in "alloc_uid"
 
 static kmem_cache_t *uid_cachep;
 static struct list_head uidhash_table[UIDHASH_SZ];
+
+/*
+ * The uidhash_lock is mostly taken from process context, but it is
+ * occasionally also taken from softirq/tasklet context, when
+ * task-structs get RCU-freed. Hence all locking must be softirq-safe.
+ */
 static DEFINE_SPINLOCK(uidhash_lock);
 
 struct user_struct root_user = {
@@ -83,14 +90,15 @@ struct user_struct *find_user(uid_t uid)
 {
        struct user_struct *ret;
 
-       spin_lock(&uidhash_lock);
+       spin_lock_bh(&uidhash_lock);
        ret = uid_hash_find(uid, uidhashentry(uid));
-       spin_unlock(&uidhash_lock);
+       spin_unlock_bh(&uidhash_lock);
        return ret;
 }
 
 void free_uid(struct user_struct *up)
 {
+       local_bh_disable();
        if (up && atomic_dec_and_lock(&up->__count, &uidhash_lock)) {
                uid_hash_remove(up);
                key_put(up->uid_keyring);
@@ -98,6 +106,7 @@ void free_uid(struct user_struct *up)
                kmem_cache_free(uid_cachep, up);
                spin_unlock(&uidhash_lock);
        }
+       local_bh_enable();
 }
 
 struct user_struct * alloc_uid(uid_t uid)
@@ -105,9 +114,9 @@ struct user_struct * alloc_uid(uid_t uid)
        struct list_head *hashent = uidhashentry(uid);
        struct user_struct *up;
 
-       spin_lock(&uidhash_lock);
+       spin_lock_bh(&uidhash_lock);
        up = uid_hash_find(uid, hashent);
-       spin_unlock(&uidhash_lock);
+       spin_unlock_bh(&uidhash_lock);
 
        if (!up) {
                struct user_struct *new;
@@ -137,7 +146,7 @@ struct user_struct * alloc_uid(uid_t uid)
                 * Before adding this, check whether we raced
                 * on adding the same user already..
                 */
-               spin_lock(&uidhash_lock);
+               spin_lock_bh(&uidhash_lock);
                up = uid_hash_find(uid, hashent);
                if (up) {
                        key_put(new->uid_keyring);
@@ -147,7 +156,7 @@ struct user_struct * alloc_uid(uid_t uid)
                        uid_hash_insert(new, hashent);
                        up = new;
                }
-               spin_unlock(&uidhash_lock);
+               spin_unlock_bh(&uidhash_lock);
 
        }
        return up;
@@ -183,9 +192,9 @@ static int __init uid_cache_init(void)
                INIT_LIST_HEAD(uidhash_table + n);
 
        /* Insert the root user immediately (init already runs as root) */
-       spin_lock(&uidhash_lock);
+       spin_lock_bh(&uidhash_lock);
        uid_hash_insert(&root_user, uidhashentry(0));
-       spin_unlock(&uidhash_lock);
+       spin_unlock_bh(&uidhash_lock);
 
        return 0;
 }
index a965b6b35f266bce90ad5703ec4c81b7270e9d8e..44da3d47699485004994c6af302c7406d606c569 100644 (file)
@@ -94,6 +94,7 @@ generic_file_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
  *    ->private_lock           (try_to_unmap_one)
  *    ->tree_lock              (try_to_unmap_one)
  *    ->zone.lru_lock          (follow_page->mark_page_accessed)
+ *    ->zone.lru_lock          (check_pte_range->isolate_lru_page)
  *    ->private_lock           (page_remove_rmap->set_page_dirty)
  *    ->tree_lock              (page_remove_rmap->set_page_dirty)
  *    ->inode_lock             (page_remove_rmap->set_page_dirty)
index 3171f884d2459a30ad113d9008b82b04d833671c..73790188b0eb27a91edd4d0d8efba6c90d4b28be 100644 (file)
@@ -185,8 +185,8 @@ static struct mempolicy *mpol_new(int mode, nodemask_t *nodes)
 }
 
 static void gather_stats(struct page *, void *);
-static void migrate_page_add(struct vm_area_struct *vma,
-       struct page *page, struct list_head *pagelist, unsigned long flags);
+static void migrate_page_add(struct page *page, struct list_head *pagelist,
+                               unsigned long flags);
 
 /* Scan through pages checking if pages follow certain conditions. */
 static int check_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
@@ -208,6 +208,17 @@ static int check_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
                page = vm_normal_page(vma, addr, *pte);
                if (!page)
                        continue;
+               /*
+                * The check for PageReserved here is important to avoid
+                * handling zero pages and other pages that may have been
+                * marked special by the system.
+                *
+                * If the PageReserved would not be checked here then f.e.
+                * the location of the zero page could have an influence
+                * on MPOL_MF_STRICT, zero pages would be counted for
+                * the per node stats, and there would be useless attempts
+                * to put zero pages on the migration list.
+                */
                if (PageReserved(page))
                        continue;
                nid = page_to_nid(page);
@@ -216,11 +227,8 @@ static int check_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
 
                if (flags & MPOL_MF_STATS)
                        gather_stats(page, private);
-               else if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
-                       spin_unlock(ptl);
-                       migrate_page_add(vma, page, private, flags);
-                       spin_lock(ptl);
-               }
+               else if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
+                       migrate_page_add(page, private, flags);
                else
                        break;
        } while (pte++, addr += PAGE_SIZE, addr != end);
@@ -309,6 +317,10 @@ check_range(struct mm_struct *mm, unsigned long start, unsigned long end,
        int err;
        struct vm_area_struct *first, *vma, *prev;
 
+       /* Clear the LRU lists so pages can be isolated */
+       if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
+               lru_add_drain_all();
+
        first = find_vma(mm, start);
        if (!first)
                return ERR_PTR(-EFAULT);
@@ -519,51 +531,15 @@ long do_get_mempolicy(int *policy, nodemask_t *nmask,
  * page migration
  */
 
-/* Check if we are the only process mapping the page in question */
-static inline int single_mm_mapping(struct mm_struct *mm,
-                       struct address_space *mapping)
-{
-       struct vm_area_struct *vma;
-       struct prio_tree_iter iter;
-       int rc = 1;
-
-       spin_lock(&mapping->i_mmap_lock);
-       vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, 0, ULONG_MAX)
-               if (mm != vma->vm_mm) {
-                       rc = 0;
-                       goto out;
-               }
-       list_for_each_entry(vma, &mapping->i_mmap_nonlinear, shared.vm_set.list)
-               if (mm != vma->vm_mm) {
-                       rc = 0;
-                       goto out;
-               }
-out:
-       spin_unlock(&mapping->i_mmap_lock);
-       return rc;
-}
-
-/*
- * Add a page to be migrated to the pagelist
- */
-static void migrate_page_add(struct vm_area_struct *vma,
-       struct page *page, struct list_head *pagelist, unsigned long flags)
+static void migrate_page_add(struct page *page, struct list_head *pagelist,
+                               unsigned long flags)
 {
        /*
-        * Avoid migrating a page that is shared by others and not writable.
+        * Avoid migrating a page that is shared with others.
         */
-       if ((flags & MPOL_MF_MOVE_ALL) || !page->mapping || PageAnon(page) ||
-           mapping_writably_mapped(page->mapping) ||
-           single_mm_mapping(vma->vm_mm, page->mapping)) {
-               int rc = isolate_lru_page(page);
-
-               if (rc == 1)
+       if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(page) == 1) {
+               if (isolate_lru_page(page))
                        list_add(&page->lru, pagelist);
-               /*
-                * If the isolate attempt was not successful then we just
-                * encountered an unswappable page. Something must be wrong.
-                */
-               WARN_ON(rc == 0);
        }
 }
 
@@ -1000,6 +976,33 @@ static unsigned interleave_nodes(struct mempolicy *policy)
        return nid;
 }
 
+/*
+ * Depending on the memory policy provide a node from which to allocate the
+ * next slab entry.
+ */
+unsigned slab_node(struct mempolicy *policy)
+{
+       switch (policy->policy) {
+       case MPOL_INTERLEAVE:
+               return interleave_nodes(policy);
+
+       case MPOL_BIND:
+               /*
+                * Follow bind policy behavior and start allocation at the
+                * first node.
+                */
+               return policy->v.zonelist->zones[0]->zone_pgdat->node_id;
+
+       case MPOL_PREFERRED:
+               if (policy->v.preferred_node >= 0)
+                       return policy->v.preferred_node;
+               /* Fall through */
+
+       default:
+               return numa_node_id();
+       }
+}
+
 /* Do static interleaving for a VMA with known offset. */
 static unsigned offset_il_node(struct mempolicy *pol,
                struct vm_area_struct *vma, unsigned long off)
index 5240e426c1f771d24febb3ac60c8b375e794d28d..945559fb63d208bb5c10543aece7b117e84d3e97 100644 (file)
@@ -46,7 +46,7 @@
 static long ratelimit_pages = 32;
 
 static long total_pages;       /* The total number of pages in the machine. */
-static int dirty_exceeded;     /* Dirty mem may be over limit */
+static int dirty_exceeded __cacheline_aligned_in_smp;  /* Dirty mem may be over limit */
 
 /*
  * When balance_dirty_pages decides that the caller needs to perform some
@@ -212,7 +212,8 @@ static void balance_dirty_pages(struct address_space *mapping)
                if (nr_reclaimable + wbs.nr_writeback <= dirty_thresh)
                        break;
 
-               dirty_exceeded = 1;
+               if (!dirty_exceeded)
+                       dirty_exceeded = 1;
 
                /* Note: nr_reclaimable denotes nr_dirty + nr_unstable.
                 * Unstable writes are a feature of certain networked
@@ -234,7 +235,7 @@ static void balance_dirty_pages(struct address_space *mapping)
                blk_congestion_wait(WRITE, HZ/10);
        }
 
-       if (nr_reclaimable + wbs.nr_writeback <= dirty_thresh)
+       if (nr_reclaimable + wbs.nr_writeback <= dirty_thresh && dirty_exceeded)
                dirty_exceeded = 0;
 
        if (writeback_in_progress(bdi))
index c2e29743a8d156068581c05c027a37be2269a9d4..df54e2fc8ee09760c67d2dd4e6bd496e07286124 100644 (file)
@@ -878,7 +878,9 @@ get_page_from_freelist(gfp_t gfp_mask, unsigned int order,
                                mark = (*z)->pages_high;
                        if (!zone_watermark_ok(*z, order, mark,
                                    classzone_idx, alloc_flags))
-                               continue;
+                               if (!zone_reclaim_mode ||
+                                   !zone_reclaim(*z, gfp_mask, order))
+                                       continue;
                }
 
                page = buffered_rmqueue(zonelist, *z, order, gfp_mask);
@@ -1595,13 +1597,22 @@ static void __init build_zonelists(pg_data_t *pgdat)
        prev_node = local_node;
        nodes_clear(used_mask);
        while ((node = find_next_best_node(local_node, &used_mask)) >= 0) {
+               int distance = node_distance(local_node, node);
+
+               /*
+                * If another node is sufficiently far away then it is better
+                * to reclaim pages in a zone before going off node.
+                */
+               if (distance > RECLAIM_DISTANCE)
+                       zone_reclaim_mode = 1;
+
                /*
                 * We don't want to pressure a particular node.
                 * So adding penalty to the first node in same
                 * distance group to make it round-robin.
                 */
-               if (node_distance(local_node, node) !=
-                               node_distance(local_node, prev_node))
+
+               if (distance != node_distance(local_node, prev_node))
                        node_load[node] += load;
                prev_node = node;
                load--;
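
The distance check above flips zone_reclaim_mode on as soon as any node sits farther away than RECLAIM_DISTANCE. A stand-alone sketch of that decision over a made-up SLIT-style distance table:

#include <stdio.h>

#define RECLAIM_DISTANCE 20     /* default from include/linux/topology.h */

/* Hypothetical distance matrix for a 4-node box: nodes 0/1 are close
 * (10/20), nodes 2/3 sit behind a slower interconnect (40). */
static const int distance[4][4] = {
        { 10, 20, 40, 40 },
        { 20, 10, 40, 40 },
        { 40, 40, 10, 20 },
        { 40, 40, 20, 10 },
};

int main(void)
{
        int zone_reclaim_mode = 0;
        int local = 0, node;

        /* Mirror of the build_zonelists() check above: if any node is
         * farther away than RECLAIM_DISTANCE, prefer reclaiming pages in
         * the local zone before falling back to off-node allocations. */
        for (node = 0; node < 4; node++)
                if (distance[local][node] > RECLAIM_DISTANCE)
                        zone_reclaim_mode = 1;

        printf("zone_reclaim_mode = %d\n", zone_reclaim_mode);
        return 0;
}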
index dfbb89f99a15a610b9463ea84a35068712187d23..d85a99d28c0387a28d92538a88b92ddfef0efbb2 100644 (file)
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -33,7 +33,7 @@
  *     mapping->i_mmap_lock
  *       anon_vma->lock
  *         mm->page_table_lock or pte_lock
- *           zone->lru_lock (in mark_page_accessed)
+ *           zone->lru_lock (in mark_page_accessed, isolate_lru_page)
  *           swap_lock (in swap_duplicate, swap_info_get)
  *             mmlist_lock (in mmput, drain_mmlist and others)
  *             mapping->private_lock (in __set_page_dirty_buffers)
index 9374293a301297edef94491494db8e58f591ba82..6f8495e2185b36c4d4e08402a111a59a3deeb01b 100644 (file)
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -68,7 +68,7 @@
  * Further notes from the original documentation:
  *
  * 11 April '97.  Started multi-threading - markhe
- *     The global cache-chain is protected by the semaphore 'cache_chain_sem'.
+ *     The global cache-chain is protected by the mutex 'cache_chain_mutex'.
  *     The sem is only needed when accessing/extending the cache-chain, which
  *     can never happen inside an interrupt (kmem_cache_create(),
  *     kmem_cache_shrink() and kmem_cache_reap()).
 #include       <linux/rcupdate.h>
 #include       <linux/string.h>
 #include       <linux/nodemask.h>
+#include       <linux/mempolicy.h>
+#include       <linux/mutex.h>
 
 #include       <asm/uaccess.h>
 #include       <asm/cacheflush.h>
@@ -631,7 +633,7 @@ static kmem_cache_t cache_cache = {
 };
 
 /* Guard access to the cache-chain. */
-static struct semaphore cache_chain_sem;
+static DEFINE_MUTEX(cache_chain_mutex);
 static struct list_head cache_chain;
 
 /*
@@ -772,6 +774,8 @@ static struct array_cache *alloc_arraycache(int node, int entries,
 }
 
 #ifdef CONFIG_NUMA
+static void *__cache_alloc_node(kmem_cache_t *, gfp_t, int);
+
 static inline struct array_cache **alloc_alien_cache(int node, int limit)
 {
        struct array_cache **ac_ptr;
@@ -857,7 +861,7 @@ static int __devinit cpuup_callback(struct notifier_block *nfb,
 
        switch (action) {
        case CPU_UP_PREPARE:
-               down(&cache_chain_sem);
+               mutex_lock(&cache_chain_mutex);
                /* we need to do this right in the beginning since
                 * alloc_arraycache's are going to use this list.
                 * kmalloc_node allows us to add the slab to the right
@@ -912,7 +916,7 @@ static int __devinit cpuup_callback(struct notifier_block *nfb,
                                l3->shared = nc;
                        }
                }
-               up(&cache_chain_sem);
+               mutex_unlock(&cache_chain_mutex);
                break;
        case CPU_ONLINE:
                start_cpu_timer(cpu);
@@ -921,7 +925,7 @@ static int __devinit cpuup_callback(struct notifier_block *nfb,
        case CPU_DEAD:
                /* fall thru */
        case CPU_UP_CANCELED:
-               down(&cache_chain_sem);
+               mutex_lock(&cache_chain_mutex);
 
                list_for_each_entry(cachep, &cache_chain, next) {
                        struct array_cache *nc;
@@ -973,13 +977,13 @@ static int __devinit cpuup_callback(struct notifier_block *nfb,
                        spin_unlock_irq(&cachep->spinlock);
                        kfree(nc);
                }
-               up(&cache_chain_sem);
+               mutex_unlock(&cache_chain_mutex);
                break;
 #endif
        }
        return NOTIFY_OK;
       bad:
-       up(&cache_chain_sem);
+       mutex_unlock(&cache_chain_mutex);
        return NOTIFY_BAD;
 }
 
@@ -1047,7 +1051,6 @@ void __init kmem_cache_init(void)
         */
 
        /* 1) create the cache_cache */
-       init_MUTEX(&cache_chain_sem);
        INIT_LIST_HEAD(&cache_chain);
        list_add(&cache_cache.next, &cache_chain);
        cache_cache.colour_off = cache_line_size();
@@ -1168,10 +1171,10 @@ void __init kmem_cache_init(void)
        /* 6) resize the head arrays to their final sizes */
        {
                kmem_cache_t *cachep;
-               down(&cache_chain_sem);
+               mutex_lock(&cache_chain_mutex);
                list_for_each_entry(cachep, &cache_chain, next)
                    enable_cpucache(cachep);
-               up(&cache_chain_sem);
+               mutex_unlock(&cache_chain_mutex);
        }
 
        /* Done! */
@@ -1590,7 +1593,7 @@ kmem_cache_create (const char *name, size_t size, size_t align,
                BUG();
        }
 
-       down(&cache_chain_sem);
+       mutex_lock(&cache_chain_mutex);
 
        list_for_each(p, &cache_chain) {
                kmem_cache_t *pc = list_entry(p, kmem_cache_t, next);
@@ -1856,7 +1859,7 @@ kmem_cache_create (const char *name, size_t size, size_t align,
        if (!cachep && (flags & SLAB_PANIC))
                panic("kmem_cache_create(): failed to create slab `%s'\n",
                      name);
-       up(&cache_chain_sem);
+       mutex_unlock(&cache_chain_mutex);
        return cachep;
 }
 EXPORT_SYMBOL(kmem_cache_create);
@@ -2044,18 +2047,18 @@ int kmem_cache_destroy(kmem_cache_t *cachep)
        lock_cpu_hotplug();
 
        /* Find the cache in the chain of caches. */
-       down(&cache_chain_sem);
+       mutex_lock(&cache_chain_mutex);
        /*
         * the chain is never empty, cache_cache is never destroyed
         */
        list_del(&cachep->next);
-       up(&cache_chain_sem);
+       mutex_unlock(&cache_chain_mutex);
 
        if (__cache_shrink(cachep)) {
                slab_error(cachep, "Can't free all objects");
-               down(&cache_chain_sem);
+               mutex_lock(&cache_chain_mutex);
                list_add(&cachep->next, &cache_chain);
-               up(&cache_chain_sem);
+               mutex_unlock(&cache_chain_mutex);
                unlock_cpu_hotplug();
                return 1;
        }
@@ -2570,6 +2573,15 @@ static inline void *____cache_alloc(kmem_cache_t *cachep, gfp_t flags)
        void *objp;
        struct array_cache *ac;
 
+#ifdef CONFIG_NUMA
+       if (unlikely(current->mempolicy && !in_interrupt())) {
+               int nid = slab_node(current->mempolicy);
+
+               if (nid != numa_node_id())
+                       return __cache_alloc_node(cachep, flags, nid);
+       }
+#endif
+
        check_irq_off();
        ac = ac_data(cachep);
        if (likely(ac->avail)) {
@@ -3314,7 +3326,7 @@ static void drain_array_locked(kmem_cache_t *cachep, struct array_cache *ac,
  * - clear the per-cpu caches for this CPU.
  * - return freeable pages to the main free memory pool.
  *
- * If we cannot acquire the cache chain semaphore then just give up - we'll
+ * If we cannot acquire the cache chain mutex then just give up - we'll
  * try again on the next iteration.
  */
 static void cache_reap(void *unused)
@@ -3322,7 +3334,7 @@ static void cache_reap(void *unused)
        struct list_head *walk;
        struct kmem_list3 *l3;
 
-       if (down_trylock(&cache_chain_sem)) {
+       if (!mutex_trylock(&cache_chain_mutex)) {
                /* Give up. Setup the next iteration. */
                schedule_delayed_work(&__get_cpu_var(reap_work),
                                      REAPTIMEOUT_CPUC);
@@ -3393,7 +3405,7 @@ static void cache_reap(void *unused)
                cond_resched();
        }
        check_irq_on();
-       up(&cache_chain_sem);
+       mutex_unlock(&cache_chain_mutex);
        drain_remote_pages();
        /* Setup the next iteration */
        schedule_delayed_work(&__get_cpu_var(reap_work), REAPTIMEOUT_CPUC);
@@ -3429,7 +3441,7 @@ static void *s_start(struct seq_file *m, loff_t *pos)
        loff_t n = *pos;
        struct list_head *p;
 
-       down(&cache_chain_sem);
+       mutex_lock(&cache_chain_mutex);
        if (!n)
                print_slabinfo_header(m);
        p = cache_chain.next;
@@ -3451,7 +3463,7 @@ static void *s_next(struct seq_file *m, void *p, loff_t *pos)
 
 static void s_stop(struct seq_file *m, void *p)
 {
-       up(&cache_chain_sem);
+       mutex_unlock(&cache_chain_mutex);
 }
 
 static int s_show(struct seq_file *m, void *p)
@@ -3603,7 +3615,7 @@ ssize_t slabinfo_write(struct file *file, const char __user * buffer,
                return -EINVAL;
 
        /* Find the cache in the chain of caches. */
-       down(&cache_chain_sem);
+       mutex_lock(&cache_chain_mutex);
        res = -EINVAL;
        list_for_each(p, &cache_chain) {
                kmem_cache_t *cachep = list_entry(p, kmem_cache_t, next);
@@ -3620,7 +3632,7 @@ ssize_t slabinfo_write(struct file *file, const char __user * buffer,
                        break;
                }
        }
-       up(&cache_chain_sem);
+       mutex_unlock(&cache_chain_mutex);
        if (res >= 0)
                res = count;
        return res;
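
The slab.c hunks replace a semaphore that was used purely for mutual exclusion with a struct mutex. A minimal kernel-module-style sketch of that conversion pattern, guarding a list with DEFINE_MUTEX; the structure and identifiers are illustrative, not slab internals:

#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/slab.h>

struct item {
        struct list_head link;
        int value;
};

static DEFINE_MUTEX(item_mutex);        /* replaces the old semaphore */
static LIST_HEAD(item_list);

static int __init demo_init(void)
{
        struct item *it = kmalloc(sizeof(*it), GFP_KERNEL);

        if (!it)
                return -ENOMEM;
        it->value = 1;

        mutex_lock(&item_mutex);        /* was down(&sem) */
        list_add(&it->link, &item_list);
        mutex_unlock(&item_mutex);      /* was up(&sem) */
        return 0;
}

static void __exit demo_exit(void)
{
        struct item *it, *tmp;

        mutex_lock(&item_mutex);
        list_for_each_entry_safe(it, tmp, &item_list, link) {
                list_del(&it->link);
                kfree(it);
        }
        mutex_unlock(&item_mutex);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
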
index cbb48e721ab9f21fd56b380eed49c8c0e4a21031..bc2442a7b0eef63dedb08958b83c25a4d1afb518 100644 (file)
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -174,6 +174,32 @@ void lru_add_drain(void)
        put_cpu();
 }
 
+#ifdef CONFIG_NUMA
+static void lru_add_drain_per_cpu(void *dummy)
+{
+       lru_add_drain();
+}
+
+/*
+ * Returns 0 for success
+ */
+int lru_add_drain_all(void)
+{
+       return schedule_on_each_cpu(lru_add_drain_per_cpu, NULL);
+}
+
+#else
+
+/*
+ * Returns 0 for success
+ */
+int lru_add_drain_all(void)
+{
+       lru_add_drain();
+       return 0;
+}
+#endif
+
 /*
  * This path almost never happens for VM activity - pages are normally
  * freed via pagevecs.  But it gets used by networking.
index 957fef43fa6081ec62cf3459e3b5db2d865eae57..f1e69c30d203871146d10b13b4e90831b2f0f7cf 100644 (file)
@@ -25,6 +25,7 @@
 #include <linux/rmap.h>
 #include <linux/security.h>
 #include <linux/backing-dev.h>
+#include <linux/mutex.h>
 #include <linux/capability.h>
 #include <linux/syscalls.h>
 
@@ -46,12 +47,12 @@ struct swap_list_t swap_list = {-1, -1};
 
 struct swap_info_struct swap_info[MAX_SWAPFILES];
 
-static DECLARE_MUTEX(swapon_sem);
+static DEFINE_MUTEX(swapon_mutex);
 
 /*
  * We need this because the bdev->unplug_fn can sleep and we cannot
  * hold swap_lock while calling the unplug_fn. And swap_lock
- * cannot be turned into a semaphore.
+ * cannot be turned into a mutex.
  */
 static DECLARE_RWSEM(swap_unplug_sem);
 
@@ -1161,7 +1162,7 @@ asmlinkage long sys_swapoff(const char __user * specialfile)
        up_write(&swap_unplug_sem);
 
        destroy_swap_extents(p);
-       down(&swapon_sem);
+       mutex_lock(&swapon_mutex);
        spin_lock(&swap_lock);
        drain_mmlist();
 
@@ -1180,7 +1181,7 @@ asmlinkage long sys_swapoff(const char __user * specialfile)
        p->swap_map = NULL;
        p->flags = 0;
        spin_unlock(&swap_lock);
-       up(&swapon_sem);
+       mutex_unlock(&swapon_mutex);
        vfree(swap_map);
        inode = mapping->host;
        if (S_ISBLK(inode->i_mode)) {
@@ -1209,7 +1210,7 @@ static void *swap_start(struct seq_file *swap, loff_t *pos)
        int i;
        loff_t l = *pos;
 
-       down(&swapon_sem);
+       mutex_lock(&swapon_mutex);
 
        for (i = 0; i < nr_swapfiles; i++, ptr++) {
                if (!(ptr->flags & SWP_USED) || !ptr->swap_map)
@@ -1238,7 +1239,7 @@ static void *swap_next(struct seq_file *swap, void *v, loff_t *pos)
 
 static void swap_stop(struct seq_file *swap, void *v)
 {
-       up(&swapon_sem);
+       mutex_unlock(&swapon_mutex);
 }
 
 static int swap_show(struct seq_file *swap, void *v)
@@ -1540,7 +1541,7 @@ asmlinkage long sys_swapon(const char __user * specialfile, int swap_flags)
                goto bad_swap;
        }
 
-       down(&swapon_sem);
+       mutex_lock(&swapon_mutex);
        spin_lock(&swap_lock);
        p->flags = SWP_ACTIVE;
        nr_swap_pages += nr_good_pages;
@@ -1566,7 +1567,7 @@ asmlinkage long sys_swapon(const char __user * specialfile, int swap_flags)
                swap_info[prev].next = p - swap_info;
        }
        spin_unlock(&swap_lock);
-       up(&swapon_sem);
+       mutex_unlock(&swapon_mutex);
        error = 0;
        goto out;
 bad_swap:
index bf903b2d198f0820a2d03041b06de25af7a4d1d7..2e34b61a70c727afc2895529c1997a1a8d399eee 100644 (file)
@@ -71,6 +71,9 @@ struct scan_control {
 
        int may_writepage;
 
+       /* Can pages be swapped as part of reclaim? */
+       int may_swap;
+
        /* This context's SWAP_CLUSTER_MAX. If freeing memory for
         * suspend, we effectively ignore SWAP_CLUSTER_MAX.
         * In this context, it doesn't matter that we scan the
@@ -458,6 +461,8 @@ static int shrink_list(struct list_head *page_list, struct scan_control *sc)
                 * Try to allocate it some swap space here.
                 */
                if (PageAnon(page) && !PageSwapCache(page)) {
+                       if (!sc->may_swap)
+                               goto keep_locked;
                        if (!add_to_swap(page, GFP_ATOMIC))
                                goto activate_locked;
                }
@@ -586,7 +591,7 @@ static inline void move_to_lru(struct page *page)
 }
 
 /*
- * Add isolated pages on the list back to the LRU
+ * Add isolated pages on the list back to the LRU.
  *
  * returns the number of pages put back.
  */
@@ -760,46 +765,33 @@ next:
        return nr_failed + retry;
 }
 
-static void lru_add_drain_per_cpu(void *dummy)
-{
-       lru_add_drain();
-}
-
 /*
  * Isolate one page from the LRU lists and put it on the
- * indicated list. Do necessary cache draining if the
- * page is not on the LRU lists yet.
+ * indicated list with elevated refcount.
  *
  * Result:
  *  0 = page not on LRU list
  *  1 = page removed from LRU list and added to the specified list.
- * -ENOENT = page is being freed elsewhere.
  */
 int isolate_lru_page(struct page *page)
 {
-       int rc = 0;
-       struct zone *zone = page_zone(page);
+       int ret = 0;
 
-redo:
-       spin_lock_irq(&zone->lru_lock);
-       rc = __isolate_lru_page(page);
-       if (rc == 1) {
-               if (PageActive(page))
-                       del_page_from_active_list(zone, page);
-               else
-                       del_page_from_inactive_list(zone, page);
-       }
-       spin_unlock_irq(&zone->lru_lock);
-       if (rc == 0) {
-               /*
-                * Maybe this page is still waiting for a cpu to drain it
-                * from one of the lru lists?
-                */
-               rc = schedule_on_each_cpu(lru_add_drain_per_cpu, NULL);
-               if (rc == 0 && PageLRU(page))
-                       goto redo;
+       if (PageLRU(page)) {
+               struct zone *zone = page_zone(page);
+               spin_lock_irq(&zone->lru_lock);
+               if (TestClearPageLRU(page)) {
+                       ret = 1;
+                       get_page(page);
+                       if (PageActive(page))
+                               del_page_from_active_list(zone, page);
+                       else
+                               del_page_from_inactive_list(zone, page);
+               }
+               spin_unlock_irq(&zone->lru_lock);
        }
-       return rc;
+
+       return ret;
 }
 #endif
 
@@ -831,18 +823,20 @@ static int isolate_lru_pages(int nr_to_scan, struct list_head *src,
                page = lru_to_page(src);
                prefetchw_prev_lru_page(page, src, flags);
 
-               switch (__isolate_lru_page(page)) {
-               case 1:
-                       /* Succeeded to isolate page */
-                       list_move(&page->lru, dst);
-                       nr_taken++;
-                       break;
-               case -ENOENT:
-                       /* Not possible to isolate */
-                       list_move(&page->lru, src);
-                       break;
-               default:
+               if (!TestClearPageLRU(page))
                        BUG();
+               list_del(&page->lru);
+               if (get_page_testone(page)) {
+                       /*
+                        * It is being freed elsewhere
+                        */
+                       __put_page(page);
+                       SetPageLRU(page);
+                       list_add(&page->lru, src);
+                       continue;
+               } else {
+                       list_add(&page->lru, dst);
+                       nr_taken++;
                }
        }
 
@@ -1177,6 +1171,7 @@ int try_to_free_pages(struct zone **zones, gfp_t gfp_mask)
 
        sc.gfp_mask = gfp_mask;
        sc.may_writepage = 0;
+       sc.may_swap = 1;
 
        inc_page_state(allocstall);
 
@@ -1279,6 +1274,7 @@ loop_again:
        total_reclaimed = 0;
        sc.gfp_mask = GFP_KERNEL;
        sc.may_writepage = 0;
+       sc.may_swap = 1;
        sc.nr_mapped = read_page_state(nr_mapped);
 
        inc_page_state(pageoutrun);
@@ -1576,3 +1572,71 @@ static int __init kswapd_init(void)
 }
 
 module_init(kswapd_init)
+
+#ifdef CONFIG_NUMA
+/*
+ * Zone reclaim mode
+ *
+ * If non-zero call zone_reclaim when the number of free pages falls below
+ * the watermarks.
+ *
+ * In the future we may add flags to the mode. However, the page allocator
+ * should only have to check that zone_reclaim_mode != 0 before calling
+ * zone_reclaim().
+ */
+int zone_reclaim_mode __read_mostly;
+
+/*
+ * Minimum time between zone reclaim scans
+ */
+#define ZONE_RECLAIM_INTERVAL HZ/2
+/*
+ * Try to free up some pages from this zone through reclaim.
+ */
+int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
+{
+       int nr_pages = 1 << order;
+       struct task_struct *p = current;
+       struct reclaim_state reclaim_state;
+       struct scan_control sc = {
+               .gfp_mask       = gfp_mask,
+               .may_writepage  = 0,
+               .may_swap       = 0,
+               .nr_mapped      = read_page_state(nr_mapped),
+               .nr_scanned     = 0,
+               .nr_reclaimed   = 0,
+               .priority       = 0
+       };
+
+       if (!(gfp_mask & __GFP_WAIT) ||
+               zone->zone_pgdat->node_id != numa_node_id() ||
+               zone->all_unreclaimable ||
+               atomic_read(&zone->reclaim_in_progress) > 0)
+                       return 0;
+
+       if (time_before(jiffies,
+               zone->last_unsuccessful_zone_reclaim + ZONE_RECLAIM_INTERVAL))
+                       return 0;
+
+       disable_swap_token();
+
+       if (nr_pages > SWAP_CLUSTER_MAX)
+               sc.swap_cluster_max = nr_pages;
+       else
+               sc.swap_cluster_max = SWAP_CLUSTER_MAX;
+
+       cond_resched();
+       p->flags |= PF_MEMALLOC;
+       reclaim_state.reclaimed_slab = 0;
+       p->reclaim_state = &reclaim_state;
+       shrink_zone(zone, &sc);
+       p->reclaim_state = NULL;
+       current->flags &= ~PF_MEMALLOC;
+
+       if (sc.nr_reclaimed == 0)
+               zone->last_unsuccessful_zone_reclaim = jiffies;
+
+       return sc.nr_reclaimed > nr_pages;
+}
+#endif
+
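
zone_reclaim() above rate-limits itself by stamping the zone with the jiffies of the last unsuccessful scan and bailing out while less than ZONE_RECLAIM_INTERVAL has passed. The same time_before()-based back-off in isolation, with made-up field and helper names:

#include <linux/jiffies.h>

#define RETRY_INTERVAL (HZ / 2)         /* mirrors ZONE_RECLAIM_INTERVAL */

struct throttled_op {
        unsigned long last_failure;     /* jiffies of the last unsuccessful run */
};

/* Skip the work entirely if a recent attempt already failed. */
static int maybe_run(struct throttled_op *op, int (*work)(void))
{
        if (time_before(jiffies, op->last_failure + RETRY_INTERVAL))
                return 0;

        if (!work()) {
                op->last_failure = jiffies;
                return 0;
        }
        return 1;
}
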
index 9296b269d675771861bf1b2fc596dd31284faeeb..bc603d9aea56c1a798ae6c763e813365e4f689ea 100644 (file)
@@ -150,6 +150,7 @@ endif
 
 source "net/dccp/Kconfig"
 source "net/sctp/Kconfig"
+source "net/tipc/Kconfig"
 source "net/atm/Kconfig"
 source "net/bridge/Kconfig"
 source "net/8021q/Kconfig"
@@ -159,7 +160,6 @@ source "net/ipx/Kconfig"
 source "drivers/net/appletalk/Kconfig"
 source "net/x25/Kconfig"
 source "net/lapb/Kconfig"
-source "net/tipc/Kconfig"
 
 config NET_DIVERT
        bool "Frame Diverter (EXPERIMENTAL)"
index f158fe67dd605fd8ca3dd5b941df04147d0d277f..dc5d0b2427cf4c359aa0eb961b9108808db8cfbc 100644 (file)
@@ -92,7 +92,9 @@ static int ebt_ip_check(const char *tablename, unsigned int hookmask,
                if (info->invflags & EBT_IP_PROTO)
                        return -EINVAL;
                if (info->protocol != IPPROTO_TCP &&
-                   info->protocol != IPPROTO_UDP)
+                   info->protocol != IPPROTO_UDP &&
+                   info->protocol != IPPROTO_SCTP &&
+                   info->protocol != IPPROTO_DCCP)
                         return -EINVAL;
        }
        if (info->bitmask & EBT_IP_DPORT && info->dport[0] > info->dport[1])
index a29c1232c4204e5a66c45e251039c14575930c97..0128fbbe23281241d2929ab9ffd6eb74d71d67d9 100644 (file)
@@ -95,7 +95,9 @@ ebt_log_packet(unsigned int pf, unsigned int hooknum,
                       "tos=0x%02X, IP proto=%d", NIPQUAD(ih->saddr),
                       NIPQUAD(ih->daddr), ih->tos, ih->protocol);
                if (ih->protocol == IPPROTO_TCP ||
-                   ih->protocol == IPPROTO_UDP) {
+                   ih->protocol == IPPROTO_UDP ||
+                   ih->protocol == IPPROTO_SCTP ||
+                   ih->protocol == IPPROTO_DCCP) {
                        struct tcpudphdr _ports, *pptr;
 
                        pptr = skb_header_pointer(skb, ih->ihl*4,
index fd070a098f20656027dca9c38a3b7a43a0ad1082..ffb82073056e761267dcbd480603d01645cfd7cf 100644 (file)
@@ -2543,13 +2543,14 @@ int dev_ioctl(unsigned int cmd, void __user *arg)
                case SIOCBONDENSLAVE:
                case SIOCBONDRELEASE:
                case SIOCBONDSETHWADDR:
-               case SIOCBONDSLAVEINFOQUERY:
-               case SIOCBONDINFOQUERY:
                case SIOCBONDCHANGEACTIVE:
                case SIOCBRADDIF:
                case SIOCBRDELIF:
                        if (!capable(CAP_NET_ADMIN))
                                return -EPERM;
+                       /* fall through */
+               case SIOCBONDSLAVEINFOQUERY:
+               case SIOCBONDINFOQUERY:
                        dev_load(ifr.ifr_name);
                        rtnl_lock();
                        ret = dev_ifsioc(&ifr, cmd);
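
The dev_ioctl() hunk moves the read-only bonding queries below the CAP_NET_ADMIN check, so only the state-changing commands require the capability and then fall through into the common handling. The shape of that switch with generic names (check_privileged and handle are stand-ins):

#include <stdio.h>

enum cmd { CMD_READ_INFO, CMD_CHANGE_STATE };

static int check_privileged(void) { return 0; }   /* pretend we lack the capability */
static int handle(enum cmd c)     { return 0; }

static int do_ioctl(enum cmd c)
{
        switch (c) {
        case CMD_CHANGE_STATE:                    /* privileged command */
                if (!check_privileged())
                        return -1;                /* -EPERM in the kernel */
                /* fall through */
        case CMD_READ_INFO:                       /* read-only, no capability needed */
                return handle(c);
        }
        return -1;
}

int main(void)
{
        printf("read:  %d\n", do_ioctl(CMD_READ_INFO));
        printf("write: %d\n", do_ioctl(CMD_CHANGE_STATE));
        return 0;
}
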
index a52665f752240a6e48300d89403d1706675c6ee5..93fbd01d225952c66228d228a66340101448166a 100644 (file)
@@ -64,7 +64,7 @@ static inline void *load_pointer(struct sk_buff *skb, int k,
 }
 
 /**
- *     sk_run_filter   -       run a filter on a socket
+ *     sk_run_filter - run a filter on a socket
  *     @skb: buffer to run the filter on
  *     @filter: filter to apply
  *     @flen: length of filter
@@ -74,13 +74,12 @@ static inline void *load_pointer(struct sk_buff *skb, int k,
  * filtering, filter is the array of filter instructions, and
  * len is the number of filter blocks in the array.
  */
 unsigned int sk_run_filter(struct sk_buff *skb, struct sock_filter *filter, int flen)
 {
        struct sock_filter *fentry;     /* We walk down these */
        void *ptr;
-       u32 A = 0;                      /* Accumulator */
-       u32 X = 0;                      /* Index Register */
+       u32 A = 0;                      /* Accumulator */
+       u32 X = 0;                      /* Index Register */
        u32 mem[BPF_MEMWORDS];          /* Scratch Memory Store */
        u32 tmp;
        int k;
@@ -175,7 +174,7 @@ unsigned int sk_run_filter(struct sk_buff *skb, struct sock_filter *filter, int
                        continue;
                case BPF_LD|BPF_W|BPF_ABS:
                        k = fentry->k;
- load_w:
+load_w:
                        ptr = load_pointer(skb, k, 4, &tmp);
                        if (ptr != NULL) {
                                A = ntohl(*(u32 *)ptr);
@@ -184,7 +183,7 @@ unsigned int sk_run_filter(struct sk_buff *skb, struct sock_filter *filter, int
                        break;
                case BPF_LD|BPF_H|BPF_ABS:
                        k = fentry->k;
- load_h:
+load_h:
                        ptr = load_pointer(skb, k, 2, &tmp);
                        if (ptr != NULL) {
                                A = ntohs(*(u16 *)ptr);
@@ -374,7 +373,7 @@ int sk_chk_filter(struct sock_filter *filter, int flen)
                case BPF_JMP|BPF_JSET|BPF_K:
                case BPF_JMP|BPF_JSET|BPF_X:
                        /* for conditionals both must be safe */
-                       if (pc + ftest->jt + 1 >= flen ||
+                       if (pc + ftest->jt + 1 >= flen ||
                            pc + ftest->jf + 1 >= flen)
                                return -EINVAL;
                        break;
@@ -384,7 +383,7 @@ int sk_chk_filter(struct sock_filter *filter, int flen)
                }
        }
 
-        return (BPF_CLASS(filter[flen - 1].code) == BPF_RET) ? 0 : -EINVAL;
+       return (BPF_CLASS(filter[flen - 1].code) == BPF_RET) ? 0 : -EINVAL;
 }
 
 /**
@@ -404,8 +403,8 @@ int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
        int err;
 
        /* Make sure new filter is there and in the right amounts. */
-        if (fprog->filter == NULL)
-                return -EINVAL;
+       if (fprog->filter == NULL)
+               return -EINVAL;
 
        fp = sock_kmalloc(sk, fsize+sizeof(*fp), GFP_KERNEL);
        if (!fp)
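
The filter.c hunks are whitespace and indentation cleanups in sk_run_filter()/sk_chk_filter()/sk_attach_filter(), the classic BPF interpreter and its attach path. For context, user space installs such a filter with setsockopt(SO_ATTACH_FILTER); the small example below, assumed to compile on a typical Linux/glibc setup, attaches a one-instruction accept-everything program to a UDP socket:

#include <stdio.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <linux/filter.h>
#include <unistd.h>

int main(void)
{
        /* Single-instruction program: return 0xFFFFFFFF, i.e. keep the whole packet. */
        struct sock_filter insns[] = {
                BPF_STMT(BPF_RET | BPF_K, 0xFFFFFFFF),
        };
        struct sock_fprog prog = {
                .len    = sizeof(insns) / sizeof(insns[0]),
                .filter = insns,
        };
        int fd = socket(AF_INET, SOCK_DGRAM, 0);

        if (fd < 0 || setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER,
                                 &prog, sizeof(prog)) < 0) {
                perror("SO_ATTACH_FILTER");
                return 1;
        }
        puts("filter attached");
        close(fd);
        return 0;
}
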
index 281a632fa6a6eaf37085797076177f394b55327a..ea51f8d02eb8654d0533afd7bc5eb38a0e08c7fd 100644 (file)
@@ -703,7 +703,7 @@ int netpoll_setup(struct netpoll *np)
                }
        }
 
-       if (!memcmp(np->local_mac, "\0\0\0\0\0\0", 6) && ndev->dev_addr)
+       if (is_zero_ether_addr(np->local_mac) && ndev->dev_addr)
                memcpy(np->local_mac, ndev->dev_addr, 6);
 
        if (!np->local_ip) {
index 39063122fbb7383ab1b8cf2f569fac285ca4e15f..da16f8fd1494e9203c2d4bd3dbc3ef06ffe8594e 100644 (file)
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
 #include <linux/wait.h>
+#include <linux/etherdevice.h>
 #include <net/checksum.h>
 #include <net/ipv6.h>
 #include <net/addrconf.h>
@@ -281,8 +282,8 @@ struct pktgen_dev {
         __u32 src_mac_count; /* How many MACs to iterate through */
         __u32 dst_mac_count; /* How many MACs to iterate through */
         
-        unsigned char dst_mac[6];
-        unsigned char src_mac[6];
+        unsigned char dst_mac[ETH_ALEN];
+        unsigned char src_mac[ETH_ALEN];
         
         __u32 cur_dst_mac_offset;
         __u32 cur_src_mac_offset;
@@ -594,16 +595,9 @@ static int pktgen_if_show(struct seq_file *seq, void *v)
 
        seq_puts(seq, "     src_mac: ");
 
-       if ((pkt_dev->src_mac[0] == 0) && 
-           (pkt_dev->src_mac[1] == 0) && 
-           (pkt_dev->src_mac[2] == 0) && 
-           (pkt_dev->src_mac[3] == 0) && 
-           (pkt_dev->src_mac[4] == 0) && 
-           (pkt_dev->src_mac[5] == 0)) 
-
+       if (is_zero_ether_addr(pkt_dev->src_mac))
                for (i = 0; i < 6; i++) 
                        seq_printf(seq,  "%02X%s", pkt_dev->odev->dev_addr[i], i == 5 ? "  " : ":");
-
        else 
                for (i = 0; i < 6; i++) 
                        seq_printf(seq,  "%02X%s", pkt_dev->src_mac[i], i == 5 ? "  " : ":");
@@ -1189,9 +1183,9 @@ static ssize_t pktgen_if_write(struct file *file, const char __user *user_buffer
        }
        if (!strcmp(name, "dst_mac")) {
                char *v = valstr;
-                unsigned char old_dmac[6];
+               unsigned char old_dmac[ETH_ALEN];
                unsigned char *m = pkt_dev->dst_mac;
-                memcpy(old_dmac, pkt_dev->dst_mac, 6);
+               memcpy(old_dmac, pkt_dev->dst_mac, ETH_ALEN);
                 
                len = strn_len(&user_buffer[i], sizeof(valstr) - 1);
                 if (len < 0) { return len; }
@@ -1220,8 +1214,8 @@ static ssize_t pktgen_if_write(struct file *file, const char __user *user_buffer
                }
 
                /* Set up Dest MAC */
-                if (memcmp(old_dmac, pkt_dev->dst_mac, 6) != 0) 
-                        memcpy(&(pkt_dev->hh[0]), pkt_dev->dst_mac, 6);
+               if (compare_ether_addr(old_dmac, pkt_dev->dst_mac))
+                       memcpy(&(pkt_dev->hh[0]), pkt_dev->dst_mac, ETH_ALEN);
                 
                sprintf(pg_result, "OK: dstmac");
                return count;
@@ -1560,17 +1554,11 @@ static void pktgen_setup_inject(struct pktgen_dev *pkt_dev)
         
         /* Default to the interface's mac if not explicitly set. */
 
-       if ((pkt_dev->src_mac[0] == 0) && 
-           (pkt_dev->src_mac[1] == 0) && 
-           (pkt_dev->src_mac[2] == 0) && 
-           (pkt_dev->src_mac[3] == 0) && 
-           (pkt_dev->src_mac[4] == 0) && 
-           (pkt_dev->src_mac[5] == 0)) {
+       if (is_zero_ether_addr(pkt_dev->src_mac))
+              memcpy(&(pkt_dev->hh[6]), pkt_dev->odev->dev_addr, ETH_ALEN);
 
-              memcpy(&(pkt_dev->hh[6]), pkt_dev->odev->dev_addr, 6);
-       }
         /* Set up Dest MAC */
-        memcpy(&(pkt_dev->hh[0]), pkt_dev->dst_mac, 6);
+       memcpy(&(pkt_dev->hh[0]), pkt_dev->dst_mac, ETH_ALEN);
 
         /* Set up pkt size */
         pkt_dev->cur_pkt_size = pkt_dev->min_pkt_size;
@@ -1872,13 +1860,14 @@ static struct sk_buff *fill_packet_ipv4(struct net_device *odev,
         */
        mod_cur_headers(pkt_dev);
 
-       skb = alloc_skb(pkt_dev->cur_pkt_size + 64 + 16, GFP_ATOMIC);
+       datalen = (odev->hard_header_len + 16) & ~0xf;
+       skb = alloc_skb(pkt_dev->cur_pkt_size + 64 + datalen, GFP_ATOMIC);
        if (!skb) {
                sprintf(pkt_dev->result, "No memory");
                return NULL;
        }
 
-       skb_reserve(skb, 16);
+       skb_reserve(skb, datalen);
 
        /*  Reserve for ethernet and IP header  */
        eth = (__u8 *) skb_push(skb, 14);
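
The pktgen cleanup swaps open-coded six-byte loops and memcmp() calls for ETH_ALEN, is_zero_ether_addr() and compare_ether_addr() from <linux/etherdevice.h>. A user-space analogue of what those helpers do, assuming the usual 6-byte MAC length (these re-implementations are for illustration only and are not the kernel functions):

#include <stdio.h>
#include <string.h>

#define ETH_ALEN 6

/* Mirrors kernel is_zero_ether_addr(): true if all six bytes are zero. */
static int mac_is_zero(const unsigned char *a)
{
        static const unsigned char zero[ETH_ALEN];
        return memcmp(a, zero, ETH_ALEN) == 0;
}

/* Mirrors kernel compare_ether_addr(): 0 when equal, nonzero otherwise. */
static int mac_compare(const unsigned char *a, const unsigned char *b)
{
        return memcmp(a, b, ETH_ALEN) != 0;
}

int main(void)
{
        unsigned char dev_addr[ETH_ALEN] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
        unsigned char src_mac[ETH_ALEN]  = { 0 };

        /* "Default to the interface's MAC if not explicitly set." */
        if (mac_is_zero(src_mac))
                memcpy(src_mac, dev_addr, ETH_ALEN);

        printf("equal: %s\n", mac_compare(src_mac, dev_addr) ? "no" : "yes");
        return 0;
}
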
index d0732e9c8560e10d8ec239957fa3b8d4d3c1df00..6766f118f07068719b551644066839a154267cf7 100644 (file)
@@ -135,13 +135,15 @@ void skb_under_panic(struct sk_buff *skb, int sz, void *here)
 struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
                            int fclone)
 {
+       kmem_cache_t *cache;
        struct skb_shared_info *shinfo;
        struct sk_buff *skb;
        u8 *data;
 
+       cache = fclone ? skbuff_fclone_cache : skbuff_head_cache;
+
        /* Get the HEAD */
-       skb = kmem_cache_alloc(fclone ? skbuff_fclone_cache : skbuff_head_cache,
-                               gfp_mask & ~__GFP_DMA);
+       skb = kmem_cache_alloc(cache, gfp_mask & ~__GFP_DMA);
        if (!skb)
                goto out;
 
@@ -180,7 +182,7 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
 out:
        return skb;
 nodata:
-       kmem_cache_free(skbuff_head_cache, skb);
+       kmem_cache_free(cache, skb);
        skb = NULL;
        goto out;
 }
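
The __alloc_skb() fix records which cache the head came from so the nodata error path frees back to that same cache; previously an fclone allocation that failed partway was returned to skbuff_head_cache. The underlying rule is generic: when a resource's pool is chosen conditionally, the release path must reuse that same choice. A user-space sketch of the pattern (pool names and sizes are invented):

#include <stdio.h>
#include <stdlib.h>

struct pool { const char *name; };

static struct pool small_pool = { "small" };
static struct pool large_pool = { "large" };

static void *pool_alloc(struct pool *p, size_t sz)
{
        printf("allocating from %s pool\n", p->name);
        return malloc(sz);
}

static void pool_free(struct pool *p, void *obj)
{
        printf("freeing via %s pool\n", p->name);
        free(obj);
}

static void *make_object(int large)
{
        struct pool *cache = large ? &large_pool : &small_pool;  /* remember the choice */
        void *head = pool_alloc(cache, 64);
        void *data = NULL;                      /* pretend a second allocation failed */

        if (!data) {
                pool_free(cache, head);         /* free to the cache we allocated from */
                return NULL;
        }
        return head;
}

int main(void)
{
        make_object(1);
        return 0;
}
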
index ce9cb77c5c29c272b7f6462c0b0a835cd1383454..2c77dafbd091f9c4627097b037b0a816bf18301e 100644 (file)
@@ -144,7 +144,7 @@ static inline int dccp_ackvec_set_buf_head_state(struct dccp_ackvec *av,
                                                 const unsigned char state)
 {
        unsigned int gap;
-       signed long new_head;
+       long new_head;
 
        if (av->dccpav_vec_len + packets > av->dccpav_buf_len)
                return -ENOBUFS;
index 7a121802faa92d7aa33b07734d8c6a0f3165662e..960aa78cdb972e1662bde851d7b025c61502f0fe 100644 (file)
@@ -350,6 +350,7 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
        u8 src[ETH_ALEN];
        struct ieee80211_crypt_data *crypt = NULL;
        int keyidx = 0;
+       int can_be_decrypted = 0;
 
        hdr = (struct ieee80211_hdr_4addr *)skb->data;
        stats = &ieee->stats;
@@ -410,12 +411,23 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
                return 1;
        }
 
-       if (is_multicast_ether_addr(hdr->addr1)
-           ? ieee->host_mc_decrypt : ieee->host_decrypt) {
+       can_be_decrypted = (is_multicast_ether_addr(hdr->addr1) ||
+                           is_broadcast_ether_addr(hdr->addr2)) ?
+           ieee->host_mc_decrypt : ieee->host_decrypt;
+
+       if (can_be_decrypted) {
                int idx = 0;
-               if (skb->len >= hdrlen + 3)
+               if (skb->len >= hdrlen + 3) {
+                       /* Top two-bits of byte 3 are the key index */
                        idx = skb->data[hdrlen + 3] >> 6;
+               }
+
+               /* ieee->crypt[] is WEP_KEY (4) in length.  Given that idx
+                * is only allowed 2-bits of storage, no value of idx can
+                * be provided via above code that would result in idx
+                * being out of range */
                crypt = ieee->crypt[idx];
+
 #ifdef NOT_YET
                sta = NULL;
 
@@ -553,7 +565,7 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
 
        /* skb: hdr + (possibly fragmented, possibly encrypted) payload */
 
-       if (ieee->host_decrypt && (fc & IEEE80211_FCTL_PROTECTED) &&
+       if ((fc & IEEE80211_FCTL_PROTECTED) && can_be_decrypted &&
            (keyidx = ieee80211_rx_frame_decrypt(ieee, skb, crypt)) < 0)
                goto rx_dropped;
 
@@ -617,7 +629,7 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
 
        /* skb: hdr + (possible reassembled) full MSDU payload; possibly still
         * encrypted/authenticated */
-       if (ieee->host_decrypt && (fc & IEEE80211_FCTL_PROTECTED) &&
+       if ((fc & IEEE80211_FCTL_PROTECTED) && can_be_decrypted &&
            ieee80211_rx_frame_decrypt_msdu(ieee, skb, keyidx, crypt))
                goto rx_dropped;
 
@@ -1439,7 +1451,7 @@ void ieee80211_rx_mgt(struct ieee80211_device *ieee,
                break;
 
        case IEEE80211_STYPE_PROBE_REQ:
-               IEEE80211_DEBUG_MGMT("recieved auth (%d)\n",
+               IEEE80211_DEBUG_MGMT("received auth (%d)\n",
                                     WLAN_FC_GET_STYPE(le16_to_cpu
                                                       (header->frame_ctl)));
 
@@ -1473,7 +1485,7 @@ void ieee80211_rx_mgt(struct ieee80211_device *ieee,
                break;
        case IEEE80211_STYPE_AUTH:
 
-               IEEE80211_DEBUG_MGMT("recieved auth (%d)\n",
+               IEEE80211_DEBUG_MGMT("received auth (%d)\n",
                                     WLAN_FC_GET_STYPE(le16_to_cpu
                                                       (header->frame_ctl)));
 
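
The ieee80211_rx() change documents that the security key index sits in the top two bits of the fourth IV byte, so the extracted idx can never exceed the four-slot crypt[] array. The extraction itself is a one-line shift; a stand-alone illustration with made-up header length and frame bytes:

#include <stdio.h>

#define WEP_KEYS 4

int main(void)
{
        /* Pretend the 802.11 header is 24 bytes and the 4 IV bytes follow it. */
        unsigned char frame[32] = { 0 };
        unsigned int hdrlen = 24;

        frame[hdrlen + 3] = 0xC0;                    /* key index bits set to 3 */

        unsigned int idx = frame[hdrlen + 3] >> 6;   /* top two bits: 0..3 */
        printf("key index %u (array size %d)\n", idx, WEP_KEYS);
        return 0;
}
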
index 23e1630f50b7cf872e01286c01a108cbc66c478f..f87c6b89f8450e513fd6c434864e7ae65b267ad0 100644 (file)
@@ -232,15 +232,18 @@ static char *ipw2100_translate_scan(struct ieee80211_device *ieee,
        return start;
 }
 
+#define SCAN_ITEM_SIZE 128
+
 int ieee80211_wx_get_scan(struct ieee80211_device *ieee,
                          struct iw_request_info *info,
                          union iwreq_data *wrqu, char *extra)
 {
        struct ieee80211_network *network;
        unsigned long flags;
+       int err = 0;
 
        char *ev = extra;
-       char *stop = ev + IW_SCAN_MAX_DATA;
+       char *stop = ev + wrqu->data.length;
        int i = 0;
 
        IEEE80211_DEBUG_WX("Getting scan\n");
@@ -249,6 +252,11 @@ int ieee80211_wx_get_scan(struct ieee80211_device *ieee,
 
        list_for_each_entry(network, &ieee->network_list, list) {
                i++;
+               if (stop - ev < SCAN_ITEM_SIZE) {
+                       err = -E2BIG;
+                       break;
+               }
+
                if (ieee->scan_age == 0 ||
                    time_after(network->last_scanned + ieee->scan_age, jiffies))
                        ev = ipw2100_translate_scan(ieee, ev, stop, network);
@@ -270,7 +278,7 @@ int ieee80211_wx_get_scan(struct ieee80211_device *ieee,
 
        IEEE80211_DEBUG_WX("exit: %d networks returned.\n", i);
 
-       return 0;
+       return err;
 }
 
 int ieee80211_wx_set_encode(struct ieee80211_device *ieee,
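
ieee80211_wx_get_scan() now bounds its output by the caller-supplied wrqu->data.length instead of the fixed IW_SCAN_MAX_DATA and returns -E2BIG when the next entry would not fit, so user space can retry with a larger buffer. The loop shape with stand-in names (buffer handling only, not the real wireless-extensions plumbing):

#include <stdio.h>
#include <errno.h>

#define ITEM_SIZE 128                   /* mirrors SCAN_ITEM_SIZE */

static int fill_results(char *buf, int buflen, int nitems)
{
        char *ev = buf;
        char *stop = buf + buflen;      /* honour the caller's length */
        int err = 0;

        for (int i = 0; i < nitems; i++) {
                if (stop - ev < ITEM_SIZE) {
                        err = -E2BIG;   /* caller should retry with more room */
                        break;
                }
                ev += snprintf(ev, stop - ev, "network %d;", i);
        }
        return err;
}

int main(void)
{
        char buf[200];
        printf("err = %d\n", fill_results(buf, sizeof(buf), 10));
        return 0;
}
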
index 192092b89e534732ae8ab1eb6f62015ecfb2f2a1..0b4e95f93dad62cbfed26297ada2fbe86cf07163 100644 (file)
@@ -233,7 +233,18 @@ static int is_in(struct ip_mc_list *pmc, struct ip_sf_list *psf, int type,
        case IGMPV3_MODE_IS_EXCLUDE:
                if (gdeleted || sdeleted)
                        return 0;
-               return !(pmc->gsquery && !psf->sf_gsresp);
+               if (!(pmc->gsquery && !psf->sf_gsresp)) {
+                       if (pmc->sfmode == MCAST_INCLUDE)
+                               return 1;
+                       /* don't include if this source is excluded
+                        * in all filters
+                        */
+                       if (psf->sf_count[MCAST_INCLUDE])
+                               return type == IGMPV3_MODE_IS_INCLUDE;
+                       return pmc->sfcount[MCAST_EXCLUDE] ==
+                               psf->sf_count[MCAST_EXCLUDE];
+               }
+               return 0;
        case IGMPV3_CHANGE_TO_INCLUDE:
                if (gdeleted || sdeleted)
                        return 0;
@@ -385,7 +396,7 @@ static struct sk_buff *add_grec(struct sk_buff *skb, struct ip_mc_list *pmc,
        struct igmpv3_report *pih;
        struct igmpv3_grec *pgr = NULL;
        struct ip_sf_list *psf, *psf_next, *psf_prev, **psf_list;
-       int scount, first, isquery, truncate;
+       int scount, stotal, first, isquery, truncate;
 
        if (pmc->multiaddr == IGMP_ALL_HOSTS)
                return skb;
@@ -395,25 +406,13 @@ static struct sk_buff *add_grec(struct sk_buff *skb, struct ip_mc_list *pmc,
        truncate = type == IGMPV3_MODE_IS_EXCLUDE ||
                    type == IGMPV3_CHANGE_TO_EXCLUDE;
 
+       stotal = scount = 0;
+
        psf_list = sdeleted ? &pmc->tomb : &pmc->sources;
 
-       if (!*psf_list) {
-               if (type == IGMPV3_ALLOW_NEW_SOURCES ||
-                   type == IGMPV3_BLOCK_OLD_SOURCES)
-                       return skb;
-               if (pmc->crcount || isquery) {
-                       /* make sure we have room for group header and at
-                        * least one source.
-                        */
-                       if (skb && AVAILABLE(skb) < sizeof(struct igmpv3_grec)+
-                           sizeof(__u32)) {
-                               igmpv3_sendpack(skb);
-                               skb = NULL; /* add_grhead will get a new one */
-                       }
-                       skb = add_grhead(skb, pmc, type, &pgr);
-               }
-               return skb;
-       }
+       if (!*psf_list)
+               goto empty_source;
+
        pih = skb ? (struct igmpv3_report *)skb->h.igmph : NULL;
 
        /* EX and TO_EX get a fresh packet, if needed */
@@ -426,7 +425,6 @@ static struct sk_buff *add_grec(struct sk_buff *skb, struct ip_mc_list *pmc,
                }
        }
        first = 1;
-       scount = 0;
        psf_prev = NULL;
        for (psf=*psf_list; psf; psf=psf_next) {
                u32 *psrc;
@@ -460,7 +458,7 @@ static struct sk_buff *add_grec(struct sk_buff *skb, struct ip_mc_list *pmc,
                }
                psrc = (u32 *)skb_put(skb, sizeof(u32));
                *psrc = psf->sf_inaddr;
-               scount++;
+               scount++; stotal++;
                if ((type == IGMPV3_ALLOW_NEW_SOURCES ||
                     type == IGMPV3_BLOCK_OLD_SOURCES) && psf->sf_crcount) {
                        psf->sf_crcount--;
@@ -475,6 +473,21 @@ static struct sk_buff *add_grec(struct sk_buff *skb, struct ip_mc_list *pmc,
                }
                psf_prev = psf;
        }
+
+empty_source:
+       if (!stotal) {
+               if (type == IGMPV3_ALLOW_NEW_SOURCES ||
+                   type == IGMPV3_BLOCK_OLD_SOURCES)
+                       return skb;
+               if (pmc->crcount || isquery) {
+                       /* make sure we have room for group header */
+                       if (skb && AVAILABLE(skb)<sizeof(struct igmpv3_grec)) {
+                               igmpv3_sendpack(skb);
+                               skb = NULL; /* add_grhead will get a new one */
+                       }
+                       skb = add_grhead(skb, pmc, type, &pgr);
+               }
+       }
        if (pgr)
                pgr->grec_nsrcs = htons(scount);
 
@@ -557,11 +570,11 @@ static void igmpv3_send_cr(struct in_device *in_dev)
                        skb = add_grec(skb, pmc, dtype, 1, 1);
                }
                if (pmc->crcount) {
-                       pmc->crcount--;
                        if (pmc->sfmode == MCAST_EXCLUDE) {
                                type = IGMPV3_CHANGE_TO_INCLUDE;
                                skb = add_grec(skb, pmc, type, 1, 0);
                        }
+                       pmc->crcount--;
                        if (pmc->crcount == 0) {
                                igmpv3_clear_zeros(&pmc->tomb);
                                igmpv3_clear_zeros(&pmc->sources);
@@ -594,12 +607,12 @@ static void igmpv3_send_cr(struct in_device *in_dev)
 
                /* filter mode changes */
                if (pmc->crcount) {
-                       pmc->crcount--;
                        if (pmc->sfmode == MCAST_EXCLUDE)
                                type = IGMPV3_CHANGE_TO_EXCLUDE;
                        else
                                type = IGMPV3_CHANGE_TO_INCLUDE;
                        skb = add_grec(skb, pmc, type, 0, 0);
+                       pmc->crcount--;
                }
                spin_unlock_bh(&pmc->lock);
        }
@@ -735,11 +748,43 @@ static void igmp_timer_expire(unsigned long data)
        ip_ma_put(im);
 }
 
-static void igmp_marksources(struct ip_mc_list *pmc, int nsrcs, __u32 *srcs)
+/* mark EXCLUDE-mode sources */
+static int igmp_xmarksources(struct ip_mc_list *pmc, int nsrcs, __u32 *srcs)
 {
        struct ip_sf_list *psf;
        int i, scount;
 
+       scount = 0;
+       for (psf=pmc->sources; psf; psf=psf->sf_next) {
+               if (scount == nsrcs)
+                       break;
+               for (i=0; i<nsrcs; i++) {
+                       /* skip inactive filters */
+                       if (pmc->sfcount[MCAST_INCLUDE] ||
+                           pmc->sfcount[MCAST_EXCLUDE] !=
+                           psf->sf_count[MCAST_EXCLUDE])
+                               continue;
+                       if (srcs[i] == psf->sf_inaddr) {
+                               scount++;
+                               break;
+                       }
+               }
+       }
+       pmc->gsquery = 0;
+       if (scount == nsrcs)    /* all sources excluded */
+               return 0;
+       return 1;
+}
+
+static int igmp_marksources(struct ip_mc_list *pmc, int nsrcs, __u32 *srcs)
+{
+       struct ip_sf_list *psf;
+       int i, scount;
+
+       if (pmc->sfmode == MCAST_EXCLUDE)
+               return igmp_xmarksources(pmc, nsrcs, srcs);
+
+       /* mark INCLUDE-mode sources */
        scount = 0;
        for (psf=pmc->sources; psf; psf=psf->sf_next) {
                if (scount == nsrcs)
@@ -751,6 +796,12 @@ static void igmp_marksources(struct ip_mc_list *pmc, int nsrcs, __u32 *srcs)
                                break;
                        }
        }
+       if (!scount) {
+               pmc->gsquery = 0;
+               return 0;
+       }
+       pmc->gsquery = 1;
+       return 1;
 }
 
 static void igmp_heard_report(struct in_device *in_dev, u32 group)
@@ -845,6 +896,8 @@ static void igmp_heard_query(struct in_device *in_dev, struct sk_buff *skb,
         */
        read_lock(&in_dev->mc_list_lock);
        for (im=in_dev->mc_list; im!=NULL; im=im->next) {
+               int changed;
+
                if (group && group != im->multiaddr)
                        continue;
                if (im->multiaddr == IGMP_ALL_HOSTS)
@@ -854,10 +907,11 @@ static void igmp_heard_query(struct in_device *in_dev, struct sk_buff *skb,
                        im->gsquery = im->gsquery && mark;
                else
                        im->gsquery = mark;
-               if (im->gsquery)
-                       igmp_marksources(im, ntohs(ih3->nsrcs), ih3->srcs);
+               changed = !im->gsquery ||
+                       igmp_marksources(im, ntohs(ih3->nsrcs), ih3->srcs);
                spin_unlock_bh(&im->lock);
-               igmp_mod_timer(im, max_delay);
+               if (changed)
+                       igmp_mod_timer(im, max_delay);
        }
        read_unlock(&in_dev->mc_list_lock);
 }
@@ -916,7 +970,7 @@ int igmp_rcv(struct sk_buff *skb)
        case IGMP_MTRACE_RESP:
                break;
        default:
-               NETDEBUG(KERN_DEBUG "New IGMP type=%d, why we do not know about it?\n", ih->type);
+               break;
        }
 
 drop:
@@ -1510,7 +1564,7 @@ static void sf_markstate(struct ip_mc_list *pmc)
 
 static int sf_setstate(struct ip_mc_list *pmc)
 {
-       struct ip_sf_list *psf;
+       struct ip_sf_list *psf, *dpsf;
        int mca_xcount = pmc->sfcount[MCAST_EXCLUDE];
        int qrv = pmc->interface->mr_qrv;
        int new_in, rv;
@@ -1522,8 +1576,46 @@ static int sf_setstate(struct ip_mc_list *pmc)
                                !psf->sf_count[MCAST_INCLUDE];
                } else
                        new_in = psf->sf_count[MCAST_INCLUDE] != 0;
-               if (new_in != psf->sf_oldin) {
-                       psf->sf_crcount = qrv;
+               if (new_in) {
+                       if (!psf->sf_oldin) {
+                               struct ip_sf_list *prev = 0;
+
+                               for (dpsf=pmc->tomb; dpsf; dpsf=dpsf->sf_next) {
+                                       if (dpsf->sf_inaddr == psf->sf_inaddr)
+                                               break;
+                                       prev = dpsf;
+                               }
+                               if (dpsf) {
+                                       if (prev)
+                                               prev->sf_next = dpsf->sf_next;
+                                       else
+                                               pmc->tomb = dpsf->sf_next;
+                                       kfree(dpsf);
+                               }
+                               psf->sf_crcount = qrv;
+                               rv++;
+                       }
+               } else if (psf->sf_oldin) {
+
+                       psf->sf_crcount = 0;
+                       /*
+                        * add or update "delete" records if an active filter
+                        * is now inactive
+                        */
+                       for (dpsf=pmc->tomb; dpsf; dpsf=dpsf->sf_next)
+                               if (dpsf->sf_inaddr == psf->sf_inaddr)
+                                       break;
+                       if (!dpsf) {
+                               dpsf = (struct ip_sf_list *)
+                                       kmalloc(sizeof(*dpsf), GFP_ATOMIC);
+                               if (!dpsf)
+                                       continue;
+                               *dpsf = *psf;
+                               /* pmc->lock held by callers */
+                               dpsf->sf_next = pmc->tomb;
+                               pmc->tomb = dpsf;
+                       }
+                       dpsf->sf_crcount = qrv;
                        rv++;
                }
        }
index bcefe64b93177c7e4705065c6f909f46eb7a06e2..e5c5b3202f024a89c9162df7e8ef19308fc17778 100644 (file)
@@ -46,7 +46,6 @@ obj-$(CONFIG_IP_NF_NAT) += iptable_nat.o
 obj-$(CONFIG_IP_NF_RAW) += iptable_raw.o
 
 # matches
-obj-$(CONFIG_IP_NF_MATCH_HELPER) += ipt_helper.o
 obj-$(CONFIG_IP_NF_MATCH_HASHLIMIT) += ipt_hashlimit.o
 obj-$(CONFIG_IP_NF_MATCH_IPRANGE) += ipt_iprange.o
 obj-$(CONFIG_IP_NF_MATCH_MULTIPORT) += ipt_multiport.o
index c777abf16cb7a99573a2a9d841b96c9fe8f6f4e5..56794797d55b9eedce61b7542ceae00a627d68ae 100644 (file)
@@ -32,6 +32,7 @@
 #include <linux/in.h>
 #include <linux/list.h>
 #include <linux/seq_file.h>
+#include <linux/interrupt.h>
 
 static DEFINE_RWLOCK(ip_ct_gre_lock);
 #define ASSERT_READ_LOCK(x)
index 709debcc69c92d2ee1403e8ad86cb61a2e946328..18ca8258a1c597c170fd718f5c8556a1eaefb382 100644 (file)
@@ -95,7 +95,10 @@ match_policy_out(const struct sk_buff *skb, const struct ipt_policy_info *info)
 static int match(const struct sk_buff *skb,
                  const struct net_device *in,
                  const struct net_device *out,
-                 const void *matchinfo, int offset, int *hotdrop)
+                 const void *matchinfo,
+                 int offset,
+                 unsigned int protoff,
+                 int *hotdrop)
 {
        const struct ipt_policy_info *info = matchinfo;
        int ret;
@@ -113,7 +116,7 @@ static int match(const struct sk_buff *skb,
        return ret;
 }
 
-static int checkentry(const char *tablename, const struct ipt_ip *ip,
+static int checkentry(const char *tablename, const void *ip_void,
                       void *matchinfo, unsigned int matchsize,
                       unsigned int hook_mask)
 {
index 165a4d81efa4a75663e72e12d3cfa12eddb9f969..f29a12da5109d91a9a9dfebd318c2b1aa5a63b94 100644 (file)
  */
  
 #include <linux/config.h> 
+#include <linux/types.h>
 #include <asm/atomic.h>
 #include <asm/byteorder.h>
 #include <asm/current.h>
 #include <asm/uaccess.h>
 #include <asm/ioctls.h>
-#include <linux/types.h>
 #include <linux/stddef.h>
 #include <linux/slab.h>
 #include <linux/errno.h>
index f701a136a6ae010c48fae0204b7e028aafb90c8f..d82c242ea7049b7c84aaf03f71ae0dbc38f86c0c 100644 (file)
@@ -240,9 +240,9 @@ static unsigned                     rt_hash_mask;
 static int                     rt_hash_log;
 static unsigned int            rt_hash_rnd;
 
-static struct rt_cache_stat *rt_cache_stat;
-#define RT_CACHE_STAT_INC(field)                                         \
-               (per_cpu_ptr(rt_cache_stat, raw_smp_processor_id())->field++)
+static DEFINE_PER_CPU(struct rt_cache_stat, rt_cache_stat);
+#define RT_CACHE_STAT_INC(field) \
+       (per_cpu(rt_cache_stat, raw_smp_processor_id()).field++)
 
 static int rt_intern_hash(unsigned hash, struct rtable *rth,
                                struct rtable **res);
@@ -401,7 +401,7 @@ static void *rt_cpu_seq_start(struct seq_file *seq, loff_t *pos)
                if (!cpu_possible(cpu))
                        continue;
                *pos = cpu+1;
-               return per_cpu_ptr(rt_cache_stat, cpu);
+               return &per_cpu(rt_cache_stat, cpu);
        }
        return NULL;
 }
@@ -414,7 +414,7 @@ static void *rt_cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
                if (!cpu_possible(cpu))
                        continue;
                *pos = cpu+1;
-               return per_cpu_ptr(rt_cache_stat, cpu);
+               return &per_cpu(rt_cache_stat, cpu);
        }
        return NULL;
        
@@ -3160,10 +3160,6 @@ int __init ip_rt_init(void)
        ipv4_dst_ops.gc_thresh = (rt_hash_mask + 1);
        ip_rt_max_size = (rt_hash_mask + 1) * 16;
 
-       rt_cache_stat = alloc_percpu(struct rt_cache_stat);
-       if (!rt_cache_stat)
-               return -ENOMEM;
-
        devinet_init();
        ip_fib_init();
 
@@ -3191,7 +3187,6 @@ int __init ip_rt_init(void)
        if (!proc_net_fops_create("rt_cache", S_IRUGO, &rt_cache_seq_fops) ||
            !(rtstat_pde = create_proc_entry("rt_cache", S_IRUGO, 
                                             proc_net_stat))) {
-               free_percpu(rt_cache_stat);
                return -ENOMEM;
        }
        rtstat_pde->proc_fops = &rt_cpu_seq_fops;
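
The route.c hunks turn rt_cache_stat from an alloc_percpu() area into a static DEFINE_PER_CPU variable, dropping the allocation and the matching error/cleanup code. A compact kernel-style sketch of a statically defined per-CPU counter being bumped and summed (the counter name and helpers are illustrative):

#include <linux/percpu.h>
#include <linux/cpumask.h>

struct demo_stat {
        unsigned long hits;
};

static DEFINE_PER_CPU(struct demo_stat, demo_stat);

/* Bump this CPU's counter; get_cpu_var() disables preemption around the access. */
static void demo_stat_inc(void)
{
        get_cpu_var(demo_stat).hits++;
        put_cpu_var(demo_stat);
}

/* Sum over all possible CPUs, as the /proc seq_file code does. */
static unsigned long demo_stat_sum(void)
{
        unsigned long sum = 0;
        int cpu;

        for_each_possible_cpu(cpu)
                sum += per_cpu(demo_stat, cpu).hits;
        return sum;
}
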
index 3284cfb993e6bd9c70963ba0f389b582f4183ab9..128de4d7c0b7dd40c95d94b3efdbab73a2e20041 100644 (file)
@@ -230,7 +230,6 @@ static void htcp_cong_avoid(struct sock *sk, u32 ack, u32 rtt,
                        if (tp->snd_cwnd < tp->snd_cwnd_clamp)
                                tp->snd_cwnd++;
                        tp->snd_cwnd_cnt = 0;
-                       ca->ccount++;
                }
        }
 }
index dfb4f145a139af35f9ef2ebb61723d5ae3edd5d1..d328d59861438ae0b8bad815a3ef34cacf7de328 100644 (file)
@@ -2644,7 +2644,7 @@ static int if6_seq_show(struct seq_file *seq, void *v)
 {
        struct inet6_ifaddr *ifp = (struct inet6_ifaddr *)v;
        seq_printf(seq,
-                  NIP6_FMT " %02x %02x %02x %02x %8s\n",
+                  NIP6_SEQFMT " %02x %02x %02x %02x %8s\n",
                   NIP6(ifp->addr),
                   ifp->idev->dev->ifindex,
                   ifp->prefix_len,
index 72bd08af2dfb0ac1c1f1d3fe0fe94cf7cdf9363d..840a33d3329696bfc5f9c2a67ca6e86b6336dca0 100644 (file)
@@ -532,7 +532,7 @@ static int ac6_seq_show(struct seq_file *seq, void *v)
        struct ac6_iter_state *state = ac6_seq_private(seq);
 
        seq_printf(seq,
-                  "%-4d %-15s " NIP6_FMT " %5d\n",
+                  "%-4d %-15s " NIP6_SEQFMT " %5d\n",
                   state->dev->ifindex, state->dev->name,
                   NIP6(im->aca_addr),
                   im->aca_users);
index 4183c8dac7f6e16c448bd7e74f1f67ab649507fc..69cbe8a66d02ce7f97cc4163b216b45251972434 100644 (file)
@@ -629,7 +629,7 @@ static void ip6fl_fl_seq_show(struct seq_file *seq, struct ip6_flowlabel *fl)
 {
        while(fl) {
                seq_printf(seq,
-                          "%05X %-1d %-6d %-6d %-6ld %-8ld " NIP6_FMT " %-4d\n",
+                          "%05X %-1d %-6d %-6d %-6ld %-8ld " NIP6_SEQFMT " %-4d\n",
                           (unsigned)ntohl(fl->label),
                           fl->share,
                           (unsigned)fl->owner,
@@ -645,7 +645,7 @@ static void ip6fl_fl_seq_show(struct seq_file *seq, struct ip6_flowlabel *fl)
 static int ip6fl_seq_show(struct seq_file *seq, void *v)
 {
        if (v == SEQ_START_TOKEN)
-               seq_printf(seq, "%-5s %-1s %-6s %-6s %-6s %-8s %-39s %s\n",
+               seq_printf(seq, "%-5s %-1s %-6s %-6s %-6s %-8s %-32s %s\n",
                           "Label", "S", "Owner", "Users", "Linger", "Expires", "Dst", "Opt");
        else
                ip6fl_fl_seq_show(seq, v);
index 0e03eabfb9da3fea0541736e1dc742fb9052de36..4420948a1bfe9f23ec29de602551dd6bca9cbcf3 100644 (file)
@@ -1252,8 +1252,7 @@ int igmp6_event_query(struct sk_buff *skb)
                }
        } else {
                for (ma = idev->mc_list; ma; ma=ma->next) {
-                       if (group_type != IPV6_ADDR_ANY &&
-                           !ipv6_addr_equal(group, &ma->mca_addr))
+                       if (!ipv6_addr_equal(group, &ma->mca_addr))
                                continue;
                        spin_lock_bh(&ma->mca_lock);
                        if (ma->mca_flags & MAF_TIMER_RUNNING) {
@@ -1268,11 +1267,10 @@ int igmp6_event_query(struct sk_buff *skb)
                                        ma->mca_flags &= ~MAF_GSQUERY;
                        }
                        if (!(ma->mca_flags & MAF_GSQUERY) ||
-                          mld_marksources(ma, ntohs(mlh2->nsrcs), mlh2->srcs))
+                           mld_marksources(ma, ntohs(mlh2->nsrcs), mlh2->srcs))
                                igmp6_group_queried(ma, max_delay);
                        spin_unlock_bh(&ma->mca_lock);
-                       if (group_type != IPV6_ADDR_ANY)
-                               break;
+                       break;
                }
        }
        read_unlock_bh(&idev->lock);
@@ -1351,7 +1349,7 @@ static int is_in(struct ifmcaddr6 *pmc, struct ip6_sf_list *psf, int type,
                         * in all filters
                         */
                        if (psf->sf_count[MCAST_INCLUDE])
-                               return 0;
+                               return type == MLD2_MODE_IS_INCLUDE;
                        return pmc->mca_sfcount[MCAST_EXCLUDE] ==
                                psf->sf_count[MCAST_EXCLUDE];
                }
@@ -1966,7 +1964,7 @@ static void sf_markstate(struct ifmcaddr6 *pmc)
 
 static int sf_setstate(struct ifmcaddr6 *pmc)
 {
-       struct ip6_sf_list *psf;
+       struct ip6_sf_list *psf, *dpsf;
        int mca_xcount = pmc->mca_sfcount[MCAST_EXCLUDE];
        int qrv = pmc->idev->mc_qrv;
        int new_in, rv;
@@ -1978,8 +1976,48 @@ static int sf_setstate(struct ifmcaddr6 *pmc)
                                !psf->sf_count[MCAST_INCLUDE];
                } else
                        new_in = psf->sf_count[MCAST_INCLUDE] != 0;
-               if (new_in != psf->sf_oldin) {
-                       psf->sf_crcount = qrv;
+               if (new_in) {
+                       if (!psf->sf_oldin) {
+                               struct ip6_sf_list *prev = 0;
+
+                               for (dpsf=pmc->mca_tomb; dpsf;
+                                    dpsf=dpsf->sf_next) {
+                                       if (ipv6_addr_equal(&dpsf->sf_addr,
+                                           &psf->sf_addr))
+                                               break;
+                                       prev = dpsf;
+                               }
+                               if (dpsf) {
+                                       if (prev)
+                                               prev->sf_next = dpsf->sf_next;
+                                       else
+                                               pmc->mca_tomb = dpsf->sf_next;
+                                       kfree(dpsf);
+                               }
+                               psf->sf_crcount = qrv;
+                               rv++;
+                       }
+               } else if (psf->sf_oldin) {
+                       psf->sf_crcount = 0;
+                       /*
+                        * add or update "delete" records if an active filter
+                        * is now inactive
+                        */
+                       for (dpsf=pmc->mca_tomb; dpsf; dpsf=dpsf->sf_next)
+                               if (ipv6_addr_equal(&dpsf->sf_addr,
+                                   &psf->sf_addr))
+                                       break;
+                       if (!dpsf) {
+                               dpsf = (struct ip6_sf_list *)
+                                       kmalloc(sizeof(*dpsf), GFP_ATOMIC);
+                               if (!dpsf)
+                                       continue;
+                               *dpsf = *psf;
+                               /* pmc->mca_lock held by callers */
+                               dpsf->sf_next = pmc->mca_tomb;
+                               pmc->mca_tomb = dpsf;
+                       }
+                       dpsf->sf_crcount = qrv;
                        rv++;
                }
        }
@@ -2373,7 +2411,7 @@ static int igmp6_mc_seq_show(struct seq_file *seq, void *v)
        struct igmp6_mc_iter_state *state = igmp6_mc_seq_private(seq);
 
        seq_printf(seq,
-                  "%-4d %-15s " NIP6_FMT " %5d %08X %ld\n", 
+                  "%-4d %-15s " NIP6_SEQFMT " %5d %08X %ld\n", 
                   state->dev->ifindex, state->dev->name,
                   NIP6(im->mca_addr),
                   im->mca_users, im->mca_flags,
@@ -2542,12 +2580,12 @@ static int igmp6_mcf_seq_show(struct seq_file *seq, void *v)
        if (v == SEQ_START_TOKEN) {
                seq_printf(seq, 
                           "%3s %6s "
-                          "%39s %39s %6s %6s\n", "Idx",
+                          "%32s %32s %6s %6s\n", "Idx",
                           "Device", "Multicast Address",
                           "Source Address", "INC", "EXC");
        } else {
                seq_printf(seq,
-                          "%3d %6.6s " NIP6_FMT " " NIP6_FMT " %6lu %6lu\n",
+                          "%3d %6.6s " NIP6_SEQFMT " " NIP6_SEQFMT " %6lu %6lu\n",
                           state->dev->ifindex, state->dev->name,
                           NIP6(state->im->mca_addr),
                           NIP6(psf->sf_addr),
index 663b4749820d7d5e9ffcb955970457e7e7098470..db6073c941633f3ccd602f646f286defb2074253 100644 (file)
@@ -4,7 +4,6 @@
 
 # Link order matters here.
 obj-$(CONFIG_IP6_NF_IPTABLES) += ip6_tables.o
-obj-$(CONFIG_IP6_NF_MATCH_LENGTH) += ip6t_length.o
 obj-$(CONFIG_IP6_NF_MATCH_RT) += ip6t_rt.o
 obj-$(CONFIG_IP6_NF_MATCH_OPTS) += ip6t_hbh.o ip6t_dst.o
 obj-$(CONFIG_IP6_NF_MATCH_IPV6HEADER) += ip6t_ipv6header.o
index 80fe82669ce2616af5f99e866140dbf50d65a316..b4c153a53500f242b3fae2d67f67ef3ec22dbe53 100644 (file)
@@ -36,19 +36,19 @@ MODULE_AUTHOR("Andras Kis-Szabo <kisza@sch.bme.hu>");
 #endif
 
 /*
- * (Type & 0xC0) >> 6
- *     0       -> ignorable
- *     1       -> must drop the packet
- *     2       -> send ICMP PARM PROB regardless and drop packet
- *     3       -> Send ICMP if not a multicast address and drop packet
+ *  (Type & 0xC0) >> 6
+ *     0       -> ignorable
+ *     1       -> must drop the packet
+ *     2       -> send ICMP PARM PROB regardless and drop packet
+ *     3       -> Send ICMP if not a multicast address and drop packet
  *  (Type & 0x20) >> 5
- *     0       -> invariant
- *     1       -> can change the routing
+ *     0       -> invariant
+ *     1       -> can change the routing
  *  (Type & 0x1F) Type
- *      0      -> Pad1 (only 1 byte!)
- *      1      -> PadN LENGTH info (total length = length + 2)
- *      C0 | 2 -> JUMBO 4 x x x x ( xxxx > 64k )
- *      5      -> RTALERT 2 x x
+ *     0       -> Pad1 (only 1 byte!)
+ *     1       -> PadN LENGTH info (total length = length + 2)
+ *     C0 | 2  -> JUMBO 4 x x x x ( xxxx > 64k )
+ *     5       -> RTALERT 2 x x
  */
 
 static int
@@ -60,16 +60,16 @@ match(const struct sk_buff *skb,
       unsigned int protoff,
       int *hotdrop)
 {
-       struct ipv6_opt_hdr _optsh, *oh;
-       const struct ip6t_opts *optinfo = matchinfo;
-       unsigned int temp;
-       unsigned int ptr;
-       unsigned int hdrlen = 0;
-       unsigned int ret = 0;
-       u8 _opttype, *tp = NULL;
-       u8 _optlen, *lp = NULL;
-       unsigned int optlen;
-       
+       struct ipv6_opt_hdr _optsh, *oh;
+       const struct ip6t_opts *optinfo = matchinfo;
+       unsigned int temp;
+       unsigned int ptr;
+       unsigned int hdrlen = 0;
+       unsigned int ret = 0;
+       u8 _opttype, *tp = NULL;
+       u8 _optlen, *lp = NULL;
+       unsigned int optlen;
+
 #if HOPBYHOP
        if (ipv6_find_hdr(skb, &ptr, NEXTHDR_HOP, NULL) < 0)
 #else
@@ -77,42 +77,41 @@ match(const struct sk_buff *skb,
 #endif
                return 0;
 
-       oh = skb_header_pointer(skb, ptr, sizeof(_optsh), &_optsh);
-       if (oh == NULL){
-              *hotdrop = 1;
-                       return 0;
-       }
-
-       hdrlen = ipv6_optlen(oh);
-       if (skb->len - ptr < hdrlen){
-              /* Packet smaller than it's length field */
-                       return 0;
-       }
-
-       DEBUGP("IPv6 OPTS LEN %u %u ", hdrlen, oh->hdrlen);
-
-       DEBUGP("len %02X %04X %02X ",
-                       optinfo->hdrlen, hdrlen,
-                       (!(optinfo->flags & IP6T_OPTS_LEN) ||
-                           ((optinfo->hdrlen == hdrlen) ^
-                           !!(optinfo->invflags & IP6T_OPTS_INV_LEN))));
-
-       ret = (oh != NULL)
-                       &&
-               (!(optinfo->flags & IP6T_OPTS_LEN) ||
-                           ((optinfo->hdrlen == hdrlen) ^
-                           !!(optinfo->invflags & IP6T_OPTS_INV_LEN)));
-
-       ptr += 2;
-       hdrlen -= 2;
-       if ( !(optinfo->flags & IP6T_OPTS_OPTS) ){
-              return ret;
+       oh = skb_header_pointer(skb, ptr, sizeof(_optsh), &_optsh);
+       if (oh == NULL) {
+               *hotdrop = 1;
+               return 0;
+       }
+
+       hdrlen = ipv6_optlen(oh);
+       if (skb->len - ptr < hdrlen) {
+               /* Packet smaller than its length field */
+               return 0;
+       }
+
+       DEBUGP("IPv6 OPTS LEN %u %u ", hdrlen, oh->hdrlen);
+
+       DEBUGP("len %02X %04X %02X ",
+              optinfo->hdrlen, hdrlen,
+              (!(optinfo->flags & IP6T_OPTS_LEN) ||
+               ((optinfo->hdrlen == hdrlen) ^
+                !!(optinfo->invflags & IP6T_OPTS_INV_LEN))));
+
+       ret = (oh != NULL) &&
+             (!(optinfo->flags & IP6T_OPTS_LEN) ||
+              ((optinfo->hdrlen == hdrlen) ^
+               !!(optinfo->invflags & IP6T_OPTS_INV_LEN)));
+
+       ptr += 2;
+       hdrlen -= 2;
+       if (!(optinfo->flags & IP6T_OPTS_OPTS)) {
+               return ret;
        } else if (optinfo->flags & IP6T_OPTS_NSTRICT) {
                DEBUGP("Not strict - not implemented");
        } else {
                DEBUGP("Strict ");
-               DEBUGP("#%d ",optinfo->optsnr);
-               for(temp=0; temp<optinfo->optsnr; temp++){
+               DEBUGP("#%d ", optinfo->optsnr);
+               for (temp = 0; temp < optinfo->optsnr; temp++) {
                        /* type field exists ? */
                        if (hdrlen < 1)
                                break;
@@ -122,10 +121,10 @@ match(const struct sk_buff *skb,
                                break;
 
                        /* Type check */
-                       if (*tp != (optinfo->opts[temp] & 0xFF00)>>8){
+                       if (*tp != (optinfo->opts[temp] & 0xFF00) >> 8) {
                                DEBUGP("Tbad %02X %02X\n",
                                       *tp,
-                                      (optinfo->opts[temp] & 0xFF00)>>8);
+                                      (optinfo->opts[temp] & 0xFF00) >> 8);
                                return 0;
                        } else {
                                DEBUGP("Tok ");
@@ -169,7 +168,8 @@ match(const struct sk_buff *skb,
                }
                if (temp == optinfo->optsnr)
                        return ret;
-               else return 0;
+               else
+                       return 0;
        }
 
        return 0;
@@ -178,25 +178,24 @@ match(const struct sk_buff *skb,
 /* Called when user tries to insert an entry of this type. */
 static int
 checkentry(const char *tablename,
-          const void *info,
-          void *matchinfo,
-          unsigned int matchinfosize,
-          unsigned int hook_mask)
+          const void *info,
+          void *matchinfo,
+          unsigned int matchinfosize,
+          unsigned int hook_mask)
 {
-       const struct ip6t_opts *optsinfo = matchinfo;
-
-       if (matchinfosize != IP6T_ALIGN(sizeof(struct ip6t_opts))) {
-              DEBUGP("ip6t_opts: matchsize %u != %u\n",
-                      matchinfosize, IP6T_ALIGN(sizeof(struct ip6t_opts)));
-              return 0;
-       }
-       if (optsinfo->invflags & ~IP6T_OPTS_INV_MASK) {
-              DEBUGP("ip6t_opts: unknown flags %X\n",
-                      optsinfo->invflags);
-              return 0;
-       }
-
-       return 1;
+       const struct ip6t_opts *optsinfo = matchinfo;
+
+       if (matchinfosize != IP6T_ALIGN(sizeof(struct ip6t_opts))) {
+               DEBUGP("ip6t_opts: matchsize %u != %u\n",
+                      matchinfosize, IP6T_ALIGN(sizeof(struct ip6t_opts)));
+               return 0;
+       }
+       if (optsinfo->invflags & ~IP6T_OPTS_INV_MASK) {
+               DEBUGP("ip6t_opts: unknown flags %X\n", optsinfo->invflags);
+               return 0;
+       }
+
+       return 1;
 }
 
 static struct ip6t_match opts_match = {
@@ -212,12 +211,12 @@ static struct ip6t_match opts_match = {
 
 static int __init init(void)
 {
-       return ip6t_register_match(&opts_match);
+       return ip6t_register_match(&opts_match);
 }
 
 static void __exit cleanup(void)
 {
-       ip6t_unregister_match(&opts_match);
+       ip6t_unregister_match(&opts_match);
 }
 
 module_init(init);
index ddf5f571909c03b27b8038e11af598da276aea6e..27396ac0b9edb0a229c270ca92a648c325ae61fa 100644 (file)
@@ -27,45 +27,45 @@ match(const struct sk_buff *skb,
       unsigned int protoff,
       int *hotdrop)
 {
+       unsigned char eui64[8];
+       int i = 0;
 
-    unsigned char eui64[8];
-    int i=0;
-
-     if ( !(skb->mac.raw >= skb->head
-                && (skb->mac.raw + ETH_HLEN) <= skb->data)
-                && offset != 0) {
-                        *hotdrop = 1;
-                        return 0;
-                }
-    
-    memset(eui64, 0, sizeof(eui64));
-
-    if (eth_hdr(skb)->h_proto == ntohs(ETH_P_IPV6)) {
-      if (skb->nh.ipv6h->version == 0x6) { 
-         memcpy(eui64, eth_hdr(skb)->h_source, 3);
-         memcpy(eui64 + 5, eth_hdr(skb)->h_source + 3, 3);
-        eui64[3]=0xff;
-        eui64[4]=0xfe;
-        eui64[0] |= 0x02;
-
-        i=0;
-        while ((skb->nh.ipv6h->saddr.s6_addr[8+i] ==
-                        eui64[i]) && (i<8)) i++;
-
-        if ( i == 8 )
-               return 1;
-      }
-    }
-
-    return 0;
+       if (!(skb->mac.raw >= skb->head &&
+             (skb->mac.raw + ETH_HLEN) <= skb->data) &&
+           offset != 0) {
+               *hotdrop = 1;
+               return 0;
+       }
+
+       memset(eui64, 0, sizeof(eui64));
+
+       if (eth_hdr(skb)->h_proto == ntohs(ETH_P_IPV6)) {
+               if (skb->nh.ipv6h->version == 0x6) {
+                       memcpy(eui64, eth_hdr(skb)->h_source, 3);
+                       memcpy(eui64 + 5, eth_hdr(skb)->h_source + 3, 3);
+                       eui64[3] = 0xff;
+                       eui64[4] = 0xfe;
+                       eui64[0] |= 0x02;
+
+                       i = 0;
+                       while ((skb->nh.ipv6h->saddr.s6_addr[8+i] == eui64[i])
+                              && (i < 8))
+                               i++;
+
+                       if (i == 8)
+                               return 1;
+               }
+       }
+
+       return 0;
 }
 
 static int
 ip6t_eui64_checkentry(const char *tablename,
-                  const void  *ip,
-                  void *matchinfo,
-                  unsigned int matchsize,
-                  unsigned int hook_mask)
+                     const void *ip,
+                     void *matchinfo,
+                     unsigned int matchsize,
+                     unsigned int hook_mask)
 {
        if (hook_mask
            & ~((1 << NF_IP6_PRE_ROUTING) | (1 << NF_IP6_LOCAL_IN) |
index a9964b946ed503409eeaa750d044ac82fa654bc3..4c14125a0e26d18ec775bcfcfe0b27661d98713e 100644 (file)
@@ -31,12 +31,12 @@ MODULE_AUTHOR("Andras Kis-Szabo <kisza@sch.bme.hu>");
 static inline int
 id_match(u_int32_t min, u_int32_t max, u_int32_t id, int invert)
 {
-       int r=0;
-       DEBUGP("frag id_match:%c 0x%x <= 0x%x <= 0x%x",invert? '!':' ',
-              min,id,max);
-       r=(id >= min && id <= max) ^ invert;
-       DEBUGP(" result %s\n",r? "PASS" : "FAILED");
-       return r;
+       int r = 0;
+       DEBUGP("frag id_match:%c 0x%x <= 0x%x <= 0x%x", invert ? '!' : ' ',
+              min, id, max);
+       r = (id >= min && id <= max) ^ invert;
+       DEBUGP(" result %s\n", r ? "PASS" : "FAILED");
+       return r;
 }
 
 static int
@@ -48,92 +48,91 @@ match(const struct sk_buff *skb,
       unsigned int protoff,
       int *hotdrop)
 {
-       struct frag_hdr _frag, *fh;
-       const struct ip6t_frag *fraginfo = matchinfo;
-       unsigned int ptr;
+       struct frag_hdr _frag, *fh;
+       const struct ip6t_frag *fraginfo = matchinfo;
+       unsigned int ptr;
 
        if (ipv6_find_hdr(skb, &ptr, NEXTHDR_FRAGMENT, NULL) < 0)
                return 0;
 
        fh = skb_header_pointer(skb, ptr, sizeof(_frag), &_frag);
-       if (fh == NULL){
+       if (fh == NULL) {
                *hotdrop = 1;
                return 0;
        }
 
-       DEBUGP("INFO %04X ", fh->frag_off);
-       DEBUGP("OFFSET %04X ", ntohs(fh->frag_off) & ~0x7);
-       DEBUGP("RES %02X %04X", fh->reserved, ntohs(fh->frag_off) & 0x6);
-       DEBUGP("MF %04X ", fh->frag_off & htons(IP6_MF));
-       DEBUGP("ID %u %08X\n", ntohl(fh->identification),
-             ntohl(fh->identification));
-
-       DEBUGP("IPv6 FRAG id %02X ",
-                       (id_match(fraginfo->ids[0], fraginfo->ids[1],
-                           ntohl(fh->identification),
-                           !!(fraginfo->invflags & IP6T_FRAG_INV_IDS))));
-       DEBUGP("res %02X %02X%04X %02X ", 
-                       (fraginfo->flags & IP6T_FRAG_RES), fh->reserved,
-               ntohs(fh->frag_off) & 0x6,
-                       !((fraginfo->flags & IP6T_FRAG_RES)
-                       && (fh->reserved || (ntohs(fh->frag_off) & 0x06))));
-       DEBUGP("first %02X %02X %02X ", 
-                       (fraginfo->flags & IP6T_FRAG_FST),
-               ntohs(fh->frag_off) & ~0x7,
-                       !((fraginfo->flags & IP6T_FRAG_FST)
-                       && (ntohs(fh->frag_off) & ~0x7)));
-       DEBUGP("mf %02X %02X %02X ", 
-                       (fraginfo->flags & IP6T_FRAG_MF),
-               ntohs(fh->frag_off) & IP6_MF,
-                       !((fraginfo->flags & IP6T_FRAG_MF)
-                       && !((ntohs(fh->frag_off) & IP6_MF))));
-       DEBUGP("last %02X %02X %02X\n", 
-                       (fraginfo->flags & IP6T_FRAG_NMF),
-               ntohs(fh->frag_off) & IP6_MF,
-                       !((fraginfo->flags & IP6T_FRAG_NMF)
-                       && (ntohs(fh->frag_off) & IP6_MF)));
-
-       return (fh != NULL)
-                       &&
-                       (id_match(fraginfo->ids[0], fraginfo->ids[1],
-                         ntohl(fh->identification),
-                           !!(fraginfo->invflags & IP6T_FRAG_INV_IDS)))
-               &&
-               !((fraginfo->flags & IP6T_FRAG_RES)
-                       && (fh->reserved || (ntohs(fh->frag_off) & 0x6)))
-               &&
-               !((fraginfo->flags & IP6T_FRAG_FST)
-                       && (ntohs(fh->frag_off) & ~0x7))
-               &&
-               !((fraginfo->flags & IP6T_FRAG_MF)
-                       && !(ntohs(fh->frag_off) & IP6_MF))
-               &&
-               !((fraginfo->flags & IP6T_FRAG_NMF)
-                       && (ntohs(fh->frag_off) & IP6_MF));
+       DEBUGP("INFO %04X ", fh->frag_off);
+       DEBUGP("OFFSET %04X ", ntohs(fh->frag_off) & ~0x7);
+       DEBUGP("RES %02X %04X", fh->reserved, ntohs(fh->frag_off) & 0x6);
+       DEBUGP("MF %04X ", fh->frag_off & htons(IP6_MF));
+       DEBUGP("ID %u %08X\n", ntohl(fh->identification),
+              ntohl(fh->identification));
+
+       DEBUGP("IPv6 FRAG id %02X ",
+              (id_match(fraginfo->ids[0], fraginfo->ids[1],
+                        ntohl(fh->identification),
+                        !!(fraginfo->invflags & IP6T_FRAG_INV_IDS))));
+       DEBUGP("res %02X %02X%04X %02X ",
+              (fraginfo->flags & IP6T_FRAG_RES), fh->reserved,
+              ntohs(fh->frag_off) & 0x6,
+              !((fraginfo->flags & IP6T_FRAG_RES)
+                && (fh->reserved || (ntohs(fh->frag_off) & 0x06))));
+       DEBUGP("first %02X %02X %02X ",
+              (fraginfo->flags & IP6T_FRAG_FST),
+              ntohs(fh->frag_off) & ~0x7,
+              !((fraginfo->flags & IP6T_FRAG_FST)
+                && (ntohs(fh->frag_off) & ~0x7)));
+       DEBUGP("mf %02X %02X %02X ",
+              (fraginfo->flags & IP6T_FRAG_MF),
+              ntohs(fh->frag_off) & IP6_MF,
+              !((fraginfo->flags & IP6T_FRAG_MF)
+                && !((ntohs(fh->frag_off) & IP6_MF))));
+       DEBUGP("last %02X %02X %02X\n",
+              (fraginfo->flags & IP6T_FRAG_NMF),
+              ntohs(fh->frag_off) & IP6_MF,
+              !((fraginfo->flags & IP6T_FRAG_NMF)
+                && (ntohs(fh->frag_off) & IP6_MF)));
+
+       return (fh != NULL)
+              &&
+              (id_match(fraginfo->ids[0], fraginfo->ids[1],
+                        ntohl(fh->identification),
+                        !!(fraginfo->invflags & IP6T_FRAG_INV_IDS)))
+              &&
+              !((fraginfo->flags & IP6T_FRAG_RES)
+                && (fh->reserved || (ntohs(fh->frag_off) & 0x6)))
+              &&
+              !((fraginfo->flags & IP6T_FRAG_FST)
+                && (ntohs(fh->frag_off) & ~0x7))
+              &&
+              !((fraginfo->flags & IP6T_FRAG_MF)
+                && !(ntohs(fh->frag_off) & IP6_MF))
+              &&
+              !((fraginfo->flags & IP6T_FRAG_NMF)
+                && (ntohs(fh->frag_off) & IP6_MF));
 }
 
 /* Called when user tries to insert an entry of this type. */
 static int
 checkentry(const char *tablename,
-          const void *ip,
-          void *matchinfo,
-          unsigned int matchinfosize,
-          unsigned int hook_mask)
+          const void *ip,
+          void *matchinfo,
+          unsigned int matchinfosize,
+          unsigned int hook_mask)
 {
-       const struct ip6t_frag *fraginfo = matchinfo;
-
-       if (matchinfosize != IP6T_ALIGN(sizeof(struct ip6t_frag))) {
-              DEBUGP("ip6t_frag: matchsize %u != %u\n",
-                      matchinfosize, IP6T_ALIGN(sizeof(struct ip6t_frag)));
-              return 0;
-       }
-       if (fraginfo->invflags & ~IP6T_FRAG_INV_MASK) {
-              DEBUGP("ip6t_frag: unknown flags %X\n",
-                      fraginfo->invflags);
-              return 0;
-       }
-
-       return 1;
+       const struct ip6t_frag *fraginfo = matchinfo;
+
+       if (matchinfosize != IP6T_ALIGN(sizeof(struct ip6t_frag))) {
+               DEBUGP("ip6t_frag: matchsize %u != %u\n",
+                      matchinfosize, IP6T_ALIGN(sizeof(struct ip6t_frag)));
+               return 0;
+       }
+       if (fraginfo->invflags & ~IP6T_FRAG_INV_MASK) {
+               DEBUGP("ip6t_frag: unknown flags %X\n", fraginfo->invflags);
+               return 0;
+       }
+
+       return 1;
 }
 
 static struct ip6t_match frag_match = {
@@ -145,12 +144,12 @@ static struct ip6t_match frag_match = {
 
 static int __init init(void)
 {
-       return ip6t_register_match(&frag_match);
+       return ip6t_register_match(&frag_match);
 }
 
 static void __exit cleanup(void)
 {
-       ip6t_unregister_match(&frag_match);
+       ip6t_unregister_match(&frag_match);
 }
 
 module_init(init);
index ed8ded18bbd4f5970fadb49f180ef9bd275ea327..37a8474a7e0c911e1508aafc5a8e4f6f4d86c4a1 100644 (file)
@@ -36,19 +36,19 @@ MODULE_AUTHOR("Andras Kis-Szabo <kisza@sch.bme.hu>");
 #endif
 
 /*
- * (Type & 0xC0) >> 6
- *     0       -> ignorable
- *     1       -> must drop the packet
- *     2       -> send ICMP PARM PROB regardless and drop packet
- *     3       -> Send ICMP if not a multicast address and drop packet
+ *  (Type & 0xC0) >> 6
+ *     0       -> ignorable
+ *     1       -> must drop the packet
+ *     2       -> send ICMP PARM PROB regardless and drop packet
+ *     3       -> Send ICMP if not a multicast address and drop packet
  *  (Type & 0x20) >> 5
- *     0       -> invariant
- *     1       -> can change the routing
+ *     0       -> invariant
+ *     1       -> can change the routing
  *  (Type & 0x1F) Type
- *      0      -> Pad1 (only 1 byte!)
- *      1      -> PadN LENGTH info (total length = length + 2)
- *      C0 | 2 -> JUMBO 4 x x x x ( xxxx > 64k )
- *      5      -> RTALERT 2 x x
+ *     0       -> Pad1 (only 1 byte!)
+ *     1       -> PadN LENGTH info (total length = length + 2)
+ *     C0 | 2  -> JUMBO 4 x x x x ( xxxx > 64k )
+ *     5       -> RTALERT 2 x x
  */
 
 static int
@@ -60,16 +60,16 @@ match(const struct sk_buff *skb,
       unsigned int protoff,
       int *hotdrop)
 {
-       struct ipv6_opt_hdr _optsh, *oh;
-       const struct ip6t_opts *optinfo = matchinfo;
-       unsigned int temp;
-       unsigned int ptr;
-       unsigned int hdrlen = 0;
-       unsigned int ret = 0;
-       u8 _opttype, *tp = NULL;
-       u8 _optlen, *lp = NULL;
-       unsigned int optlen;
-       
+       struct ipv6_opt_hdr _optsh, *oh;
+       const struct ip6t_opts *optinfo = matchinfo;
+       unsigned int temp;
+       unsigned int ptr;
+       unsigned int hdrlen = 0;
+       unsigned int ret = 0;
+       u8 _opttype, *tp = NULL;
+       u8 _optlen, *lp = NULL;
+       unsigned int optlen;
+
 #if HOPBYHOP
        if (ipv6_find_hdr(skb, &ptr, NEXTHDR_HOP, NULL) < 0)
 #else
@@ -77,42 +77,41 @@ match(const struct sk_buff *skb,
 #endif
                return 0;
 
-       oh = skb_header_pointer(skb, ptr, sizeof(_optsh), &_optsh);
-       if (oh == NULL){
-              *hotdrop = 1;
-                       return 0;
-       }
-
-       hdrlen = ipv6_optlen(oh);
-       if (skb->len - ptr < hdrlen){
-              /* Packet smaller than it's length field */
-                       return 0;
-       }
-
-       DEBUGP("IPv6 OPTS LEN %u %u ", hdrlen, oh->hdrlen);
-
-       DEBUGP("len %02X %04X %02X ",
-                       optinfo->hdrlen, hdrlen,
-                       (!(optinfo->flags & IP6T_OPTS_LEN) ||
-                           ((optinfo->hdrlen == hdrlen) ^
-                           !!(optinfo->invflags & IP6T_OPTS_INV_LEN))));
-
-       ret = (oh != NULL)
-                       &&
-               (!(optinfo->flags & IP6T_OPTS_LEN) ||
-                           ((optinfo->hdrlen == hdrlen) ^
-                           !!(optinfo->invflags & IP6T_OPTS_INV_LEN)));
-
-       ptr += 2;
-       hdrlen -= 2;
-       if ( !(optinfo->flags & IP6T_OPTS_OPTS) ){
-              return ret;
+       oh = skb_header_pointer(skb, ptr, sizeof(_optsh), &_optsh);
+       if (oh == NULL) {
+               *hotdrop = 1;
+               return 0;
+       }
+
+       hdrlen = ipv6_optlen(oh);
+       if (skb->len - ptr < hdrlen) {
+               /* Packet smaller than its length field */
+               return 0;
+       }
+
+       DEBUGP("IPv6 OPTS LEN %u %u ", hdrlen, oh->hdrlen);
+
+       DEBUGP("len %02X %04X %02X ",
+              optinfo->hdrlen, hdrlen,
+              (!(optinfo->flags & IP6T_OPTS_LEN) ||
+               ((optinfo->hdrlen == hdrlen) ^
+                !!(optinfo->invflags & IP6T_OPTS_INV_LEN))));
+
+       ret = (oh != NULL) &&
+             (!(optinfo->flags & IP6T_OPTS_LEN) ||
+              ((optinfo->hdrlen == hdrlen) ^
+               !!(optinfo->invflags & IP6T_OPTS_INV_LEN)));
+
+       ptr += 2;
+       hdrlen -= 2;
+       if (!(optinfo->flags & IP6T_OPTS_OPTS)) {
+               return ret;
        } else if (optinfo->flags & IP6T_OPTS_NSTRICT) {
                DEBUGP("Not strict - not implemented");
        } else {
                DEBUGP("Strict ");
-               DEBUGP("#%d ",optinfo->optsnr);
-               for(temp=0; temp<optinfo->optsnr; temp++){
+               DEBUGP("#%d ", optinfo->optsnr);
+               for (temp = 0; temp < optinfo->optsnr; temp++) {
                        /* type field exists ? */
                        if (hdrlen < 1)
                                break;
@@ -122,10 +121,10 @@ match(const struct sk_buff *skb,
                                break;
 
                        /* Type check */
-                       if (*tp != (optinfo->opts[temp] & 0xFF00)>>8){
+                       if (*tp != (optinfo->opts[temp] & 0xFF00) >> 8) {
                                DEBUGP("Tbad %02X %02X\n",
                                       *tp,
-                                      (optinfo->opts[temp] & 0xFF00)>>8);
+                                      (optinfo->opts[temp] & 0xFF00) >> 8);
                                return 0;
                        } else {
                                DEBUGP("Tok ");
@@ -169,7 +168,8 @@ match(const struct sk_buff *skb,
                }
                if (temp == optinfo->optsnr)
                        return ret;
-               else return 0;
+               else
+                       return 0;
        }
 
        return 0;
@@ -178,25 +178,24 @@ match(const struct sk_buff *skb,
 /* Called when user tries to insert an entry of this type. */
 static int
 checkentry(const char *tablename,
-          const void *entry,
-          void *matchinfo,
-          unsigned int matchinfosize,
-          unsigned int hook_mask)
+          const void *entry,
+          void *matchinfo,
+          unsigned int matchinfosize,
+          unsigned int hook_mask)
 {
-       const struct ip6t_opts *optsinfo = matchinfo;
-
-       if (matchinfosize != IP6T_ALIGN(sizeof(struct ip6t_opts))) {
-              DEBUGP("ip6t_opts: matchsize %u != %u\n",
-                      matchinfosize, IP6T_ALIGN(sizeof(struct ip6t_opts)));
-              return 0;
-       }
-       if (optsinfo->invflags & ~IP6T_OPTS_INV_MASK) {
-              DEBUGP("ip6t_opts: unknown flags %X\n",
-                      optsinfo->invflags);
-              return 0;
-       }
-
-       return 1;
+       const struct ip6t_opts *optsinfo = matchinfo;
+
+       if (matchinfosize != IP6T_ALIGN(sizeof(struct ip6t_opts))) {
+               DEBUGP("ip6t_opts: matchsize %u != %u\n",
+                      matchinfosize, IP6T_ALIGN(sizeof(struct ip6t_opts)));
+               return 0;
+       }
+       if (optsinfo->invflags & ~IP6T_OPTS_INV_MASK) {
+               DEBUGP("ip6t_opts: unknown flags %X\n", optsinfo->invflags);
+               return 0;
+       }
+
+       return 1;
 }
 
 static struct ip6t_match opts_match = {
@@ -212,12 +211,12 @@ static struct ip6t_match opts_match = {
 
 static int __init init(void)
 {
-       return ip6t_register_match(&opts_match);
+       return ip6t_register_match(&opts_match);
 }
 
 static void __exit cleanup(void)
 {
-       ip6t_unregister_match(&opts_match);
+       ip6t_unregister_match(&opts_match);
 }
 
 module_init(init);
index fda1ceaf5a2976c579ef22e7899791561a465bff..83ad6b272f7e6fbf201f09fc1b74a19a71abd0b2 100644 (file)
@@ -50,20 +50,20 @@ ipv6header_match(const struct sk_buff *skb,
        len = skb->len - ptr;
        temp = 0;
 
-        while (ip6t_ext_hdr(nexthdr)) {
+       while (ip6t_ext_hdr(nexthdr)) {
                struct ipv6_opt_hdr _hdr, *hp;
-               int hdrlen;
+               int hdrlen;
 
                /* Is there enough space for the next ext header? */
-                if (len < (int)sizeof(struct ipv6_opt_hdr))
-                        return 0;
+               if (len < (int)sizeof(struct ipv6_opt_hdr))
+                       return 0;
                /* No more exthdr -> evaluate */
-                if (nexthdr == NEXTHDR_NONE) {
+               if (nexthdr == NEXTHDR_NONE) {
                        temp |= MASK_NONE;
                        break;
                }
                /* ESP -> evaluate */
-                if (nexthdr == NEXTHDR_ESP) {
+               if (nexthdr == NEXTHDR_ESP) {
                        temp |= MASK_ESP;
                        break;
                }
@@ -72,43 +72,43 @@ ipv6header_match(const struct sk_buff *skb,
                BUG_ON(hp == NULL);
 
                /* Calculate the header length */
-                if (nexthdr == NEXTHDR_FRAGMENT) {
-                        hdrlen = 8;
-                } else if (nexthdr == NEXTHDR_AUTH)
-                        hdrlen = (hp->hdrlen+2)<<2;
-                else
-                        hdrlen = ipv6_optlen(hp);
+               if (nexthdr == NEXTHDR_FRAGMENT) {
+                       hdrlen = 8;
+               } else if (nexthdr == NEXTHDR_AUTH)
+                       hdrlen = (hp->hdrlen + 2) << 2;
+               else
+                       hdrlen = ipv6_optlen(hp);
 
                /* set the flag */
-               switch (nexthdr){
-                       case NEXTHDR_HOP:
-                               temp |= MASK_HOPOPTS;
-                               break;
-                       case NEXTHDR_ROUTING:
-                               temp |= MASK_ROUTING;
-                               break;
-                       case NEXTHDR_FRAGMENT:
-                               temp |= MASK_FRAGMENT;
-                               break;
-                       case NEXTHDR_AUTH:
-                               temp |= MASK_AH;
-                               break;
-                       case NEXTHDR_DEST:
-                               temp |= MASK_DSTOPTS;
-                               break;
-                       default:
-                               return 0;
-                               break;
+               switch (nexthdr) {
+               case NEXTHDR_HOP:
+                       temp |= MASK_HOPOPTS;
+                       break;
+               case NEXTHDR_ROUTING:
+                       temp |= MASK_ROUTING;
+                       break;
+               case NEXTHDR_FRAGMENT:
+                       temp |= MASK_FRAGMENT;
+                       break;
+               case NEXTHDR_AUTH:
+                       temp |= MASK_AH;
+                       break;
+               case NEXTHDR_DEST:
+                       temp |= MASK_DSTOPTS;
+                       break;
+               default:
+                       return 0;
+                       break;
                }
 
-                nexthdr = hp->nexthdr;
-                len -= hdrlen;
-                ptr += hdrlen;
+               nexthdr = hp->nexthdr;
+               len -= hdrlen;
+               ptr += hdrlen;
                if (ptr > skb->len)
                        break;
-        }
+       }
 
-       if ( (nexthdr != NEXTHDR_NONE ) && (nexthdr != NEXTHDR_ESP) )
+       if ((nexthdr != NEXTHDR_NONE) && (nexthdr != NEXTHDR_ESP))
                temp |= MASK_PROTO;
 
        if (info->modeflag)
@@ -137,8 +137,8 @@ ipv6header_checkentry(const char *tablename,
                return 0;
 
        /* invflags is 0 or 0xff in hard mode */
-       if ((!info->modeflag) && info->invflags != 0x00
-                             && info->invflags != 0xFF)
+       if ((!info->modeflag) && info->invflags != 0x00 &&
+           info->invflags != 0xFF)
                return 0;
 
        return 1;
@@ -152,7 +152,7 @@ static struct ip6t_match ip6t_ipv6header_match = {
        .me             = THIS_MODULE,
 };
 
-static int  __init ipv6header_init(void)
+static int __init ipv6header_init(void)
 {
        return ip6t_register_match(&ip6t_ipv6header_match);
 }
@@ -164,4 +164,3 @@ static void __exit ipv6header_exit(void)
 
 module_init(ipv6header_init);
 module_exit(ipv6header_exit);
-
index 5409b375b5121efbc3eea1ede04c7e80bafc471d..8c8a4c7ec9340cee4d778087d3e731b5649ce212 100644 (file)
@@ -36,14 +36,14 @@ match(const struct sk_buff *skb,
        if (!skb->sk || !skb->sk->sk_socket || !skb->sk->sk_socket->file)
                return 0;
 
-       if(info->match & IP6T_OWNER_UID) {
-               if((skb->sk->sk_socket->file->f_uid != info->uid) ^
+       if (info->match & IP6T_OWNER_UID) {
+               if ((skb->sk->sk_socket->file->f_uid != info->uid) ^
                    !!(info->invert & IP6T_OWNER_UID))
                        return 0;
        }
 
-       if(info->match & IP6T_OWNER_GID) {
-               if((skb->sk->sk_socket->file->f_gid != info->gid) ^
+       if (info->match & IP6T_OWNER_GID) {
+               if ((skb->sk->sk_socket->file->f_gid != info->gid) ^
                    !!(info->invert & IP6T_OWNER_GID))
                        return 0;
        }
@@ -53,23 +53,23 @@ match(const struct sk_buff *skb,
 
 static int
 checkentry(const char *tablename,
-           const void  *ip,
-           void *matchinfo,
-           unsigned int matchsize,
-           unsigned int hook_mask)
+          const void *ip,
+          void *matchinfo,
+          unsigned int matchsize,
+          unsigned int hook_mask)
 {
        const struct ip6t_owner_info *info = matchinfo;
 
-        if (hook_mask
-            & ~((1 << NF_IP6_LOCAL_OUT) | (1 << NF_IP6_POST_ROUTING))) {
-                printk("ip6t_owner: only valid for LOCAL_OUT or POST_ROUTING.\n");
-                return 0;
-        }
+       if (hook_mask
+           & ~((1 << NF_IP6_LOCAL_OUT) | (1 << NF_IP6_POST_ROUTING))) {
+               printk("ip6t_owner: only valid for LOCAL_OUT or POST_ROUTING.\n");
+               return 0;
+       }
 
        if (matchsize != IP6T_ALIGN(sizeof(struct ip6t_owner_info)))
                return 0;
 
-       if (info->match & (IP6T_OWNER_PID|IP6T_OWNER_SID)) {
+       if (info->match & (IP6T_OWNER_PID | IP6T_OWNER_SID)) {
                printk("ipt_owner: pid and sid matching "
                       "not supported anymore\n");
                return 0;
index 13fedad48c1d76a549aa4c467d75b571e7002a76..afe1cc4c18a5bffc734d6011b1dfddd395007158 100644 (file)
@@ -118,7 +118,7 @@ static int match(const struct sk_buff *skb,
        return ret;
 }
 
-static int checkentry(const char *tablename, const struct ip6t_ip6 *ip,
+static int checkentry(const char *tablename, const void *ip_void,
                       void *matchinfo, unsigned int matchsize,
                       unsigned int hook_mask)
 {
index 8465b4375855862f7f196ce260b0a64ef2d61343..8f82476dc89e6a46d6d644fb95abbcf568718ea1 100644 (file)
@@ -33,12 +33,12 @@ MODULE_AUTHOR("Andras Kis-Szabo <kisza@sch.bme.hu>");
 static inline int
 segsleft_match(u_int32_t min, u_int32_t max, u_int32_t id, int invert)
 {
-       int r=0;
-       DEBUGP("rt segsleft_match:%c 0x%x <= 0x%x <= 0x%x",invert? '!':' ',
-              min,id,max);
-       r=(id >= min && id <= max) ^ invert;
-       DEBUGP(" result %s\n",r? "PASS" : "FAILED");
-       return r;
+       int r = 0;
+       DEBUGP("rt segsleft_match:%c 0x%x <= 0x%x <= 0x%x",
+              invert ? '!' : ' ', min, id, max);
+       r = (id >= min && id <= max) ^ invert;
+       DEBUGP(" result %s\n", r ? "PASS" : "FAILED");
+       return r;
 }
 
 static int
@@ -50,87 +50,93 @@ match(const struct sk_buff *skb,
       unsigned int protoff,
       int *hotdrop)
 {
-       struct ipv6_rt_hdr _route, *rh;
-       const struct ip6t_rt *rtinfo = matchinfo;
-       unsigned int temp;
-       unsigned int ptr;
-       unsigned int hdrlen = 0;
-       unsigned int ret = 0;
-       struct in6_addr *ap, _addr;
+       struct ipv6_rt_hdr _route, *rh;
+       const struct ip6t_rt *rtinfo = matchinfo;
+       unsigned int temp;
+       unsigned int ptr;
+       unsigned int hdrlen = 0;
+       unsigned int ret = 0;
+       struct in6_addr *ap, _addr;
 
        if (ipv6_find_hdr(skb, &ptr, NEXTHDR_ROUTING, NULL) < 0)
                return 0;
 
-       rh = skb_header_pointer(skb, ptr, sizeof(_route), &_route);
-       if (rh == NULL){
-              *hotdrop = 1;
-                       return 0;
-       }
-
-       hdrlen = ipv6_optlen(rh);
-       if (skb->len - ptr < hdrlen){
-              /* Pcket smaller than its length field */
-                       return 0;
-       }
-
-       DEBUGP("IPv6 RT LEN %u %u ", hdrlen, rh->hdrlen);
-       DEBUGP("TYPE %04X ", rh->type);
-       DEBUGP("SGS_LEFT %u %02X\n", rh->segments_left, rh->segments_left);
-
-       DEBUGP("IPv6 RT segsleft %02X ",
-                       (segsleft_match(rtinfo->segsleft[0], rtinfo->segsleft[1],
-                           rh->segments_left,
-                           !!(rtinfo->invflags & IP6T_RT_INV_SGS))));
-       DEBUGP("type %02X %02X %02X ",
-                       rtinfo->rt_type, rh->type, 
-                       (!(rtinfo->flags & IP6T_RT_TYP) ||
-                           ((rtinfo->rt_type == rh->type) ^
-                           !!(rtinfo->invflags & IP6T_RT_INV_TYP))));
-       DEBUGP("len %02X %04X %02X ",
-                       rtinfo->hdrlen, hdrlen,
-                       (!(rtinfo->flags & IP6T_RT_LEN) ||
-                           ((rtinfo->hdrlen == hdrlen) ^
-                           !!(rtinfo->invflags & IP6T_RT_INV_LEN))));
-       DEBUGP("res %02X %02X %02X ", 
-                       (rtinfo->flags & IP6T_RT_RES), ((struct rt0_hdr *)rh)->reserved,
-                       !((rtinfo->flags & IP6T_RT_RES) && (((struct rt0_hdr *)rh)->reserved)));
-
-       ret = (rh != NULL)
-                       &&
-                       (segsleft_match(rtinfo->segsleft[0], rtinfo->segsleft[1],
-                           rh->segments_left,
-                           !!(rtinfo->invflags & IP6T_RT_INV_SGS)))
-               &&
-               (!(rtinfo->flags & IP6T_RT_LEN) ||
-                           ((rtinfo->hdrlen == hdrlen) ^
-                           !!(rtinfo->invflags & IP6T_RT_INV_LEN)))
-               &&
-                       (!(rtinfo->flags & IP6T_RT_TYP) ||
-                           ((rtinfo->rt_type == rh->type) ^
-                           !!(rtinfo->invflags & IP6T_RT_INV_TYP)));
+       rh = skb_header_pointer(skb, ptr, sizeof(_route), &_route);
+       if (rh == NULL) {
+               *hotdrop = 1;
+               return 0;
+       }
+
+       hdrlen = ipv6_optlen(rh);
+       if (skb->len - ptr < hdrlen) {
+               /* Packet smaller than its length field */
+               return 0;
+       }
+
+       DEBUGP("IPv6 RT LEN %u %u ", hdrlen, rh->hdrlen);
+       DEBUGP("TYPE %04X ", rh->type);
+       DEBUGP("SGS_LEFT %u %02X\n", rh->segments_left, rh->segments_left);
+
+       DEBUGP("IPv6 RT segsleft %02X ",
+              (segsleft_match(rtinfo->segsleft[0], rtinfo->segsleft[1],
+                              rh->segments_left,
+                              !!(rtinfo->invflags & IP6T_RT_INV_SGS))));
+       DEBUGP("type %02X %02X %02X ",
+              rtinfo->rt_type, rh->type,
+              (!(rtinfo->flags & IP6T_RT_TYP) ||
+               ((rtinfo->rt_type == rh->type) ^
+                !!(rtinfo->invflags & IP6T_RT_INV_TYP))));
+       DEBUGP("len %02X %04X %02X ",
+              rtinfo->hdrlen, hdrlen,
+              (!(rtinfo->flags & IP6T_RT_LEN) ||
+               ((rtinfo->hdrlen == hdrlen) ^
+                !!(rtinfo->invflags & IP6T_RT_INV_LEN))));
+       DEBUGP("res %02X %02X %02X ",
+              (rtinfo->flags & IP6T_RT_RES),
+              ((struct rt0_hdr *)rh)->reserved,
+              !((rtinfo->flags & IP6T_RT_RES) &&
+                (((struct rt0_hdr *)rh)->reserved)));
+
+       ret = (rh != NULL)
+             &&
+             (segsleft_match(rtinfo->segsleft[0], rtinfo->segsleft[1],
+                             rh->segments_left,
+                             !!(rtinfo->invflags & IP6T_RT_INV_SGS)))
+             &&
+             (!(rtinfo->flags & IP6T_RT_LEN) ||
+              ((rtinfo->hdrlen == hdrlen) ^
+               !!(rtinfo->invflags & IP6T_RT_INV_LEN)))
+             &&
+             (!(rtinfo->flags & IP6T_RT_TYP) ||
+              ((rtinfo->rt_type == rh->type) ^
+               !!(rtinfo->invflags & IP6T_RT_INV_TYP)));
 
        if (ret && (rtinfo->flags & IP6T_RT_RES)) {
                u_int32_t *rp, _reserved;
                rp = skb_header_pointer(skb,
-                                       ptr + offsetof(struct rt0_hdr, reserved),
-                                       sizeof(_reserved), &_reserved);
+                                       ptr + offsetof(struct rt0_hdr,
+                                                      reserved),
+                                       sizeof(_reserved),
+                                       &_reserved);
 
                ret = (*rp == 0);
        }
 
-       DEBUGP("#%d ",rtinfo->addrnr);
-       if ( !(rtinfo->flags & IP6T_RT_FST) ){
-              return ret;
+       DEBUGP("#%d ", rtinfo->addrnr);
+       if (!(rtinfo->flags & IP6T_RT_FST)) {
+               return ret;
        } else if (rtinfo->flags & IP6T_RT_FST_NSTRICT) {
                DEBUGP("Not strict ");
-               if ( rtinfo->addrnr > (unsigned int)((hdrlen-8)/16) ){
+               if (rtinfo->addrnr > (unsigned int)((hdrlen - 8) / 16)) {
                        DEBUGP("There isn't enough space\n");
                        return 0;
                } else {
                        unsigned int i = 0;
 
-                       DEBUGP("#%d ",rtinfo->addrnr);
-                       for(temp=0; temp<(unsigned int)((hdrlen-8)/16); temp++){
+                       DEBUGP("#%d ", rtinfo->addrnr);
+                       for (temp = 0;
+                            temp < (unsigned int)((hdrlen - 8) / 16);
+                            temp++) {
                                ap = skb_header_pointer(skb,
                                                        ptr
                                                        + sizeof(struct rt0_hdr)
@@ -141,24 +147,26 @@ match(const struct sk_buff *skb,
                                BUG_ON(ap == NULL);
 
                                if (ipv6_addr_equal(ap, &rtinfo->addrs[i])) {
-                                       DEBUGP("i=%d temp=%d;\n",i,temp);
+                                       DEBUGP("i=%d temp=%d;\n", i, temp);
                                        i++;
                                }
-                               if (i==rtinfo->addrnr) break;
+                               if (i == rtinfo->addrnr)
+                                       break;
                        }
                        DEBUGP("i=%d #%d\n", i, rtinfo->addrnr);
                        if (i == rtinfo->addrnr)
                                return ret;
-                       else return 0;
+                       else
+                               return 0;
                }
        } else {
                DEBUGP("Strict ");
-               if ( rtinfo->addrnr > (unsigned int)((hdrlen-8)/16) ){
+               if (rtinfo->addrnr > (unsigned int)((hdrlen - 8) / 16)) {
                        DEBUGP("There isn't enough space\n");
                        return 0;
                } else {
-                       DEBUGP("#%d ",rtinfo->addrnr);
-                       for(temp=0; temp<rtinfo->addrnr; temp++){
+                       DEBUGP("#%d ", rtinfo->addrnr);
+                       for (temp = 0; temp < rtinfo->addrnr; temp++) {
                                ap = skb_header_pointer(skb,
                                                        ptr
                                                        + sizeof(struct rt0_hdr)
@@ -171,9 +179,11 @@ match(const struct sk_buff *skb,
                                        break;
                        }
                        DEBUGP("temp=%d #%d\n", temp, rtinfo->addrnr);
-                       if ((temp == rtinfo->addrnr) && (temp == (unsigned int)((hdrlen-8)/16)))
+                       if ((temp == rtinfo->addrnr) &&
+                           (temp == (unsigned int)((hdrlen - 8) / 16)))
                                return ret;
-                       else return 0;
+                       else
+                               return 0;
                }
        }
 
@@ -183,32 +193,31 @@ match(const struct sk_buff *skb,
 /* Called when user tries to insert an entry of this type. */
 static int
 checkentry(const char *tablename,
-          const void *entry,
-          void *matchinfo,
-          unsigned int matchinfosize,
-          unsigned int hook_mask)
+          const void *entry,
+          void *matchinfo,
+          unsigned int matchinfosize,
+          unsigned int hook_mask)
 {
-       const struct ip6t_rt *rtinfo = matchinfo;
-
-       if (matchinfosize != IP6T_ALIGN(sizeof(struct ip6t_rt))) {
-              DEBUGP("ip6t_rt: matchsize %u != %u\n",
-                      matchinfosize, IP6T_ALIGN(sizeof(struct ip6t_rt)));
-              return 0;
-       }
-       if (rtinfo->invflags & ~IP6T_RT_INV_MASK) {
-              DEBUGP("ip6t_rt: unknown flags %X\n",
-                      rtinfo->invflags);
-              return 0;
-       }
-       if ( (rtinfo->flags & (IP6T_RT_RES|IP6T_RT_FST_MASK)) && 
-                      (!(rtinfo->flags & IP6T_RT_TYP) || 
-                      (rtinfo->rt_type != 0) || 
-                      (rtinfo->invflags & IP6T_RT_INV_TYP)) ) {
-             DEBUGP("`--rt-type 0' required before `--rt-0-*'");
-              return 0;
-       }
-
-       return 1;
+       const struct ip6t_rt *rtinfo = matchinfo;
+
+       if (matchinfosize != IP6T_ALIGN(sizeof(struct ip6t_rt))) {
+               DEBUGP("ip6t_rt: matchsize %u != %u\n",
+                      matchinfosize, IP6T_ALIGN(sizeof(struct ip6t_rt)));
+               return 0;
+       }
+       if (rtinfo->invflags & ~IP6T_RT_INV_MASK) {
+               DEBUGP("ip6t_rt: unknown flags %X\n", rtinfo->invflags);
+               return 0;
+       }
+       if ((rtinfo->flags & (IP6T_RT_RES | IP6T_RT_FST_MASK)) &&
+           (!(rtinfo->flags & IP6T_RT_TYP) ||
+            (rtinfo->rt_type != 0) ||
+            (rtinfo->invflags & IP6T_RT_INV_TYP))) {
+               DEBUGP("`--rt-type 0' required before `--rt-0-*'");
+               return 0;
+       }
+
+       return 1;
 }
 
 static struct ip6t_match rt_match = {
@@ -220,12 +229,12 @@ static struct ip6t_match rt_match = {
 
 static int __init init(void)
 {
-       return ip6t_register_match(&rt_match);
+       return ip6t_register_match(&rt_match);
 }
 
 static void __exit cleanup(void)
 {
-       ip6t_unregister_match(&rt_match);
+       ip6t_unregister_match(&rt_match);
 }
 
 module_init(init);
index 43f1ce74187d524122d8fbf6d325c7395dfb534d..ae86d237a4569b47e764db78ebe359390bc63359 100644 (file)
@@ -1620,6 +1620,7 @@ static int key_notify_sa_flush(struct km_event *c)
                return -ENOBUFS;
        hdr = (struct sadb_msg *) skb_put(skb, sizeof(struct sadb_msg));
        hdr->sadb_msg_satype = pfkey_proto2satype(c->data.proto);
+       hdr->sadb_msg_type = SADB_FLUSH;
        hdr->sadb_msg_seq = c->seq;
        hdr->sadb_msg_pid = c->pid;
        hdr->sadb_msg_version = PF_KEY_V2;
@@ -2385,6 +2386,7 @@ static int key_notify_policy_flush(struct km_event *c)
        if (!skb_out)
                return -ENOBUFS;
        hdr = (struct sadb_msg *) skb_put(skb_out, sizeof(struct sadb_msg));
+       hdr->sadb_msg_type = SADB_X_SPDFLUSH;
        hdr->sadb_msg_seq = c->seq;
        hdr->sadb_msg_pid = c->pid;
        hdr->sadb_msg_version = PF_KEY_V2;
index ee93abc71cb8aaefde9f24bbd6a5e5326fc5aee3..9db7dbdb16e6bd14c7ca23dc6c8dfe11e16f4566 100644 (file)
@@ -365,7 +365,7 @@ static int packet_sendmsg_spkt(struct kiocb *iocb, struct socket *sock,
         */
         
        err = -EMSGSIZE;
-       if(len>dev->mtu+dev->hard_header_len)
+       if (len > dev->mtu + dev->hard_header_len)
                goto out_unlock;
 
        err = -ENOBUFS;
@@ -935,7 +935,7 @@ static int packet_bind_spkt(struct socket *sock, struct sockaddr *uaddr, int add
         *      Check legality
         */
         
-       if(addr_len!=sizeof(struct sockaddr))
+       if (addr_len != sizeof(struct sockaddr))
                return -EINVAL;
        strlcpy(name,uaddr->sa_data,sizeof(name));
 
@@ -1092,7 +1092,7 @@ static int packet_recvmsg(struct kiocb *iocb, struct socket *sock,
         *      retries.
         */
 
-       if(skb==NULL)
+       if (skb == NULL)
                goto out;
 
        /*
@@ -1392,8 +1392,8 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
        if (level != SOL_PACKET)
                return -ENOPROTOOPT;
 
-       if (get_user(len,optlen))
-               return -EFAULT;
+       if (get_user(len, optlen))
+               return -EFAULT;
 
        if (len < 0)
                return -EINVAL;
@@ -1419,9 +1419,9 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
                return -ENOPROTOOPT;
        }
 
-       if (put_user(len, optlen))
-               return -EFAULT;
-       return 0;
+       if (put_user(len, optlen))
+               return -EFAULT;
+       return 0;
 }
 
 
index 3ac81cdd1211ce2c58fa4729b65fabe464097294..3e7466900bd4e1260728ed63759d60794a88c69e 100644 (file)
@@ -81,7 +81,7 @@ static int krxtimod(void *arg)
 
        for (;;) {
                unsigned long jif;
-               signed long timeout;
+               long timeout;
 
                /* deal with the server being asked to die */
                if (krxtimod_die) {
index 3b5ecd8e2401f85c9d2f14a64ec33d3ef38db14e..29975d99d864de1a1d8ea5b2b4a91a719c8b0861 100644 (file)
@@ -361,7 +361,7 @@ static void rxrpc_proc_peers_stop(struct seq_file *p, void *v)
 static int rxrpc_proc_peers_show(struct seq_file *m, void *v)
 {
        struct rxrpc_peer *peer = list_entry(v, struct rxrpc_peer, proc_link);
-       signed long timeout;
+       long timeout;
 
        /* display header on line 1 */
        if (v == SEQ_START_TOKEN) {
@@ -373,8 +373,8 @@ static int rxrpc_proc_peers_show(struct seq_file *m, void *v)
        /* display one peer per line on subsequent lines */
        timeout = 0;
        if (!list_empty(&peer->timeout.link))
-               timeout = (signed long) peer->timeout.timo_jif -
-                       (signed long) jiffies;
+               timeout = (long) peer->timeout.timo_jif -
+                       (long) jiffies;
 
        seq_printf(m, "%5hu %08x %5d %5d %8ld %5Zu %7lu\n",
                   peer->trans->port,
@@ -468,7 +468,7 @@ static void rxrpc_proc_conns_stop(struct seq_file *p, void *v)
 static int rxrpc_proc_conns_show(struct seq_file *m, void *v)
 {
        struct rxrpc_connection *conn;
-       signed long timeout;
+       long timeout;
 
        conn = list_entry(v, struct rxrpc_connection, proc_link);
 
@@ -484,8 +484,8 @@ static int rxrpc_proc_conns_show(struct seq_file *m, void *v)
        /* display one conn per line on subsequent lines */
        timeout = 0;
        if (!list_empty(&conn->timeout.link))
-               timeout = (signed long) conn->timeout.timo_jif -
-                       (signed long) jiffies;
+               timeout = (long) conn->timeout.timo_jif -
+                       (long) jiffies;
 
        seq_printf(m,
                   "%5hu %08x %5hu %04hx %08x %-3.3s %08x %08x %5Zu %8ld\n",
index 5b3a3e48ed92e64d8d45f05fc32e820a5f701024..1641db33a994020f557cee3d42ab0cc5505a2743 100644 (file)
@@ -228,14 +228,13 @@ static int prio_tune(struct Qdisc *sch, struct rtattr *opt)
        }
        sch_tree_unlock(sch);
 
-       for (i=0; i<=TC_PRIO_MAX; i++) {
-               int band = q->prio2band[i];
-               if (q->queues[band] == &noop_qdisc) {
+       for (i=0; i<q->bands; i++) {
+               if (q->queues[i] == &noop_qdisc) {
                        struct Qdisc *child;
                        child = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops);
                        if (child) {
                                sch_tree_lock(sch);
-                               child = xchg(&q->queues[band], child);
+                               child = xchg(&q->queues[i], child);
 
                                if (child != &noop_qdisc)
                                        qdisc_destroy(child);
index 8734bb7280e36dadd1e1bc87aeff3b8401d85ceb..86d8da0cbd027262024277ea1ba7fe14164f7d4f 100644 (file)
@@ -144,6 +144,8 @@ static unsigned sfq_hash(struct sfq_sched_data *q, struct sk_buff *skb)
                if (!(iph->frag_off&htons(IP_MF|IP_OFFSET)) &&
                    (iph->protocol == IPPROTO_TCP ||
                     iph->protocol == IPPROTO_UDP ||
+                    iph->protocol == IPPROTO_SCTP ||
+                    iph->protocol == IPPROTO_DCCP ||
                     iph->protocol == IPPROTO_ESP))
                        h2 ^= *(((u32*)iph) + iph->ihl);
                break;
@@ -155,6 +157,8 @@ static unsigned sfq_hash(struct sfq_sched_data *q, struct sk_buff *skb)
                h2 = iph->saddr.s6_addr32[3]^iph->nexthdr;
                if (iph->nexthdr == IPPROTO_TCP ||
                    iph->nexthdr == IPPROTO_UDP ||
+                   iph->nexthdr == IPPROTO_SCTP ||
+                   iph->nexthdr == IPPROTO_DCCP ||
                    iph->nexthdr == IPPROTO_ESP)
                        h2 ^= *(u32*)&iph[1];
                break;
index 4aa6fc60357ca10f76bf3c918a76d88fe1ba2b00..cb78b50868eee0c765884565dbdc4b6d4c2df01b 100644 (file)
@@ -257,20 +257,26 @@ int sctp_rcv(struct sk_buff *skb)
         */
        sctp_bh_lock_sock(sk);
 
+       /* It is possible that the association could have moved to a different
+        * socket if it is peeled off. If so, update the sk.
+        */ 
+       if (sk != rcvr->sk) {
+               sctp_bh_lock_sock(rcvr->sk);
+               sctp_bh_unlock_sock(sk);
+               sk = rcvr->sk;
+       }
+
        if (sock_owned_by_user(sk))
                sk_add_backlog(sk, skb);
        else
                sctp_backlog_rcv(sk, skb);
 
-       /* Release the sock and any reference counts we took in the
-        * lookup calls.
+       /* Release the sock and the sock ref we took in the lookup calls.
+        * The asoc/ep ref will be released in sctp_backlog_rcv.
         */
        sctp_bh_unlock_sock(sk);
-       if (asoc)
-               sctp_association_put(asoc);
-       else
-               sctp_endpoint_put(ep);
        sock_put(sk);
+
        return ret;
 
 discard_it:
@@ -296,12 +302,50 @@ discard_release:
 int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb)
 {
        struct sctp_chunk *chunk = SCTP_INPUT_CB(skb)->chunk;
-       struct sctp_inq *inqueue = &chunk->rcvr->inqueue;
-
-       sctp_inq_push(inqueue, chunk);
+       struct sctp_inq *inqueue = NULL;
+       struct sctp_ep_common *rcvr = NULL;
+
+       rcvr = chunk->rcvr;
+
+       BUG_TRAP(rcvr->sk == sk);
+
+       if (rcvr->dead) {
+               sctp_chunk_free(chunk);
+       } else {
+               inqueue = &chunk->rcvr->inqueue;
+               sctp_inq_push(inqueue, chunk);
+       }
+
+       /* Release the asoc/ep ref we took in the lookup calls in sctp_rcv. */ 
+       if (SCTP_EP_TYPE_ASSOCIATION == rcvr->type)
+               sctp_association_put(sctp_assoc(rcvr));
+       else
+               sctp_endpoint_put(sctp_ep(rcvr));
+  
         return 0;
 }
 
+void sctp_backlog_migrate(struct sctp_association *assoc, 
+                         struct sock *oldsk, struct sock *newsk)
+{
+       struct sk_buff *skb;
+       struct sctp_chunk *chunk;
+
+       skb = oldsk->sk_backlog.head;
+       oldsk->sk_backlog.head = oldsk->sk_backlog.tail = NULL;
+       while (skb != NULL) {
+               struct sk_buff *next = skb->next;
+
+               chunk = SCTP_INPUT_CB(skb)->chunk;
+               skb->next = NULL;
+               if (&assoc->base == chunk->rcvr)
+                       sk_add_backlog(newsk, skb);
+               else
+                       sk_add_backlog(oldsk, skb);
+               skb = next;
+       }
+}
+
 /* Handle icmp frag needed error. */
 void sctp_icmp_frag_needed(struct sock *sk, struct sctp_association *asoc,
                           struct sctp_transport *t, __u32 pmtu)
@@ -544,10 +588,16 @@ int sctp_rcv_ootb(struct sk_buff *skb)
        sctp_errhdr_t *err;
 
        ch = (sctp_chunkhdr_t *) skb->data;
-       ch_end = ((__u8 *) ch) + WORD_ROUND(ntohs(ch->length));
 
        /* Scan through all the chunks in the packet.  */
-       while (ch_end > (__u8 *)ch && ch_end < skb->tail) {
+       do {
+               /* Break out if chunk length is less than minimal. */
+               if (ntohs(ch->length) < sizeof(sctp_chunkhdr_t))
+                       break;
+
+               ch_end = ((__u8 *)ch) + WORD_ROUND(ntohs(ch->length));
+               if (ch_end > skb->tail)
+                       break;
 
                /* RFC 8.4, 2) If the OOTB packet contains an ABORT chunk, the
                 * receiver MUST silently discard the OOTB packet and take no
@@ -578,8 +628,7 @@ int sctp_rcv_ootb(struct sk_buff *skb)
                }
 
                ch = (sctp_chunkhdr_t *) ch_end;
-               ch_end = ((__u8 *) ch) + WORD_ROUND(ntohs(ch->length));
-       }
+       } while (ch_end < skb->tail);
 
        return 0;
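
The reworked loop above walks an out-of-the-blue packet chunk by chunk and validates each declared length before trusting it. A standalone sketch of the same bounded walk, with simplified types (the names below are illustrative, not the kernel's):

#include <stdint.h>
#include <arpa/inet.h>          /* ntohs(); in-kernel code uses its own byte-order helpers */

struct chunk_hdr {              /* stand-in for sctp_chunkhdr_t */
        uint8_t  type;
        uint8_t  flags;
        uint16_t length;        /* network byte order, includes this header */
};

static void walk_chunks(const uint8_t *data, const uint8_t *tail)
{
        const struct chunk_hdr *ch = (const struct chunk_hdr *)data;
        const uint8_t *ch_end;

        do {
                uint16_t len = ntohs(ch->length);

                if (len < sizeof(*ch))          /* bogus length: stop */
                        break;
                ch_end = (const uint8_t *)ch + ((len + 3U) & ~3U);      /* WORD_ROUND() */
                if (ch_end > tail)              /* would run past the buffer: stop */
                        break;
                /* ... act on chunk of type ch->type here ... */
                ch = (const struct chunk_hdr *)ch_end;
        } while (ch_end < tail);
}

The point mirrored here is that the length is checked before it is used, so a malformed chunk terminates the walk instead of steering it outside the buffer.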
 
index 2d33922c044bb5108075de3fbff111a1207d66c7..297b8951463e80293da9a029384607e5f094c121 100644 (file)
@@ -73,8 +73,10 @@ void sctp_inq_free(struct sctp_inq *queue)
        /* If there is a packet which is currently being worked on,
         * free it as well.
         */
-       if (queue->in_progress)
+       if (queue->in_progress) {
                sctp_chunk_free(queue->in_progress);
+               queue->in_progress = NULL;
+       }
 
        if (queue->malloced) {
                /* Dump the master memory segment.  */
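
The two-line change above is the usual free-and-clear idiom, shown standalone below with illustrative types; clearing the pointer ensures no later path can free or dereference the in-progress chunk a second time.

struct inq_like {
        void *in_progress;      /* packet currently being worked on */
};

static void release_in_progress(struct inq_like *q, void (*free_fn)(void *))
{
        if (q->in_progress) {
                free_fn(q->in_progress);
                q->in_progress = NULL;  /* nothing can touch it again */
        }
}
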
index 6e4dc28874d7690884753d32542fb039ed5e0b6d..d47a52c303a81da44da5f8d8c478f77049e69483 100644 (file)
@@ -176,7 +176,7 @@ static void sctp_seq_dump_remote_addrs(struct seq_file *seq, struct sctp_associa
 
 static void * sctp_eps_seq_start(struct seq_file *seq, loff_t *pos)
 {
-       if (*pos > sctp_ep_hashsize)
+       if (*pos >= sctp_ep_hashsize)
                return NULL;
 
        if (*pos < 0)
@@ -185,8 +185,6 @@ static void * sctp_eps_seq_start(struct seq_file *seq, loff_t *pos)
        if (*pos == 0)
                seq_printf(seq, " ENDPT     SOCK   STY SST HBKT LPORT   UID INODE LADDRS\n");
 
-       ++*pos;
-
        return (void *)pos;
 }
 
@@ -198,11 +196,9 @@ static void sctp_eps_seq_stop(struct seq_file *seq, void *v)
 
 static void * sctp_eps_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 {
-       if (*pos > sctp_ep_hashsize)
+       if (++*pos >= sctp_ep_hashsize)
                return NULL;
 
-       ++*pos;
-
        return pos;
 }
 
@@ -214,19 +210,19 @@ static int sctp_eps_seq_show(struct seq_file *seq, void *v)
        struct sctp_ep_common *epb;
        struct sctp_endpoint *ep;
        struct sock *sk;
-       int    hash = *(int *)v;
+       int    hash = *(loff_t *)v;
 
-       if (hash > sctp_ep_hashsize)
+       if (hash >= sctp_ep_hashsize)
                return -ENOMEM;
 
-       head = &sctp_ep_hashtable[hash-1];
+       head = &sctp_ep_hashtable[hash];
        sctp_local_bh_disable();
        read_lock(&head->lock);
        for (epb = head->chain; epb; epb = epb->next) {
                ep = sctp_ep(epb);
                sk = epb->sk;
                seq_printf(seq, "%8p %8p %-3d %-3d %-4d %-5d %5d %5lu ", ep, sk,
-                          sctp_sk(sk)->type, sk->sk_state, hash-1,
+                          sctp_sk(sk)->type, sk->sk_state, hash,
                           epb->bind_addr.port,
                           sock_i_uid(sk), sock_i_ino(sk));
 
@@ -283,7 +279,7 @@ void sctp_eps_proc_exit(void)
 
 static void * sctp_assocs_seq_start(struct seq_file *seq, loff_t *pos)
 {
-       if (*pos > sctp_assoc_hashsize)
+       if (*pos >= sctp_assoc_hashsize)
                return NULL;
 
        if (*pos < 0)
@@ -293,8 +289,6 @@ static void * sctp_assocs_seq_start(struct seq_file *seq, loff_t *pos)
                seq_printf(seq, " ASSOC     SOCK   STY SST ST HBKT ASSOC-ID TX_QUEUE RX_QUEUE UID INODE LPORT "
                                "RPORT LADDRS <-> RADDRS\n");
 
-       ++*pos;
-
        return (void *)pos;
 }
 
@@ -306,11 +300,9 @@ static void sctp_assocs_seq_stop(struct seq_file *seq, void *v)
 
 static void * sctp_assocs_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 {
-       if (*pos > sctp_assoc_hashsize)
+       if (++*pos >= sctp_assoc_hashsize)
                return NULL;
 
-       ++*pos;
-
        return pos;
 }
 
@@ -321,12 +313,12 @@ static int sctp_assocs_seq_show(struct seq_file *seq, void *v)
        struct sctp_ep_common *epb;
        struct sctp_association *assoc;
        struct sock *sk;
-       int    hash = *(int *)v;
+       int    hash = *(loff_t *)v;
 
-       if (hash > sctp_assoc_hashsize)
+       if (hash >= sctp_assoc_hashsize)
                return -ENOMEM;
 
-       head = &sctp_assoc_hashtable[hash-1];
+       head = &sctp_assoc_hashtable[hash];
        sctp_local_bh_disable();
        read_lock(&head->lock);
        for (epb = head->chain; epb; epb = epb->next) {
@@ -335,7 +327,7 @@ static int sctp_assocs_seq_show(struct seq_file *seq, void *v)
                seq_printf(seq,
                           "%8p %8p %-3d %-3d %-2d %-4d %4d %8d %8d %7d %5lu %-5d %5d ",
                           assoc, sk, sctp_sk(sk)->type, sk->sk_state,
-                          assoc->state, hash-1, assoc->assoc_id,
+                          assoc->state, hash, assoc->assoc_id,
                           (sk->sk_rcvbuf - assoc->rwnd),
                           assoc->sndbuf_used,
                           sock_i_uid(sk), sock_i_ino(sk),
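
The /proc iterator hunks above move both tables from a one-based, pre-incremented position to the conventional zero-based seq_file pattern: start() only validates *pos, next() advances and re-validates, and show() indexes the hash table directly with a loff_t. A compact sketch of that shape; table_size and toy_lookup() are illustrative stand-ins, not SCTP symbols:

#include <linux/seq_file.h>

static const loff_t table_size = 64;            /* illustrative */

static void *toy_lookup(loff_t hash)            /* illustrative */
{
        return NULL;
}

static void *toy_seq_start(struct seq_file *seq, loff_t *pos)
{
        return (*pos < table_size) ? pos : NULL;
}

static void *toy_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
        return (++*pos < table_size) ? pos : NULL;
}

static int toy_seq_show(struct seq_file *seq, void *v)
{
        loff_t hash = *(loff_t *)v;             /* note: loff_t, not int */

        seq_printf(seq, "%lld %p\n", (long long)hash, toy_lookup(hash));
        return 0;
}

With the old scheme the position was incremented before show() ran, so show() had to index with hash - 1 and tolerate a position equal to the table size; the zero-based form removes both quirks.
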
index 556c495c6922587e3f449957ee2b536860674d86..5e0de3c0eead5309e908ec7365e53d2dcf759932 100644 (file)
@@ -1275,7 +1275,12 @@ static sctp_cookie_param_t *sctp_pack_cookie(const struct sctp_endpoint *ep,
        unsigned int keylen;
        char *key;
 
-       headersize = sizeof(sctp_paramhdr_t) + SCTP_SECRET_SIZE;
+       /* Header size is static data prior to the actual cookie, including
+        * any padding.
+        */
+       headersize = sizeof(sctp_paramhdr_t) + 
+                    (sizeof(struct sctp_signed_cookie) - 
+                     sizeof(struct sctp_cookie));
        bodysize = sizeof(struct sctp_cookie)
                + ntohs(init_chunk->chunk_hdr->length) + addrs_len;
 
@@ -1354,7 +1359,7 @@ struct sctp_association *sctp_unpack_cookie(
        struct sctp_signed_cookie *cookie;
        struct sctp_cookie *bear_cookie;
        int headersize, bodysize, fixed_size;
-       __u8 digest[SCTP_SIGNATURE_SIZE];
+       __u8 *digest = ep->digest;
        struct scatterlist sg;
        unsigned int keylen, len;
        char *key;
@@ -1362,7 +1367,12 @@ struct sctp_association *sctp_unpack_cookie(
        struct sk_buff *skb = chunk->skb;
        struct timeval tv;
 
-       headersize = sizeof(sctp_chunkhdr_t) + SCTP_SECRET_SIZE;
+       /* Header size is static data prior to the actual cookie, including
+        * any padding.
+        */
+       headersize = sizeof(sctp_chunkhdr_t) +
+                    (sizeof(struct sctp_signed_cookie) - 
+                     sizeof(struct sctp_cookie));
        bodysize = ntohs(chunk->chunk_hdr->length) - headersize;
        fixed_size = headersize + sizeof(struct sctp_cookie);
 
index b8b38aba92b3aa4496e5f2e668a1ba6bfe70c6ab..8d1dc24bab4c1f1da541c760e13955ca54b77d29 100644 (file)
@@ -1300,7 +1300,7 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
                                        "T1 INIT Timeout adjustment"
                                        " init_err_counter: %d"
                                        " cycle: %d"
-                                       " timeout: %d\n",
+                                       " timeout: %ld\n",
                                        asoc->init_err_counter,
                                        asoc->init_cycle,
                                        asoc->timeouts[SCTP_EVENT_TIMEOUT_T1_INIT]);
@@ -1328,7 +1328,7 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
                        SCTP_DEBUG_PRINTK(
                                "T1 COOKIE Timeout adjustment"
                                " init_err_counter: %d"
-                               " timeout: %d\n",
+                               " timeout: %ld\n",
                                asoc->init_err_counter,
                                asoc->timeouts[SCTP_EVENT_TIMEOUT_T1_COOKIE]);
 
index 477d7f80dba686713ac6b10288f343a581e1ba5b..2b9a832b29a70bdbb46d0ce401d680c9a0121654 100644 (file)
@@ -884,7 +884,7 @@ sctp_disposition_t sctp_sf_sendbeat_8_3(const struct sctp_endpoint *ep,
 {
        struct sctp_transport *transport = (struct sctp_transport *) arg;
 
-       if (asoc->overall_error_count > asoc->max_retrans) {
+       if (asoc->overall_error_count >= asoc->max_retrans) {
                /* CMD_ASSOC_FAILED calls CMD_DELETE_TCB. */
                sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
                                SCTP_U32(SCTP_ERROR_NO_ERROR));
@@ -2122,7 +2122,7 @@ static sctp_disposition_t sctp_sf_do_5_2_6_stale(const struct sctp_endpoint *ep,
        struct sctp_bind_addr *bp;
        int attempts = asoc->init_err_counter + 1;
 
-       if (attempts >= asoc->max_init_attempts) {
+       if (attempts > asoc->max_init_attempts) {
                sctp_add_cmd_sf(commands, SCTP_CMD_INIT_FAILED,
                                SCTP_U32(SCTP_ERROR_STALE_COOKIE));
                return SCTP_DISPOSITION_DELETE_TCB;
@@ -3090,6 +3090,8 @@ sctp_disposition_t sctp_sf_ootb(const struct sctp_endpoint *ep,
                        break;
 
                ch_end = ((__u8 *)ch) + WORD_ROUND(ntohs(ch->length));
+               if (ch_end > skb->tail)
+                       break;
 
                if (SCTP_CID_SHUTDOWN_ACK == ch->type)
                        ootb_shut_ack = 1;
@@ -4638,7 +4640,7 @@ sctp_disposition_t sctp_sf_t1_init_timer_expire(const struct sctp_endpoint *ep,
 
        SCTP_DEBUG_PRINTK("Timer T1 expired (INIT).\n");
 
-       if (attempts < asoc->max_init_attempts) {
+       if (attempts <= asoc->max_init_attempts) {
                bp = (struct sctp_bind_addr *) &asoc->base.bind_addr;
                repl = sctp_make_init(asoc, bp, GFP_ATOMIC, 0);
                if (!repl)
@@ -4695,7 +4697,7 @@ sctp_disposition_t sctp_sf_t1_cookie_timer_expire(const struct sctp_endpoint *ep
 
        SCTP_DEBUG_PRINTK("Timer T1 expired (COOKIE-ECHO).\n");
 
-       if (attempts < asoc->max_init_attempts) {
+       if (attempts <= asoc->max_init_attempts) {
                repl = sctp_make_cookie_echo(asoc, NULL);
                if (!repl)
                        return SCTP_DISPOSITION_NOMEM;
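
The comparison changes above all converge on one convention for the INIT/COOKIE-ECHO retry limit: counting attempts as init_err_counter + 1, the association may keep retrying while attempts <= max_init_attempts and gives up once attempts exceed it. As a one-line sketch of that convention:

/* attempts = errors seen so far + the attempt about to be made */
static int may_retry_init(int init_err_counter, int max_init_attempts)
{
        return (init_err_counter + 1) <= max_init_attempts;
}
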
index c98ee375ba5e11883bb2df13378b450b11f77ff0..0ea947eb681320561d3e3d61ac91001aa1043926 100644 (file)
@@ -2995,7 +2995,7 @@ SCTP_STATIC int sctp_init_sock(struct sock *sk)
        sp->hbinterval  = jiffies_to_msecs(sctp_hb_interval);
        sp->pathmaxrxt  = sctp_max_retrans_path;
        sp->pathmtu     = 0; // allow default discovery
-       sp->sackdelay   = sctp_sack_timeout;
+       sp->sackdelay   = jiffies_to_msecs(sctp_sack_timeout);
        sp->param_flags = SPP_HB_ENABLE |
                          SPP_PMTUD_ENABLE |
                          SPP_SACKDELAY_ENABLE;
@@ -5426,7 +5426,7 @@ out:
        return err;
 
 do_error:
-       if (asoc->init_err_counter + 1 >= asoc->max_init_attempts)
+       if (asoc->init_err_counter + 1 > asoc->max_init_attempts)
                err = -ETIMEDOUT;
        else
                err = -ECONNREFUSED;
@@ -5602,8 +5602,12 @@ static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
         */
        newsp->type = type;
 
+       spin_lock_bh(&oldsk->sk_lock.slock);
+       /* Migrate the backlog from oldsk to newsk. */
+       sctp_backlog_migrate(assoc, oldsk, newsk);
        /* Migrate the association to the new socket. */
        sctp_assoc_migrate(assoc, newsk);
+       spin_unlock_bh(&oldsk->sk_lock.slock);
 
        /* If the association on the newsk is already closed before accept()
         * is called, set RCV_SHUTDOWN flag.
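
The hunk above takes the old socket's sk_lock.slock (the same lock the receive path grabs via sctp_bh_lock_sock()) around both the backlog migration and the association migration, so packets being queued to the old socket cannot race with the move. A minimal sketch of that ordering, with toy types standing in for struct sock and struct sctp_association:

#include <linux/spinlock.h>

struct toy_sock  { spinlock_t slock;    /* plus a backlog queue */ };
struct toy_assoc { struct toy_sock *sk; };

/* stands in for sctp_backlog_migrate(): re-queue the old socket's
 * backlogged skbs that belong to 'a' onto the new socket
 */
static void toy_move_backlog(struct toy_assoc *a, struct toy_sock *oldsk,
                             struct toy_sock *newsk)
{
}

static void toy_migrate(struct toy_assoc *a, struct toy_sock *oldsk,
                        struct toy_sock *newsk)
{
        spin_lock_bh(&oldsk->slock);    /* block bottom-half delivery to oldsk */
        toy_move_backlog(a, oldsk, newsk);
        a->sk = newsk;                  /* roughly what sctp_assoc_migrate() flips */
        spin_unlock_bh(&oldsk->slock);
}
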
index fcd7096c953d5d5698b3ce456f3512915b66c304..dc6f3ff32358c0dafd1b8ae1f63d610a5106e9b5 100644 (file)
@@ -159,12 +159,9 @@ static ctl_table sctp_table[] = {
                .ctl_name       = NET_SCTP_PRESERVE_ENABLE,
                .procname       = "cookie_preserve_enable",
                .data           = &sctp_cookie_preserve_enable,
-               .maxlen         = sizeof(long),
+               .maxlen         = sizeof(int),
                .mode           = 0644,
-               .proc_handler   = &proc_doulongvec_ms_jiffies_minmax,
-               .strategy       = &sctp_sysctl_jiffies_ms,
-               .extra1         = &rto_timer_min,
-               .extra2         = &rto_timer_max
+               .proc_handler   = &proc_dointvec
        },
        {
                .ctl_name       = NET_SCTP_RTO_ALPHA,
index 68d73e2dd155e33c2ca06e7bbd283b5a66738609..160f62ad1cc55f924e7fc591e7df3a7259f5376a 100644 (file)
@@ -350,7 +350,7 @@ void sctp_transport_update_rto(struct sctp_transport *tp, __u32 rtt)
        tp->rto_pending = 0;
 
        SCTP_DEBUG_PRINTK("%s: transport: %p, rtt: %d, srtt: %d "
-                         "rttvar: %d, rto: %d\n", __FUNCTION__,
+                         "rttvar: %d, rto: %ld\n", __FUNCTION__,
                          tp, rtt, tp->srtt, tp->rttvar, tp->rto);
 }
 
index e4ada15ed856feba1f8c26683969c56cac1958b7..23632d84d8d7c105c2e61cf37a33e11c2e140fe5 100644 (file)
@@ -420,7 +420,8 @@ static int rsc_parse(struct cache_detail *cd,
                        gss_mech_put(gm);
                        goto out;
                }
-               if (gss_import_sec_context(buf, len, gm, &rsci.mechctx)) {
+               status = gss_import_sec_context(buf, len, gm, &rsci.mechctx);
+               if (status) {
                        gss_mech_put(gm);
                        goto out;
                }
@@ -585,6 +586,20 @@ gss_verify_header(struct svc_rqst *rqstp, struct rsc *rsci,
        return SVC_OK;
 }
 
+static int
+gss_write_null_verf(struct svc_rqst *rqstp)
+{
+       u32     *p;
+
+       svc_putu32(rqstp->rq_res.head, htonl(RPC_AUTH_NULL));
+       p = rqstp->rq_res.head->iov_base + rqstp->rq_res.head->iov_len;
+       /* don't really need to check if head->iov_len > PAGE_SIZE ... */
+       *p++ = 0;
+       if (!xdr_ressize_check(rqstp, p))
+               return -1;
+       return 0;
+}
+
 static int
 gss_write_verf(struct svc_rqst *rqstp, struct gss_ctx *ctx_id, u32 seq)
 {
@@ -741,6 +756,21 @@ svcauth_gss_set_client(struct svc_rqst *rqstp)
        return SVC_OK;
 }
 
+static inline int
+gss_write_init_verf(struct svc_rqst *rqstp, struct rsi *rsip)
+{
+       struct rsc *rsci;
+
+       if (rsip->major_status != GSS_S_COMPLETE)
+               return gss_write_null_verf(rqstp);
+       rsci = gss_svc_searchbyctx(&rsip->out_handle);
+       if (rsci == NULL) {
+               rsip->major_status = GSS_S_NO_CONTEXT;
+               return gss_write_null_verf(rqstp);
+       }
+       return gss_write_verf(rqstp, rsci->mechctx, GSS_SEQ_WIN);
+}
+
 /*
  * Accept an rpcsec packet.
  * If context establishment, punt to user space
@@ -876,11 +906,7 @@ svcauth_gss_accept(struct svc_rqst *rqstp, u32 *authp)
                case -ENOENT:
                        goto drop;
                case 0:
-                       rsci = gss_svc_searchbyctx(&rsip->out_handle);
-                       if (!rsci) {
-                               goto drop;
-                       }
-                       if (gss_write_verf(rqstp, rsci->mechctx, GSS_SEQ_WIN))
+                       if (gss_write_init_verf(rqstp, rsip))
                                goto drop;
                        if (resv->iov_len + 4 > PAGE_SIZE)
                                goto drop;
index e67613e4eb18c7d39d0d4fe1c3cf28c6cffb12b0..50580620e89704038f1957fb6b95e2cf8e2dbd12 100644 (file)
@@ -1527,6 +1527,7 @@ svc_defer(struct cache_req *req)
                dr->handle.owner = rqstp->rq_server;
                dr->prot = rqstp->rq_prot;
                dr->addr = rqstp->rq_addr;
+               dr->daddr = rqstp->rq_daddr;
                dr->argslen = rqstp->rq_arg.len >> 2;
                memcpy(dr->args, rqstp->rq_arg.head[0].iov_base-skip, dr->argslen<<2);
        }
@@ -1552,6 +1553,7 @@ static int svc_deferred_recv(struct svc_rqst *rqstp)
        rqstp->rq_arg.len = dr->argslen<<2;
        rqstp->rq_prot        = dr->prot;
        rqstp->rq_addr        = dr->addr;
+       rqstp->rq_daddr       = dr->daddr;
        return dr->argslen<<2;
 }
 
index 05ab18e62deeb1f0003ba0482e4a806ed1ec4618..3891cc00087d37d18cbf05e21c9060ee257f4efe 100644 (file)
@@ -8,7 +8,12 @@ menu "TIPC Configuration (EXPERIMENTAL)"
 config TIPC
        tristate "The TIPC Protocol (EXPERIMENTAL)"
        ---help---
-         TBD.
+         The Transparent Inter Process Communication (TIPC) protocol is
+         specially designed for intra-cluster communication. This protocol
+         originates from Ericsson, where it has been used in carrier-grade
+         cluster applications for many years.
+       
+         For more information about TIPC, see http://tipc.sourceforge.net.
 
          This protocol support is also available as a module ( = code which
          can be inserted in and removed from the running kernel whenever you
index eca22260c98c59d79c6494b7094a514a011339f2..0be25e175b935641cf889695c40f7d1d545e0eef 100644 (file)
@@ -47,7 +47,7 @@ u32 tipc_get_addr(void)
 }
 
 /**
- * addr_domain_valid - validates a network domain address
+ * tipc_addr_domain_valid - validates a network domain address
  * 
  * Accepts <Z.C.N>, <Z.C.0>, <Z.0.0>, and <0.0.0>, 
  * where Z, C, and N are non-zero and do not exceed the configured limits.
@@ -55,7 +55,7 @@ u32 tipc_get_addr(void)
  * Returns 1 if domain address is valid, otherwise 0
  */
 
-int addr_domain_valid(u32 addr)
+int tipc_addr_domain_valid(u32 addr)
 {
        u32 n = tipc_node(addr);
        u32 c = tipc_cluster(addr);
@@ -79,7 +79,7 @@ int addr_domain_valid(u32 addr)
 }
 
 /**
- * addr_node_valid - validates a proposed network address for this node
+ * tipc_addr_node_valid - validates a proposed network address for this node
  * 
  * Accepts <Z.C.N>, where Z, C, and N are non-zero and do not exceed 
  * the configured limits.
@@ -87,8 +87,8 @@ int addr_domain_valid(u32 addr)
  * Returns 1 if address can be used, otherwise 0
  */
 
-int addr_node_valid(u32 addr)
+int tipc_addr_node_valid(u32 addr)
 {
-       return (addr_domain_valid(addr) && tipc_node(addr));
+       return (tipc_addr_domain_valid(addr) && tipc_node(addr));
 }
 
index 02ca71783e2e5b9c2eae5e426e6243460f14fe02..bcfebb3cbbf319f31e8c6bd4af45e7688c1c13bf 100644 (file)
@@ -122,7 +122,7 @@ static inline char *addr_string_fill(char *string, u32 addr)
        return string;
 }
 
-int addr_domain_valid(u32);
-int addr_node_valid(u32 addr);
+int tipc_addr_domain_valid(u32);
+int tipc_addr_node_valid(u32 addr);
 
 #endif
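
For reference, the <Z.C.N> rule documented for tipc_addr_domain_valid() expands to: once a field is zero, every less significant field must also be zero (the configured upper limits are omitted here). A standalone sketch with the field extraction written out in place of the real tipc_zone()/tipc_cluster()/tipc_node() helpers:

/* TIPC network addresses pack zone:8, cluster:12, node:12 into 32 bits */
static int domain_valid_sketch(unsigned int addr)
{
        unsigned int z = (addr >> 24) & 0xff;
        unsigned int c = (addr >> 12) & 0xfff;
        unsigned int n = addr & 0xfff;

        if (n && !c)            /* <Z.0.N> is not a usable domain */
                return 0;
        if (c && !z)            /* <0.C.x> is not a usable domain */
                return 0;
        return 1;               /* <Z.C.N>, <Z.C.0>, <Z.0.0>, <0.0.0> */
}
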
index 9713d622efb864c5b6ab069ff836b855704bc623..a7b04f397c12c322adabc8f4b5be30efebff041c 100644 (file)
@@ -82,7 +82,7 @@ struct bcbearer {
        struct bearer bearer;
        struct media media;
        struct bcbearer_pair bpairs[MAX_BEARERS];
-       struct bcbearer_pair bpairs_temp[TIPC_NUM_LINK_PRI];
+       struct bcbearer_pair bpairs_temp[TIPC_MAX_LINK_PRI + 1];
 };
 
 /**
@@ -104,7 +104,7 @@ static struct bclink *bclink = NULL;
 static struct link *bcl = NULL;
 static spinlock_t bc_lock = SPIN_LOCK_UNLOCKED;
 
-char bc_link_name[] = "multicast-link";
+char tipc_bclink_name[] = "multicast-link";
 
 
 static inline u32 buf_seqno(struct sk_buff *buf)
@@ -178,19 +178,19 @@ static void bclink_retransmit_pkt(u32 after, u32 to)
                buf = buf->next;                
        }
        if (buf != NULL)
-               link_retransmit(bcl, buf, mod(to - after));
+               tipc_link_retransmit(bcl, buf, mod(to - after));
        spin_unlock_bh(&bc_lock);              
 }
 
 /** 
- * bclink_acknowledge - handle acknowledgement of broadcast packets
+ * tipc_bclink_acknowledge - handle acknowledgement of broadcast packets
  * @n_ptr: node that sent acknowledgement info
  * @acked: broadcast sequence # that has been acknowledged
  * 
  * Node is locked, bc_lock unlocked.
  */
 
-void bclink_acknowledge(struct node *n_ptr, u32 acked)
+void tipc_bclink_acknowledge(struct node *n_ptr, u32 acked)
 {
        struct sk_buff *crs;
        struct sk_buff *next;
@@ -226,16 +226,16 @@ void bclink_acknowledge(struct node *n_ptr, u32 acked)
        /* Try resolving broadcast link congestion, if necessary */
 
        if (unlikely(bcl->next_out))
-               link_push_queue(bcl);
+               tipc_link_push_queue(bcl);
        if (unlikely(released && !list_empty(&bcl->waiting_ports)))
-               link_wakeup_ports(bcl, 0);
+               tipc_link_wakeup_ports(bcl, 0);
        spin_unlock_bh(&bc_lock);
 }
 
 /** 
  * bclink_send_ack - unicast an ACK msg
  * 
- * net_lock and node lock set
+ * tipc_net_lock and node lock set
  */
 
 static void bclink_send_ack(struct node *n_ptr)
@@ -243,13 +243,13 @@ static void bclink_send_ack(struct node *n_ptr)
        struct link *l_ptr = n_ptr->active_links[n_ptr->addr & 1];
 
        if (l_ptr != NULL)
-               link_send_proto_msg(l_ptr, STATE_MSG, 0, 0, 0, 0, 0);
+               tipc_link_send_proto_msg(l_ptr, STATE_MSG, 0, 0, 0, 0, 0);
 }
 
 /** 
  * bclink_send_nack- broadcast a NACK msg
  * 
- * net_lock and node lock set
+ * tipc_net_lock and node lock set
  */
 
 static void bclink_send_nack(struct node *n_ptr)
@@ -271,11 +271,11 @@ static void bclink_send_nack(struct node *n_ptr)
                msg_set_bcgap_to(msg, n_ptr->bclink.gap_to);
                msg_set_bcast_tag(msg, tipc_own_tag);
 
-               if (bearer_send(&bcbearer->bearer, buf, 0)) {
+               if (tipc_bearer_send(&bcbearer->bearer, buf, 0)) {
                        bcl->stats.sent_nacks++;
                        buf_discard(buf);
                } else {
-                       bearer_schedule(bcl->b_ptr, bcl);
+                       tipc_bearer_schedule(bcl->b_ptr, bcl);
                        bcl->proto_msg_queue = buf;
                        bcl->stats.bearer_congs++;
                }
@@ -291,12 +291,12 @@ static void bclink_send_nack(struct node *n_ptr)
 }
 
 /** 
- * bclink_check_gap - send a NACK if a sequence gap exists
+ * tipc_bclink_check_gap - send a NACK if a sequence gap exists
  *
- * net_lock and node lock set
+ * tipc_net_lock and node lock set
  */
 
-void bclink_check_gap(struct node *n_ptr, u32 last_sent)
+void tipc_bclink_check_gap(struct node *n_ptr, u32 last_sent)
 {
        if (!n_ptr->bclink.supported ||
            less_eq(last_sent, mod(n_ptr->bclink.last_in)))
@@ -309,19 +309,19 @@ void bclink_check_gap(struct node *n_ptr, u32 last_sent)
 }
 
 /** 
- * bclink_peek_nack - process a NACK msg meant for another node
+ * tipc_bclink_peek_nack - process a NACK msg meant for another node
  * 
- * Only net_lock set.
+ * Only tipc_net_lock set.
  */
 
-void bclink_peek_nack(u32 dest, u32 sender_tag, u32 gap_after, u32 gap_to)
+void tipc_bclink_peek_nack(u32 dest, u32 sender_tag, u32 gap_after, u32 gap_to)
 {
-       struct node *n_ptr = node_find(dest);
+       struct node *n_ptr = tipc_node_find(dest);
        u32 my_after, my_to;
 
-       if (unlikely(!n_ptr || !node_is_up(n_ptr)))
+       if (unlikely(!n_ptr || !tipc_node_is_up(n_ptr)))
                return;
-       node_lock(n_ptr);
+       tipc_node_lock(n_ptr);
        /*
         * Modify gap to suppress unnecessary NACKs from this node
         */
@@ -364,20 +364,20 @@ void bclink_peek_nack(u32 dest, u32 sender_tag, u32 gap_after, u32 gap_to)
                        bclink_set_gap(n_ptr);
                }
        }
-       node_unlock(n_ptr);
+       tipc_node_unlock(n_ptr);
 }
 
 /**
- * bclink_send_msg - broadcast a packet to all nodes in cluster
+ * tipc_bclink_send_msg - broadcast a packet to all nodes in cluster
  */
 
-int bclink_send_msg(struct sk_buff *buf)
+int tipc_bclink_send_msg(struct sk_buff *buf)
 {
        int res;
 
        spin_lock_bh(&bc_lock);
 
-       res = link_send_buf(bcl, buf);
+       res = tipc_link_send_buf(bcl, buf);
        if (unlikely(res == -ELINKCONG))
                buf_discard(buf);
        else
@@ -393,22 +393,22 @@ int bclink_send_msg(struct sk_buff *buf)
 }
 
 /**
- * bclink_recv_pkt - receive a broadcast packet, and deliver upwards
+ * tipc_bclink_recv_pkt - receive a broadcast packet, and deliver upwards
  * 
- * net_lock is read_locked, no other locks set
+ * tipc_net_lock is read_locked, no other locks set
  */
 
-void bclink_recv_pkt(struct sk_buff *buf)
+void tipc_bclink_recv_pkt(struct sk_buff *buf)
 {        
        struct tipc_msg *msg = buf_msg(buf);
-       struct node* node = node_find(msg_prevnode(msg));
+       struct node* node = tipc_node_find(msg_prevnode(msg));
        u32 next_in;
        u32 seqno;
        struct sk_buff *deferred;
 
        msg_dbg(msg, "<BC<<<");
 
-       if (unlikely(!node || !node_is_up(node) || !node->bclink.supported || 
+       if (unlikely(!node || !tipc_node_is_up(node) || !node->bclink.supported || 
                     (msg_mc_netid(msg) != tipc_net_id))) {
                buf_discard(buf);
                return;
@@ -417,14 +417,14 @@ void bclink_recv_pkt(struct sk_buff *buf)
        if (unlikely(msg_user(msg) == BCAST_PROTOCOL)) {
                msg_dbg(msg, "<BCNACK<<<");
                if (msg_destnode(msg) == tipc_own_addr) {
-                       node_lock(node);
-                       bclink_acknowledge(node, msg_bcast_ack(msg));
-                       node_unlock(node);
+                       tipc_node_lock(node);
+                       tipc_bclink_acknowledge(node, msg_bcast_ack(msg));
+                       tipc_node_unlock(node);
                        bcl->stats.recv_nacks++;
                        bclink_retransmit_pkt(msg_bcgap_after(msg),
                                              msg_bcgap_to(msg));
                } else {
-                       bclink_peek_nack(msg_destnode(msg),
+                       tipc_bclink_peek_nack(msg_destnode(msg),
                                         msg_bcast_tag(msg),
                                         msg_bcgap_after(msg),
                                         msg_bcgap_to(msg));
@@ -433,7 +433,7 @@ void bclink_recv_pkt(struct sk_buff *buf)
                return;
        }
 
-       node_lock(node);
+       tipc_node_lock(node);
 receive:
        deferred = node->bclink.deferred_head;
        next_in = mod(node->bclink.last_in + 1);
@@ -448,26 +448,26 @@ receive:
                        bcl->stats.sent_acks++;
                }
                if (likely(msg_isdata(msg))) {
-                       node_unlock(node);
-                       port_recv_mcast(buf, NULL);
+                       tipc_node_unlock(node);
+                       tipc_port_recv_mcast(buf, NULL);
                } else if (msg_user(msg) == MSG_BUNDLER) {
                        bcl->stats.recv_bundles++;
                        bcl->stats.recv_bundled += msg_msgcnt(msg);
-                       node_unlock(node);
-                       link_recv_bundle(buf);
+                       tipc_node_unlock(node);
+                       tipc_link_recv_bundle(buf);
                } else if (msg_user(msg) == MSG_FRAGMENTER) {
                        bcl->stats.recv_fragments++;
-                       if (link_recv_fragment(&node->bclink.defragm,
-                                              &buf, &msg))
+                       if (tipc_link_recv_fragment(&node->bclink.defragm,
+                                                   &buf, &msg))
                                bcl->stats.recv_fragmented++;
-                       node_unlock(node);
-                       net_route_msg(buf);
+                       tipc_node_unlock(node);
+                       tipc_net_route_msg(buf);
                } else {
-                       node_unlock(node);
-                       net_route_msg(buf);
+                       tipc_node_unlock(node);
+                       tipc_net_route_msg(buf);
                }
                if (deferred && (buf_seqno(deferred) == mod(next_in + 1))) {
-                       node_lock(node);
+                       tipc_node_lock(node);
                        buf = deferred;
                        msg = buf_msg(buf);
                        node->bclink.deferred_head = deferred->next;
@@ -478,9 +478,9 @@ receive:
                u32 gap_after = node->bclink.gap_after;
                u32 gap_to = node->bclink.gap_to;
 
-               if (link_defer_pkt(&node->bclink.deferred_head,
-                                  &node->bclink.deferred_tail,
-                                  buf)) {
+               if (tipc_link_defer_pkt(&node->bclink.deferred_head,
+                                       &node->bclink.deferred_tail,
+                                       buf)) {
                        node->bclink.nack_sync++;
                        bcl->stats.deferred_recv++;
                        if (seqno == mod(gap_after + 1))
@@ -497,10 +497,10 @@ receive:
                bcl->stats.duplicates++;
                buf_discard(buf);
        }
-       node_unlock(node);
+       tipc_node_unlock(node);
 }
 
-u32 bclink_get_last_sent(void)
+u32 tipc_bclink_get_last_sent(void)
 {
        u32 last_sent = mod(bcl->next_out_no - 1);
 
@@ -509,15 +509,15 @@ u32 bclink_get_last_sent(void)
        return last_sent;
 }
 
-u32 bclink_acks_missing(struct node *n_ptr)
+u32 tipc_bclink_acks_missing(struct node *n_ptr)
 {
        return (n_ptr->bclink.supported &&
-               (bclink_get_last_sent() != n_ptr->bclink.acked));
+               (tipc_bclink_get_last_sent() != n_ptr->bclink.acked));
 }
 
 
 /**
- * bcbearer_send - send a packet through the broadcast pseudo-bearer
+ * tipc_bcbearer_send - send a packet through the broadcast pseudo-bearer
  * 
  * Send through as many bearers as necessary to reach all nodes
  * that support TIPC multicasting.
@@ -525,9 +525,9 @@ u32 bclink_acks_missing(struct node *n_ptr)
  * Returns 0 if packet sent successfully, non-zero if not
  */
 
-int bcbearer_send(struct sk_buff *buf,
-                 struct tipc_bearer *unused1,
-                 struct tipc_media_addr *unused2)
+int tipc_bcbearer_send(struct sk_buff *buf,
+                      struct tipc_bearer *unused1,
+                      struct tipc_media_addr *unused2)
 {
        static int send_count = 0;
 
@@ -541,8 +541,8 @@ int bcbearer_send(struct sk_buff *buf,
        if (likely(!msg_non_seq(buf_msg(buf)))) {
                struct tipc_msg *msg;
 
-               assert(cluster_bcast_nodes.count != 0);
-               bcbuf_set_acks(buf, cluster_bcast_nodes.count);
+               assert(tipc_cltr_bcast_nodes.count != 0);
+               bcbuf_set_acks(buf, tipc_cltr_bcast_nodes.count);
                msg = buf_msg(buf);
                msg_set_non_seq(msg);
                msg_set_mc_netid(msg, tipc_net_id);
@@ -555,7 +555,7 @@ int bcbearer_send(struct sk_buff *buf,
 
        /* Send buffer over bearers until all targets reached */
        
-       remains = cluster_bcast_nodes;
+       remains = tipc_cltr_bcast_nodes;
 
        for (bp_index = 0; bp_index < MAX_BEARERS; bp_index++) {
                struct bearer *p = bcbearer->bpairs[bp_index].primary;
@@ -564,7 +564,7 @@ int bcbearer_send(struct sk_buff *buf,
                if (!p)
                        break;  /* no more bearers to try */
 
-               nmap_diff(&remains, &p->nodes, &remains_new);
+               tipc_nmap_diff(&remains, &p->nodes, &remains_new);
                if (remains_new.count == remains.count)
                        continue;       /* bearer pair doesn't add anything */
 
@@ -597,10 +597,10 @@ update:
 }
 
 /**
- * bcbearer_sort - create sets of bearer pairs used by broadcast bearer
+ * tipc_bcbearer_sort - create sets of bearer pairs used by broadcast bearer
  */
 
-void bcbearer_sort(void)
+void tipc_bcbearer_sort(void)
 {
        struct bcbearer_pair *bp_temp = bcbearer->bpairs_temp;
        struct bcbearer_pair *bp_curr;
@@ -614,7 +614,7 @@ void bcbearer_sort(void)
        memset(bp_temp, 0, sizeof(bcbearer->bpairs_temp));
 
        for (b_index = 0; b_index < MAX_BEARERS; b_index++) {
-               struct bearer *b = &bearers[b_index];
+               struct bearer *b = &tipc_bearers[b_index];
 
                if (!b->active || !b->nodes.count)
                        continue;
@@ -630,7 +630,7 @@ void bcbearer_sort(void)
        bp_curr = bcbearer->bpairs;
        memset(bcbearer->bpairs, 0, sizeof(bcbearer->bpairs));
 
-       for (pri = (TIPC_NUM_LINK_PRI - 1); pri >= 0; pri--) {
+       for (pri = TIPC_MAX_LINK_PRI; pri >= 0; pri--) {
 
                if (!bp_temp[pri].primary)
                        continue;
@@ -638,8 +638,8 @@ void bcbearer_sort(void)
                bp_curr->primary = bp_temp[pri].primary;
 
                if (bp_temp[pri].secondary) {
-                       if (nmap_equal(&bp_temp[pri].primary->nodes,
-                                      &bp_temp[pri].secondary->nodes)) {
+                       if (tipc_nmap_equal(&bp_temp[pri].primary->nodes,
+                                           &bp_temp[pri].secondary->nodes)) {
                                bp_curr->secondary = bp_temp[pri].secondary;
                        } else {
                                bp_curr++;
@@ -654,14 +654,14 @@ void bcbearer_sort(void)
 }
 
 /**
- * bcbearer_push - resolve bearer congestion
+ * tipc_bcbearer_push - resolve bearer congestion
  * 
  * Forces bclink to push out any unsent packets, until all packets are gone
  * or congestion reoccurs.
  * No locks set when function called
  */
 
-void bcbearer_push(void)
+void tipc_bcbearer_push(void)
 {
        struct bearer *b_ptr;
 
@@ -669,20 +669,20 @@ void bcbearer_push(void)
        b_ptr = &bcbearer->bearer;
        if (b_ptr->publ.blocked) {
                b_ptr->publ.blocked = 0;
-               bearer_lock_push(b_ptr);
+               tipc_bearer_lock_push(b_ptr);
        }
        spin_unlock_bh(&bc_lock);
 }
 
 
-int bclink_stats(char *buf, const u32 buf_size)
+int tipc_bclink_stats(char *buf, const u32 buf_size)
 {
        struct print_buf pb;
 
        if (!bcl)
                return 0;
 
-       printbuf_init(&pb, buf, buf_size);
+       tipc_printbuf_init(&pb, buf, buf_size);
 
        spin_lock_bh(&bc_lock);
 
@@ -718,10 +718,10 @@ int bclink_stats(char *buf, const u32 buf_size)
                    : 0);
 
        spin_unlock_bh(&bc_lock);
-       return printbuf_validate(&pb);
+       return tipc_printbuf_validate(&pb);
 }
 
-int bclink_reset_stats(void)
+int tipc_bclink_reset_stats(void)
 {
        if (!bcl)
                return -ENOPROTOOPT;
@@ -732,7 +732,7 @@ int bclink_reset_stats(void)
        return TIPC_OK;
 }
 
-int bclink_set_queue_limits(u32 limit)
+int tipc_bclink_set_queue_limits(u32 limit)
 {
        if (!bcl)
                return -ENOPROTOOPT;
@@ -740,12 +740,12 @@ int bclink_set_queue_limits(u32 limit)
                return -EINVAL;
 
        spin_lock_bh(&bc_lock);
-       link_set_queue_limits(bcl, limit);
+       tipc_link_set_queue_limits(bcl, limit);
        spin_unlock_bh(&bc_lock);
        return TIPC_OK;
 }
 
-int bclink_init(void)
+int tipc_bclink_init(void)
 {
        bcbearer = kmalloc(sizeof(*bcbearer), GFP_ATOMIC);
        bclink = kmalloc(sizeof(*bclink), GFP_ATOMIC);
@@ -762,7 +762,7 @@ int bclink_init(void)
        memset(bcbearer, 0, sizeof(struct bcbearer));
        INIT_LIST_HEAD(&bcbearer->bearer.cong_links);
        bcbearer->bearer.media = &bcbearer->media;
-       bcbearer->media.send_msg = bcbearer_send;
+       bcbearer->media.send_msg = tipc_bcbearer_send;
        sprintf(bcbearer->media.name, "tipc-multicast");
 
        bcl = &bclink->link;
@@ -772,27 +772,27 @@ int bclink_init(void)
        bclink->node.lock =  SPIN_LOCK_UNLOCKED;        
        bcl->owner = &bclink->node;
         bcl->max_pkt = MAX_PKT_DEFAULT_MCAST;
-       link_set_queue_limits(bcl, BCLINK_WIN_DEFAULT);
+       tipc_link_set_queue_limits(bcl, BCLINK_WIN_DEFAULT);
        bcl->b_ptr = &bcbearer->bearer;
        bcl->state = WORKING_WORKING;
-       sprintf(bcl->name, bc_link_name);
+       sprintf(bcl->name, tipc_bclink_name);
 
        if (BCLINK_LOG_BUF_SIZE) {
                char *pb = kmalloc(BCLINK_LOG_BUF_SIZE, GFP_ATOMIC);
 
                if (!pb)
                        goto nomem;
-               printbuf_init(&bcl->print_buf, pb, BCLINK_LOG_BUF_SIZE);
+               tipc_printbuf_init(&bcl->print_buf, pb, BCLINK_LOG_BUF_SIZE);
        }
 
        return TIPC_OK;
 }
 
-void bclink_stop(void)
+void tipc_bclink_stop(void)
 {
        spin_lock_bh(&bc_lock);
        if (bcbearer) {
-               link_stop(bcl);
+               tipc_link_stop(bcl);
                if (BCLINK_LOG_BUF_SIZE)
                        kfree(bcl->print_buf.buf);
                bcl = NULL;
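
The renamed tipc_bcbearer_send() above keeps a node map of destinations still to be reached and walks the sorted bearer slots until that map is empty. A compact sketch of the fan-out strategy, with the primary/secondary pairing collapsed to one bearer per slot and a toy one-word node map in place of struct node_map and tipc_nmap_diff():

struct toy_map    { unsigned int count; unsigned long bits; };
struct toy_bearer { struct toy_map nodes; };

static void toy_diff(const struct toy_map *a, const struct toy_map *b,
                     struct toy_map *out)
{
        out->bits  = a->bits & ~b->bits;
        out->count = (unsigned int)__builtin_popcountl(out->bits);
}

static int toy_bcast(struct toy_bearer **slots, int nslots,
                     struct toy_map remains,
                     int (*send)(struct toy_bearer *))
{
        struct toy_map remains_new;
        int i;

        for (i = 0; i < nslots && remains.count; i++) {
                struct toy_bearer *p = slots[i];

                if (!p)
                        break;                  /* no more bearers to try */
                toy_diff(&remains, &p->nodes, &remains_new);
                if (remains_new.count == remains.count)
                        continue;               /* this bearer adds nothing */
                if (send(p) == 0)               /* only count nodes actually reached */
                        remains = remains_new;
        }
        return remains.count ? -1 : 0;          /* non-zero: some nodes missed */
}
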
index 5430e524b4f9179ae5934504fc1d94391d68ce24..0e3be2ab330731cf1e127bf3b886c082ebdf16b3 100644 (file)
@@ -70,14 +70,14 @@ struct port_list {
 
 struct node;
 
-extern char bc_link_name[];
+extern char tipc_bclink_name[];
 
 
 /**
  * nmap_get - determine if node exists in a node map
  */
 
-static inline int nmap_get(struct node_map *nm_ptr, u32 node)
+static inline int tipc_nmap_get(struct node_map *nm_ptr, u32 node)
 {
        int n = tipc_node(node);
        int w = n / WSIZE;
@@ -90,7 +90,7 @@ static inline int nmap_get(struct node_map *nm_ptr, u32 node)
  * nmap_add - add a node to a node map
  */
 
-static inline void nmap_add(struct node_map *nm_ptr, u32 node)
+static inline void tipc_nmap_add(struct node_map *nm_ptr, u32 node)
 {
        int n = tipc_node(node);
        int w = n / WSIZE;
@@ -106,7 +106,7 @@ static inline void nmap_add(struct node_map *nm_ptr, u32 node)
  * nmap_remove - remove a node from a node map
  */
 
-static inline void nmap_remove(struct node_map *nm_ptr, u32 node)
+static inline void tipc_nmap_remove(struct node_map *nm_ptr, u32 node)
 {
        int n = tipc_node(node);
        int w = n / WSIZE;
@@ -122,7 +122,7 @@ static inline void nmap_remove(struct node_map *nm_ptr, u32 node)
  * nmap_equal - test for equality of node maps
  */
 
-static inline int nmap_equal(struct node_map *nm_a, struct node_map *nm_b)
+static inline int tipc_nmap_equal(struct node_map *nm_a, struct node_map *nm_b)
 {
        return !memcmp(nm_a, nm_b, sizeof(*nm_a));
 }
@@ -134,8 +134,8 @@ static inline int nmap_equal(struct node_map *nm_a, struct node_map *nm_b)
  * @nm_diff: output node map A-B (i.e. nodes of A that are not in B)
  */
 
-static inline void nmap_diff(struct node_map *nm_a, struct node_map *nm_b,
-                            struct node_map *nm_diff)
+static inline void tipc_nmap_diff(struct node_map *nm_a, struct node_map *nm_b,
+                                 struct node_map *nm_diff)
 {
        int stop = sizeof(nm_a->map) / sizeof(u32);
        int w;
@@ -159,7 +159,7 @@ static inline void nmap_diff(struct node_map *nm_a, struct node_map *nm_b,
  * port_list_add - add a port to a port list, ensuring no duplicates
  */
 
-static inline void port_list_add(struct port_list *pl_ptr, u32 port)
+static inline void tipc_port_list_add(struct port_list *pl_ptr, u32 port)
 {
        struct port_list *item = pl_ptr;
        int i;
@@ -194,7 +194,7 @@ static inline void port_list_add(struct port_list *pl_ptr, u32 port)
  * Note: First item is on stack, so it doesn't need to be released
  */
 
-static inline void port_list_free(struct port_list *pl_ptr)
+static inline void tipc_port_list_free(struct port_list *pl_ptr)
 {
        struct port_list *item;
        struct port_list *next;
@@ -206,18 +206,18 @@ static inline void port_list_free(struct port_list *pl_ptr)
 }
 
 
-int  bclink_init(void);
-void bclink_stop(void);
-void bclink_acknowledge(struct node *n_ptr, u32 acked);
-int  bclink_send_msg(struct sk_buff *buf);
-void bclink_recv_pkt(struct sk_buff *buf);
-u32  bclink_get_last_sent(void);
-u32  bclink_acks_missing(struct node *n_ptr);
-void bclink_check_gap(struct node *n_ptr, u32 seqno);
-int  bclink_stats(char *stats_buf, const u32 buf_size);
-int  bclink_reset_stats(void);
-int  bclink_set_queue_limits(u32 limit);
-void bcbearer_sort(void);
-void bcbearer_push(void);
+int  tipc_bclink_init(void);
+void tipc_bclink_stop(void);
+void tipc_bclink_acknowledge(struct node *n_ptr, u32 acked);
+int  tipc_bclink_send_msg(struct sk_buff *buf);
+void tipc_bclink_recv_pkt(struct sk_buff *buf);
+u32  tipc_bclink_get_last_sent(void);
+u32  tipc_bclink_acks_missing(struct node *n_ptr);
+void tipc_bclink_check_gap(struct node *n_ptr, u32 seqno);
+int  tipc_bclink_stats(char *stats_buf, const u32 buf_size);
+int  tipc_bclink_reset_stats(void);
+int  tipc_bclink_set_queue_limits(u32 limit);
+void tipc_bcbearer_sort(void);
+void tipc_bcbearer_push(void);
 
 #endif
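
The tipc_nmap_*() helpers renamed above implement a word-indexed bitmap with a cached population count. A self-contained sketch of the core operations; WSIZE and the capacity are assumptions for the example:

#define WSIZE 32                        /* bits per map word (assumed) */
#define MAX_NODES_SKETCH 4096           /* illustrative capacity */

struct nmap_sketch {
        unsigned int count;
        unsigned int map[(MAX_NODES_SKETCH + WSIZE - 1) / WSIZE];
};

static int nmap_get_sketch(const struct nmap_sketch *nm, unsigned int node)
{
        return (nm->map[node / WSIZE] >> (node % WSIZE)) & 1;
}

static void nmap_add_sketch(struct nmap_sketch *nm, unsigned int node)
{
        unsigned int mask = 1u << (node % WSIZE);

        if (!(nm->map[node / WSIZE] & mask)) {
                nm->map[node / WSIZE] |= mask;
                nm->count++;            /* count mirrors the number of set bits */
        }
}

static void nmap_remove_sketch(struct nmap_sketch *nm, unsigned int node)
{
        unsigned int mask = 1u << (node % WSIZE);

        if (nm->map[node / WSIZE] & mask) {
                nm->map[node / WSIZE] &= ~mask;
                nm->count--;
        }
}
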
index 3dd19fdc5a2cf5a97fc7f68f03bd5a58794d8569..64dcb0f3a8b28ba4559a1a34ed980dc0deaec408 100644 (file)
@@ -48,7 +48,7 @@
 static struct media *media_list = 0;
 static u32 media_count = 0;
 
-struct bearer *bearers = 0;
+struct bearer *tipc_bearers = 0;
 
 /**
  * media_name_valid - validate media name
@@ -107,7 +107,7 @@ int  tipc_register_media(u32 media_type,
        u32 i;
        int res = -EINVAL;
 
-       write_lock_bh(&net_lock);
+       write_lock_bh(&tipc_net_lock);
        if (!media_list)
                goto exit;
 
@@ -119,7 +119,8 @@ int  tipc_register_media(u32 media_type,
                warn("Media registration error: no broadcast address supplied\n");
                goto exit;
        }
-       if (bearer_priority >= TIPC_NUM_LINK_PRI) {
+       if ((bearer_priority < TIPC_MIN_LINK_PRI) ||
+           (bearer_priority > TIPC_MAX_LINK_PRI)) {
                warn("Media registration error: priority %u\n", bearer_priority);
                goto exit;
        }
@@ -164,15 +165,15 @@ int  tipc_register_media(u32 media_type,
        dbg("Media <%s> registered\n", name);
        res = 0;
 exit:
-       write_unlock_bh(&net_lock);
+       write_unlock_bh(&tipc_net_lock);
        return res;
 }
 
 /**
- * media_addr_printf - record media address in print buffer
+ * tipc_media_addr_printf - record media address in print buffer
  */
 
-void media_addr_printf(struct print_buf *pb, struct tipc_media_addr *a)
+void tipc_media_addr_printf(struct print_buf *pb, struct tipc_media_addr *a)
 {
        struct media *m_ptr;
        u32 media_type;
@@ -200,25 +201,25 @@ void media_addr_printf(struct print_buf *pb, struct tipc_media_addr *a)
 }
 
 /**
- * media_get_names - record names of registered media in buffer
+ * tipc_media_get_names - record names of registered media in buffer
  */
 
-struct sk_buff *media_get_names(void)
+struct sk_buff *tipc_media_get_names(void)
 {
        struct sk_buff *buf;
        struct media *m_ptr;
        int i;
 
-       buf = cfg_reply_alloc(MAX_MEDIA * TLV_SPACE(TIPC_MAX_MEDIA_NAME));
+       buf = tipc_cfg_reply_alloc(MAX_MEDIA * TLV_SPACE(TIPC_MAX_MEDIA_NAME));
        if (!buf)
                return NULL;
 
-       read_lock_bh(&net_lock);
+       read_lock_bh(&tipc_net_lock);
        for (i = 0, m_ptr = media_list; i < media_count; i++, m_ptr++) {
-               cfg_append_tlv(buf, TIPC_TLV_MEDIA_NAME, m_ptr->name, 
-                              strlen(m_ptr->name) + 1);
+               tipc_cfg_append_tlv(buf, TIPC_TLV_MEDIA_NAME, m_ptr->name, 
+                                   strlen(m_ptr->name) + 1);
        }
-       read_unlock_bh(&net_lock);
+       read_unlock_bh(&tipc_net_lock);
        return buf;
 }
 
@@ -282,7 +283,7 @@ static struct bearer *bearer_find(const char *name)
        struct bearer *b_ptr;
        u32 i;
 
-       for (i = 0, b_ptr = bearers; i < MAX_BEARERS; i++, b_ptr++) {
+       for (i = 0, b_ptr = tipc_bearers; i < MAX_BEARERS; i++, b_ptr++) {
                if (b_ptr->active && (!strcmp(b_ptr->publ.name, name)))
                        return b_ptr;
        }
@@ -290,16 +291,16 @@ static struct bearer *bearer_find(const char *name)
 }
 
 /**
- * bearer_find - locates bearer object with matching interface name
+ * tipc_bearer_find_interface - locates bearer object with matching interface name
  */
 
-struct bearer *bearer_find_interface(const char *if_name)
+struct bearer *tipc_bearer_find_interface(const char *if_name)
 {
        struct bearer *b_ptr;
        char *b_if_name;
        u32 i;
 
-       for (i = 0, b_ptr = bearers; i < MAX_BEARERS; i++, b_ptr++) {
+       for (i = 0, b_ptr = tipc_bearers; i < MAX_BEARERS; i++, b_ptr++) {
                if (!b_ptr->active)
                        continue;
                b_if_name = strchr(b_ptr->publ.name, ':') + 1;
@@ -310,54 +311,54 @@ struct bearer *bearer_find_interface(const char *if_name)
 }
 
 /**
- * bearer_get_names - record names of bearers in buffer
+ * tipc_bearer_get_names - record names of bearers in buffer
  */
 
-struct sk_buff *bearer_get_names(void)
+struct sk_buff *tipc_bearer_get_names(void)
 {
        struct sk_buff *buf;
        struct media *m_ptr;
        struct bearer *b_ptr;
        int i, j;
 
-       buf = cfg_reply_alloc(MAX_BEARERS * TLV_SPACE(TIPC_MAX_BEARER_NAME));
+       buf = tipc_cfg_reply_alloc(MAX_BEARERS * TLV_SPACE(TIPC_MAX_BEARER_NAME));
        if (!buf)
                return NULL;
 
-       read_lock_bh(&net_lock);
+       read_lock_bh(&tipc_net_lock);
        for (i = 0, m_ptr = media_list; i < media_count; i++, m_ptr++) {
                for (j = 0; j < MAX_BEARERS; j++) {
-                       b_ptr = &bearers[j];
+                       b_ptr = &tipc_bearers[j];
                        if (b_ptr->active && (b_ptr->media == m_ptr)) {
-                               cfg_append_tlv(buf, TIPC_TLV_BEARER_NAME, 
-                                              b_ptr->publ.name, 
-                                              strlen(b_ptr->publ.name) + 1);
+                               tipc_cfg_append_tlv(buf, TIPC_TLV_BEARER_NAME, 
+                                                   b_ptr->publ.name, 
+                                                   strlen(b_ptr->publ.name) + 1);
                        }
                }
        }
-       read_unlock_bh(&net_lock);
+       read_unlock_bh(&tipc_net_lock);
        return buf;
 }
 
-void bearer_add_dest(struct bearer *b_ptr, u32 dest)
+void tipc_bearer_add_dest(struct bearer *b_ptr, u32 dest)
 {
-       nmap_add(&b_ptr->nodes, dest);
-       disc_update_link_req(b_ptr->link_req);
-       bcbearer_sort();
+       tipc_nmap_add(&b_ptr->nodes, dest);
+       tipc_disc_update_link_req(b_ptr->link_req);
+       tipc_bcbearer_sort();
 }
 
-void bearer_remove_dest(struct bearer *b_ptr, u32 dest)
+void tipc_bearer_remove_dest(struct bearer *b_ptr, u32 dest)
 {
-       nmap_remove(&b_ptr->nodes, dest);
-       disc_update_link_req(b_ptr->link_req);
-       bcbearer_sort();
+       tipc_nmap_remove(&b_ptr->nodes, dest);
+       tipc_disc_update_link_req(b_ptr->link_req);
+       tipc_bcbearer_sort();
 }
 
 /*
  * bearer_push(): Resolve bearer congestion. Force the waiting
  * links to push out their unsent packets, one packet per link
  * per iteration, until all packets are gone or congestion reoccurs.
- * 'net_lock' is read_locked when this function is called
+ * 'tipc_net_lock' is read_locked when this function is called
  * bearer.lock must be taken before calling
 * Returns binary true(1) or false(0)
  */
@@ -371,7 +372,7 @@ static int bearer_push(struct bearer *b_ptr)
 
        while (!list_empty(&b_ptr->cong_links) && (res != PUSH_FAILED)) {
                list_for_each_entry_safe(ln, tln, &b_ptr->cong_links, link_list) {
-                       res = link_push_packet(ln);
+                       res = tipc_link_push_packet(ln);
                        if (res == PUSH_FAILED)
                                break;
                        if (res == PUSH_FINISHED)
@@ -381,7 +382,7 @@ static int bearer_push(struct bearer *b_ptr)
        return list_empty(&b_ptr->cong_links);
 }
 
-void bearer_lock_push(struct bearer *b_ptr)
+void tipc_bearer_lock_push(struct bearer *b_ptr)
 {
        int res;
 
@@ -389,7 +390,7 @@ void bearer_lock_push(struct bearer *b_ptr)
        res = bearer_push(b_ptr);
        spin_unlock_bh(&b_ptr->publ.lock);
        if (res)
-               bcbearer_push();
+               tipc_bcbearer_push();
 }
 
 
@@ -404,7 +405,7 @@ void tipc_continue(struct tipc_bearer *tb_ptr)
        spin_lock_bh(&b_ptr->publ.lock);
        b_ptr->continue_count++;
        if (!list_empty(&b_ptr->cong_links))
-               k_signal((Handler)bearer_lock_push, (unsigned long)b_ptr);
+               tipc_k_signal((Handler)tipc_bearer_lock_push, (unsigned long)b_ptr);
        b_ptr->publ.blocked = 0;
        spin_unlock_bh(&b_ptr->publ.lock);
 }
@@ -413,11 +414,11 @@ void tipc_continue(struct tipc_bearer *tb_ptr)
  * Schedule link for sending of messages after the bearer 
  * has been deblocked by 'continue()'. This method is called 
  * when somebody tries to send a message via this link while 
- * the bearer is congested. 'net_lock' is in read_lock here
+ * the bearer is congested. 'tipc_net_lock' is in read_lock here
  * bearer.lock is busy
  */
 
-static void bearer_schedule_unlocked(struct bearer *b_ptr, struct link *l_ptr)
+static void tipc_bearer_schedule_unlocked(struct bearer *b_ptr, struct link *l_ptr)
 {
        list_move_tail(&l_ptr->link_list, &b_ptr->cong_links);
 }
@@ -426,24 +427,24 @@ static void bearer_schedule_unlocked(struct bearer *b_ptr, struct link *l_ptr)
  * Schedule link for sending of messages after the bearer 
  * has been deblocked by 'continue()'. This method is called 
  * when somebody tries to send a message via this link while 
- * the bearer is congested. 'net_lock' is in read_lock here,
+ * the bearer is congested. 'tipc_net_lock' is in read_lock here,
  * bearer.lock is free
  */
 
-void bearer_schedule(struct bearer *b_ptr, struct link *l_ptr)
+void tipc_bearer_schedule(struct bearer *b_ptr, struct link *l_ptr)
 {
        spin_lock_bh(&b_ptr->publ.lock);
-       bearer_schedule_unlocked(b_ptr, l_ptr);
+       tipc_bearer_schedule_unlocked(b_ptr, l_ptr);
        spin_unlock_bh(&b_ptr->publ.lock);
 }
 
 
 /*
- * bearer_resolve_congestion(): Check if there is bearer congestion,
+ * tipc_bearer_resolve_congestion(): Check if there is bearer congestion,
  * and if there is, try to resolve it before returning.
- * 'net_lock' is read_locked when this function is called
+ * 'tipc_net_lock' is read_locked when this function is called
  */
-int bearer_resolve_congestion(struct bearer *b_ptr, struct link *l_ptr)
+int tipc_bearer_resolve_congestion(struct bearer *b_ptr, struct link *l_ptr)
 {
        int res = 1;
 
@@ -451,7 +452,7 @@ int bearer_resolve_congestion(struct bearer *b_ptr, struct link *l_ptr)
                return 1;
        spin_lock_bh(&b_ptr->publ.lock);
        if (!bearer_push(b_ptr)) {
-               bearer_schedule_unlocked(b_ptr, l_ptr);
+               tipc_bearer_schedule_unlocked(b_ptr, l_ptr);
                res = 0;
        }
        spin_unlock_bh(&b_ptr->publ.lock);
@@ -476,14 +477,19 @@ int tipc_enable_bearer(const char *name, u32 bcast_scope, u32 priority)
 
        if (tipc_mode != TIPC_NET_MODE)
                return -ENOPROTOOPT;
+
        if (!bearer_name_validate(name, &b_name) ||
-           !addr_domain_valid(bcast_scope) ||
-           !in_scope(bcast_scope, tipc_own_addr) ||
-           (priority > TIPC_NUM_LINK_PRI))
+           !tipc_addr_domain_valid(bcast_scope) ||
+           !in_scope(bcast_scope, tipc_own_addr))
+               return -EINVAL;
+
+       if ((priority < TIPC_MIN_LINK_PRI ||
+            priority > TIPC_MAX_LINK_PRI) &&
+           (priority != TIPC_MEDIA_LINK_PRI))
                return -EINVAL;
 
-       write_lock_bh(&net_lock);
-       if (!bearers)
+       write_lock_bh(&tipc_net_lock);
+       if (!tipc_bearers)
                goto failed;
 
        m_ptr = media_find(b_name.media_name);
@@ -491,22 +497,23 @@ int tipc_enable_bearer(const char *name, u32 bcast_scope, u32 priority)
                warn("No media <%s>\n", b_name.media_name);
                goto failed;
        }
-       if (priority == TIPC_NUM_LINK_PRI)
+
+       if (priority == TIPC_MEDIA_LINK_PRI)
                priority = m_ptr->priority;
 
 restart:
        bearer_id = MAX_BEARERS;
        with_this_prio = 1;
        for (i = MAX_BEARERS; i-- != 0; ) {
-               if (!bearers[i].active) {
+               if (!tipc_bearers[i].active) {
                        bearer_id = i;
                        continue;
                }
-               if (!strcmp(name, bearers[i].publ.name)) {
+               if (!strcmp(name, tipc_bearers[i].publ.name)) {
                        warn("Bearer <%s> already enabled\n", name);
                        goto failed;
                }
-               if ((bearers[i].priority == priority) &&
+               if ((tipc_bearers[i].priority == priority) &&
                    (++with_this_prio > 2)) {
                        if (priority-- == 0) {
                                warn("Third bearer <%s> with priority %u, unable to lower to %u\n",
@@ -523,7 +530,7 @@ restart:
                goto failed;
        }
 
-       b_ptr = &bearers[bearer_id];
+       b_ptr = &tipc_bearers[bearer_id];
        memset(b_ptr, 0, sizeof(struct bearer));
 
        strcpy(b_ptr->publ.name, name);
@@ -542,16 +549,16 @@ restart:
        INIT_LIST_HEAD(&b_ptr->cong_links);
        INIT_LIST_HEAD(&b_ptr->links);
        if (m_ptr->bcast) {
-               b_ptr->link_req = disc_init_link_req(b_ptr, &m_ptr->bcast_addr,
-                                                    bcast_scope, 2);
+               b_ptr->link_req = tipc_disc_init_link_req(b_ptr, &m_ptr->bcast_addr,
+                                                         bcast_scope, 2);
        }
        b_ptr->publ.lock = SPIN_LOCK_UNLOCKED;
-       write_unlock_bh(&net_lock);
-       info("Enabled bearer <%s>, discovery domain %s\n",
-            name, addr_string_fill(addr_string, bcast_scope));
+       write_unlock_bh(&tipc_net_lock);
+       info("Enabled bearer <%s>, discovery domain %s, priority %u\n",
+            name, addr_string_fill(addr_string, bcast_scope), priority);
        return 0;
 failed:
-       write_unlock_bh(&net_lock);
+       write_unlock_bh(&tipc_net_lock);
        return res;
 }
 
@@ -569,11 +576,11 @@ int tipc_block_bearer(const char *name)
        if (tipc_mode != TIPC_NET_MODE)
                return -ENOPROTOOPT;
 
-       read_lock_bh(&net_lock);
+       read_lock_bh(&tipc_net_lock);
        b_ptr = bearer_find(name);
        if (!b_ptr) {
                warn("Attempt to block unknown bearer <%s>\n", name);
-               read_unlock_bh(&net_lock);
+               read_unlock_bh(&tipc_net_lock);
                return -EINVAL;
        }
 
@@ -583,11 +590,11 @@ int tipc_block_bearer(const char *name)
                struct node *n_ptr = l_ptr->owner;
 
                spin_lock_bh(&n_ptr->lock);
-               link_reset(l_ptr);
+               tipc_link_reset(l_ptr);
                spin_unlock_bh(&n_ptr->lock);
        }
        spin_unlock_bh(&b_ptr->publ.lock);
-       read_unlock_bh(&net_lock);
+       read_unlock_bh(&tipc_net_lock);
        info("Blocked bearer <%s>\n", name);
        return TIPC_OK;
 }
@@ -595,7 +602,7 @@ int tipc_block_bearer(const char *name)
 /**
  * bearer_disable -
  * 
- * Note: This routine assumes caller holds net_lock.
+ * Note: This routine assumes caller holds tipc_net_lock.
  */
 
 static int bearer_disable(const char *name)
@@ -613,19 +620,19 @@ static int bearer_disable(const char *name)
                return -EINVAL;
        }
 
-       disc_stop_link_req(b_ptr->link_req);
+       tipc_disc_stop_link_req(b_ptr->link_req);
        spin_lock_bh(&b_ptr->publ.lock);
        b_ptr->link_req = NULL;
        b_ptr->publ.blocked = 1;
        if (b_ptr->media->disable_bearer) {
                spin_unlock_bh(&b_ptr->publ.lock);
-               write_unlock_bh(&net_lock);
+               write_unlock_bh(&tipc_net_lock);
                b_ptr->media->disable_bearer(&b_ptr->publ);
-               write_lock_bh(&net_lock);
+               write_lock_bh(&tipc_net_lock);
                spin_lock_bh(&b_ptr->publ.lock);
        }
        list_for_each_entry_safe(l_ptr, temp_l_ptr, &b_ptr->links, link_list) {
-               link_delete(l_ptr);
+               tipc_link_delete(l_ptr);
        }
        spin_unlock_bh(&b_ptr->publ.lock);
        info("Disabled bearer <%s>\n", name);
@@ -637,54 +644,54 @@ int tipc_disable_bearer(const char *name)
 {
        int res;
 
-       write_lock_bh(&net_lock);
+       write_lock_bh(&tipc_net_lock);
        res = bearer_disable(name);
-       write_unlock_bh(&net_lock);
+       write_unlock_bh(&tipc_net_lock);
        return res;
 }
 
 
 
-int bearer_init(void)
+int tipc_bearer_init(void)
 {
        int res;
 
-       write_lock_bh(&net_lock);
-       bearers = kmalloc(MAX_BEARERS * sizeof(struct bearer), GFP_ATOMIC);
+       write_lock_bh(&tipc_net_lock);
+       tipc_bearers = kmalloc(MAX_BEARERS * sizeof(struct bearer), GFP_ATOMIC);
        media_list = kmalloc(MAX_MEDIA * sizeof(struct media), GFP_ATOMIC);
-       if (bearers && media_list) {
-               memset(bearers, 0, MAX_BEARERS * sizeof(struct bearer));
+       if (tipc_bearers && media_list) {
+               memset(tipc_bearers, 0, MAX_BEARERS * sizeof(struct bearer));
                memset(media_list, 0, MAX_MEDIA * sizeof(struct media));
                res = TIPC_OK;
        } else {
-               kfree(bearers);
+               kfree(tipc_bearers);
                kfree(media_list);
-               bearers = 0;
+               tipc_bearers = 0;
                media_list = 0;
                res = -ENOMEM;
        }
-       write_unlock_bh(&net_lock);
+       write_unlock_bh(&tipc_net_lock);
        return res;
 }
 
-void bearer_stop(void)
+void tipc_bearer_stop(void)
 {
        u32 i;
 
-       if (!bearers)
+       if (!tipc_bearers)
                return;
 
        for (i = 0; i < MAX_BEARERS; i++) {
-               if (bearers[i].active)
-                       bearers[i].publ.blocked = 1;
+               if (tipc_bearers[i].active)
+                       tipc_bearers[i].publ.blocked = 1;
        }
        for (i = 0; i < MAX_BEARERS; i++) {
-               if (bearers[i].active)
-                       bearer_disable(bearers[i].publ.name);
+               if (tipc_bearers[i].active)
+                       bearer_disable(tipc_bearers[i].publ.name);
        }
-       kfree(bearers);
+       kfree(tipc_bearers);
        kfree(media_list);
-       bearers = 0;
+       tipc_bearers = 0;
        media_list = 0;
        media_count = 0;
 }
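
The lock juggling inside bearer_disable() is easy to lose in the rename noise: the bearer is marked blocked first, then both the per-bearer lock and tipc_net_lock are dropped around the media's disable_bearer() callback so that driver-level code does not run under TIPC's locks, and the locks are re-taken in their original order afterwards. A stripped-down sketch of that pattern, assuming the declarations from bearer.h and net.h are in scope (example_call_media_hook and its hook argument are invented, not part of the patch):

        /* Illustrative only: run a media-layer hook with TIPC's locks dropped.
         * Caller is assumed to hold tipc_net_lock for writing, as the callers
         * of bearer_disable() do.
         */
        static void example_call_media_hook(struct bearer *b_ptr,
                                            void (*hook)(struct tipc_bearer *))
        {
                spin_lock_bh(&b_ptr->publ.lock);
                b_ptr->publ.blocked = 1;        /* stop new traffic first */
                if (hook) {
                        spin_unlock_bh(&b_ptr->publ.lock);
                        write_unlock_bh(&tipc_net_lock);
                        hook(&b_ptr->publ);     /* runs without TIPC locks held */
                        write_lock_bh(&tipc_net_lock);
                        spin_lock_bh(&b_ptr->publ.lock);
                }
                spin_unlock_bh(&b_ptr->publ.lock);
        }
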
index 21e63d3f0183499ebe86d274abb0dc9ebfbd0474..c4e7c1c3655b84dba596da999a71ea69fb7bf775 100644 (file)
@@ -37,7 +37,7 @@
 #ifndef _TIPC_BEARER_H
 #define _TIPC_BEARER_H
 
-#include <net/tipc/tipc_bearer.h>
+#include "core.h"
 #include "bcast.h"
 
 #define MAX_BEARERS 8
@@ -114,26 +114,24 @@ struct bearer_name {
 
 struct link;
 
-extern struct bearer *bearers;
+extern struct bearer *tipc_bearers;
 
-void media_addr_printf(struct print_buf *pb, struct tipc_media_addr *a);
-struct sk_buff *media_get_names(void);
+void tipc_media_addr_printf(struct print_buf *pb, struct tipc_media_addr *a);
+struct sk_buff *tipc_media_get_names(void);
 
-struct sk_buff *bearer_get_names(void);
-void bearer_add_dest(struct bearer *b_ptr, u32 dest);
-void bearer_remove_dest(struct bearer *b_ptr, u32 dest);
-void bearer_schedule(struct bearer *b_ptr, struct link *l_ptr);
-struct bearer *bearer_find_interface(const char *if_name);
-int bearer_resolve_congestion(struct bearer *b_ptr, struct link *l_ptr);
-int bearer_init(void);
-void bearer_stop(void);
-int bearer_broadcast(struct sk_buff *buf, struct tipc_bearer *b_ptr,
-                    struct tipc_media_addr *dest);
-void bearer_lock_push(struct bearer *b_ptr);
+struct sk_buff *tipc_bearer_get_names(void);
+void tipc_bearer_add_dest(struct bearer *b_ptr, u32 dest);
+void tipc_bearer_remove_dest(struct bearer *b_ptr, u32 dest);
+void tipc_bearer_schedule(struct bearer *b_ptr, struct link *l_ptr);
+struct bearer *tipc_bearer_find_interface(const char *if_name);
+int tipc_bearer_resolve_congestion(struct bearer *b_ptr, struct link *l_ptr);
+int tipc_bearer_init(void);
+void tipc_bearer_stop(void);
+void tipc_bearer_lock_push(struct bearer *b_ptr);
 
 
 /**
- * bearer_send- sends buffer to destination over bearer 
+ * tipc_bearer_send - sends buffer to destination over bearer
  * 
  * Returns true (1) if successful, or false (0) if unable to send
  * 
@@ -150,23 +148,23 @@ void bearer_lock_push(struct bearer *b_ptr);
  * and let TIPC's link code deal with the undelivered message. 
  */
 
-static inline int bearer_send(struct bearer *b_ptr, struct sk_buff *buf,
-                             struct tipc_media_addr *dest)
+static inline int tipc_bearer_send(struct bearer *b_ptr, struct sk_buff *buf,
+                                  struct tipc_media_addr *dest)
 {
        return !b_ptr->media->send_msg(buf, &b_ptr->publ, dest);
 }
 
 /**
- * bearer_congested - determines if bearer is currently congested
+ * tipc_bearer_congested - determines if bearer is currently congested
  */
 
-static inline int bearer_congested(struct bearer *b_ptr, struct link *l_ptr)
+static inline int tipc_bearer_congested(struct bearer *b_ptr, struct link *l_ptr)
 {
        if (unlikely(b_ptr->publ.blocked))
                return 1;
        if (likely(list_empty(&b_ptr->cong_links)))
                return 0;
-       return !bearer_resolve_congestion(b_ptr, l_ptr);
+       return !tipc_bearer_resolve_congestion(b_ptr, l_ptr);
 }
 
 #endif
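
For orientation, a minimal caller sketch for the two renamed inline helpers above (example_send_frame is invented and assumes a .c file that includes bearer.h; it is not the real link-layer code): it follows the header comment's convention that a false return from tipc_bearer_send() means "keep the buffer and let the link layer retransmit", and it checks for congestion first.

        /* Hypothetical caller: returns 1 if the frame was handed to the media
         * layer, 0 if the link code should hold the buffer and retry later.
         */
        static int example_send_frame(struct link *l_ptr, struct bearer *b_ptr,
                                      struct sk_buff *buf,
                                      struct tipc_media_addr *dest)
        {
                if (tipc_bearer_congested(b_ptr, l_ptr))
                        return 0;       /* bearer busy; queue for later */
                if (!tipc_bearer_send(b_ptr, buf, dest))
                        return 0;       /* bearer refused; keep for retransmit */
                return 1;               /* handed off successfully */
        }
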
index f0f7bac51d41c68018280bc84603151b4cabbd9a..ab974ca19371caab2120e454b2c0834d8c9a8040 100644 (file)
 #include "msg.h"
 #include "bearer.h"
 
-void cluster_multicast(struct cluster *c_ptr, struct sk_buff *buf, 
-                      u32 lower, u32 upper);
-struct sk_buff *cluster_prepare_routing_msg(u32 data_size, u32 dest);
+void tipc_cltr_multicast(struct cluster *c_ptr, struct sk_buff *buf, 
+                        u32 lower, u32 upper);
+struct sk_buff *tipc_cltr_prepare_routing_msg(u32 data_size, u32 dest);
 
-struct node **local_nodes = 0;
-struct node_map cluster_bcast_nodes = {0,{0,}};
-u32 highest_allowed_slave = 0;
+struct node **tipc_local_nodes = 0;
+struct node_map tipc_cltr_bcast_nodes = {0,{0,}};
+u32 tipc_highest_allowed_slave = 0;
 
-struct cluster *cluster_create(u32 addr)
+struct cluster *tipc_cltr_create(u32 addr)
 {
        struct _zone *z_ptr;
        struct cluster *c_ptr;
@@ -77,16 +77,16 @@ struct cluster *cluster_create(u32 addr)
        }
        memset(c_ptr->nodes, 0, alloc);  
        if (in_own_cluster(addr))
-               local_nodes = c_ptr->nodes;
+               tipc_local_nodes = c_ptr->nodes;
        c_ptr->highest_slave = LOWEST_SLAVE - 1;
        c_ptr->highest_node = 0;
        
-       z_ptr = zone_find(tipc_zone(addr));
+       z_ptr = tipc_zone_find(tipc_zone(addr));
        if (z_ptr == NULL) {
-               z_ptr = zone_create(addr);
+               z_ptr = tipc_zone_create(addr);
        }
        if (z_ptr != NULL) {
-               zone_attach_cluster(z_ptr, c_ptr);
+               tipc_zone_attach_cluster(z_ptr, c_ptr);
                c_ptr->owner = z_ptr;
        }
        else {
@@ -97,23 +97,23 @@ struct cluster *cluster_create(u32 addr)
        return c_ptr;
 }
 
-void cluster_delete(struct cluster *c_ptr)
+void tipc_cltr_delete(struct cluster *c_ptr)
 {
        u32 n_num;
 
        if (!c_ptr)
                return;
        for (n_num = 1; n_num <= c_ptr->highest_node; n_num++) {
-               node_delete(c_ptr->nodes[n_num]);
+               tipc_node_delete(c_ptr->nodes[n_num]);
        }
        for (n_num = LOWEST_SLAVE; n_num <= c_ptr->highest_slave; n_num++) {
-               node_delete(c_ptr->nodes[n_num]);
+               tipc_node_delete(c_ptr->nodes[n_num]);
        }
        kfree(c_ptr->nodes);
        kfree(c_ptr);
 }
 
-u32 cluster_next_node(struct cluster *c_ptr, u32 addr)
+u32 tipc_cltr_next_node(struct cluster *c_ptr, u32 addr)
 {
        struct node *n_ptr;
        u32 n_num = tipc_node(addr) + 1;
@@ -122,24 +122,24 @@ u32 cluster_next_node(struct cluster *c_ptr, u32 addr)
                return addr;
        for (; n_num <= c_ptr->highest_node; n_num++) {
                n_ptr = c_ptr->nodes[n_num];
-               if (n_ptr && node_has_active_links(n_ptr))
+               if (n_ptr && tipc_node_has_active_links(n_ptr))
                        return n_ptr->addr;
        }
        for (n_num = 1; n_num < tipc_node(addr); n_num++) {
                n_ptr = c_ptr->nodes[n_num];
-               if (n_ptr && node_has_active_links(n_ptr))
+               if (n_ptr && tipc_node_has_active_links(n_ptr))
                        return n_ptr->addr;
        }
        return 0;
 }
 
-void cluster_attach_node(struct cluster *c_ptr, struct node *n_ptr)
+void tipc_cltr_attach_node(struct cluster *c_ptr, struct node *n_ptr)
 {
        u32 n_num = tipc_node(n_ptr->addr);
        u32 max_n_num = tipc_max_nodes;
 
        if (in_own_cluster(n_ptr->addr))
-               max_n_num = highest_allowed_slave;
+               max_n_num = tipc_highest_allowed_slave;
        assert(n_num > 0);
        assert(n_num <= max_n_num);
        assert(c_ptr->nodes[n_num] == 0);
@@ -149,12 +149,12 @@ void cluster_attach_node(struct cluster *c_ptr, struct node *n_ptr)
 }
 
 /**
- * cluster_select_router - select router to a cluster
+ * tipc_cltr_select_router - select router to a cluster
  * 
  * Uses deterministic and fair algorithm.
  */
 
-u32 cluster_select_router(struct cluster *c_ptr, u32 ref)
+u32 tipc_cltr_select_router(struct cluster *c_ptr, u32 ref)
 {
        u32 n_num;
        u32 ulim = c_ptr->highest_node;
@@ -174,29 +174,29 @@ u32 cluster_select_router(struct cluster *c_ptr, u32 ref)
 
        /* Lookup upwards with wrap-around */
        do {
-               if (node_is_up(c_ptr->nodes[n_num]))
+               if (tipc_node_is_up(c_ptr->nodes[n_num]))
                        break;
        } while (++n_num <= ulim);
        if (n_num > ulim) {
                n_num = 1;
                do {
-                       if (node_is_up(c_ptr->nodes[n_num]))
+                       if (tipc_node_is_up(c_ptr->nodes[n_num]))
                                break;
                } while (++n_num < tstart);
                if (n_num == tstart)
                        return 0;
        }
        assert(n_num <= ulim);
-       return node_select_router(c_ptr->nodes[n_num], ref);
+       return tipc_node_select_router(c_ptr->nodes[n_num], ref);
 }
 
 /**
- * cluster_select_node - select destination node within a remote cluster
+ * tipc_cltr_select_node - select destination node within a remote cluster
  * 
  * Uses deterministic and fair algorithm.
  */
 
-struct node *cluster_select_node(struct cluster *c_ptr, u32 selector)
+struct node *tipc_cltr_select_node(struct cluster *c_ptr, u32 selector)
 {
        u32 n_num;
        u32 mask = tipc_max_nodes;
@@ -215,11 +215,11 @@ struct node *cluster_select_node(struct cluster *c_ptr, u32 selector)
 
        /* Lookup upwards with wrap-around */
        for (n_num = start_entry; n_num <= c_ptr->highest_node; n_num++) {
-               if (node_has_active_links(c_ptr->nodes[n_num]))
+               if (tipc_node_has_active_links(c_ptr->nodes[n_num]))
                        return c_ptr->nodes[n_num];
        }
        for (n_num = 1; n_num < start_entry; n_num++) {
-               if (node_has_active_links(c_ptr->nodes[n_num]))
+               if (tipc_node_has_active_links(c_ptr->nodes[n_num]))
                        return c_ptr->nodes[n_num];
        }
        return 0;
@@ -229,7 +229,7 @@ struct node *cluster_select_node(struct cluster *c_ptr, u32 selector)
  *    Routing table management: See description in node.c
  */
 
-struct sk_buff *cluster_prepare_routing_msg(u32 data_size, u32 dest)
+struct sk_buff *tipc_cltr_prepare_routing_msg(u32 data_size, u32 dest)
 {
        u32 size = INT_H_SIZE + data_size;
        struct sk_buff *buf = buf_acquire(size);
@@ -243,39 +243,39 @@ struct sk_buff *cluster_prepare_routing_msg(u32 data_size, u32 dest)
        return buf;
 }
 
-void cluster_bcast_new_route(struct cluster *c_ptr, u32 dest,
+void tipc_cltr_bcast_new_route(struct cluster *c_ptr, u32 dest,
                             u32 lower, u32 upper)
 {
-       struct sk_buff *buf = cluster_prepare_routing_msg(0, c_ptr->addr);
+       struct sk_buff *buf = tipc_cltr_prepare_routing_msg(0, c_ptr->addr);
        struct tipc_msg *msg;
 
        if (buf) {
                msg = buf_msg(buf);
                msg_set_remote_node(msg, dest);
                msg_set_type(msg, ROUTE_ADDITION);
-               cluster_multicast(c_ptr, buf, lower, upper);
+               tipc_cltr_multicast(c_ptr, buf, lower, upper);
        } else {
                warn("Memory squeeze: broadcast of new route failed\n");
        }
 }
 
-void cluster_bcast_lost_route(struct cluster *c_ptr, u32 dest,
-                             u32 lower, u32 upper)
+void tipc_cltr_bcast_lost_route(struct cluster *c_ptr, u32 dest,
+                               u32 lower, u32 upper)
 {
-       struct sk_buff *buf = cluster_prepare_routing_msg(0, c_ptr->addr);
+       struct sk_buff *buf = tipc_cltr_prepare_routing_msg(0, c_ptr->addr);
        struct tipc_msg *msg;
 
        if (buf) {
                msg = buf_msg(buf);
                msg_set_remote_node(msg, dest);
                msg_set_type(msg, ROUTE_REMOVAL);
-               cluster_multicast(c_ptr, buf, lower, upper);
+               tipc_cltr_multicast(c_ptr, buf, lower, upper);
        } else {
                warn("Memory squeeze: broadcast of lost route failed\n");
        }
 }
 
-void cluster_send_slave_routes(struct cluster *c_ptr, u32 dest)
+void tipc_cltr_send_slave_routes(struct cluster *c_ptr, u32 dest)
 {
        struct sk_buff *buf;
        struct tipc_msg *msg;
@@ -288,21 +288,21 @@ void cluster_send_slave_routes(struct cluster *c_ptr, u32 dest)
        assert(in_own_cluster(c_ptr->addr));
        if (highest <= LOWEST_SLAVE)
                return;
-       buf = cluster_prepare_routing_msg(highest - LOWEST_SLAVE + 1,
-                                         c_ptr->addr);
+       buf = tipc_cltr_prepare_routing_msg(highest - LOWEST_SLAVE + 1,
+                                           c_ptr->addr);
        if (buf) {
                msg = buf_msg(buf);
                msg_set_remote_node(msg, c_ptr->addr);
                msg_set_type(msg, SLAVE_ROUTING_TABLE);
                for (n_num = LOWEST_SLAVE; n_num <= highest; n_num++) {
                        if (c_ptr->nodes[n_num] && 
-                           node_has_active_links(c_ptr->nodes[n_num])) {
+                           tipc_node_has_active_links(c_ptr->nodes[n_num])) {
                                send = 1;
                                msg_set_dataoctet(msg, n_num);
                        }
                }
                if (send)
-                       link_send(buf, dest, dest);
+                       tipc_link_send(buf, dest, dest);
                else
                        buf_discard(buf);
        } else {
@@ -310,7 +310,7 @@ void cluster_send_slave_routes(struct cluster *c_ptr, u32 dest)
        }
 }
 
-void cluster_send_ext_routes(struct cluster *c_ptr, u32 dest)
+void tipc_cltr_send_ext_routes(struct cluster *c_ptr, u32 dest)
 {
        struct sk_buff *buf;
        struct tipc_msg *msg;
@@ -323,20 +323,20 @@ void cluster_send_ext_routes(struct cluster *c_ptr, u32 dest)
        assert(!is_slave(dest));
        assert(in_own_cluster(dest));
        highest = c_ptr->highest_node;
-       buf = cluster_prepare_routing_msg(highest + 1, c_ptr->addr);
+       buf = tipc_cltr_prepare_routing_msg(highest + 1, c_ptr->addr);
        if (buf) {
                msg = buf_msg(buf);
                msg_set_remote_node(msg, c_ptr->addr);
                msg_set_type(msg, EXT_ROUTING_TABLE);
                for (n_num = 1; n_num <= highest; n_num++) {
                        if (c_ptr->nodes[n_num] && 
-                           node_has_active_links(c_ptr->nodes[n_num])) {
+                           tipc_node_has_active_links(c_ptr->nodes[n_num])) {
                                send = 1;
                                msg_set_dataoctet(msg, n_num);
                        }
                }
                if (send)
-                       link_send(buf, dest, dest);
+                       tipc_link_send(buf, dest, dest);
                else
                        buf_discard(buf);
        } else {
@@ -344,7 +344,7 @@ void cluster_send_ext_routes(struct cluster *c_ptr, u32 dest)
        }
 }
 
-void cluster_send_local_routes(struct cluster *c_ptr, u32 dest)
+void tipc_cltr_send_local_routes(struct cluster *c_ptr, u32 dest)
 {
        struct sk_buff *buf;
        struct tipc_msg *msg;
@@ -354,20 +354,20 @@ void cluster_send_local_routes(struct cluster *c_ptr, u32 dest)
 
        assert(is_slave(dest));
        assert(in_own_cluster(c_ptr->addr));
-       buf = cluster_prepare_routing_msg(highest, c_ptr->addr);
+       buf = tipc_cltr_prepare_routing_msg(highest, c_ptr->addr);
        if (buf) {
                msg = buf_msg(buf);
                msg_set_remote_node(msg, c_ptr->addr);
                msg_set_type(msg, LOCAL_ROUTING_TABLE);
                for (n_num = 1; n_num <= highest; n_num++) {
                        if (c_ptr->nodes[n_num] && 
-                           node_has_active_links(c_ptr->nodes[n_num])) {
+                           tipc_node_has_active_links(c_ptr->nodes[n_num])) {
                                send = 1;
                                msg_set_dataoctet(msg, n_num);
                        }
                }
                if (send)
-                       link_send(buf, dest, dest);
+                       tipc_link_send(buf, dest, dest);
                else
                        buf_discard(buf);
        } else {
@@ -375,7 +375,7 @@ void cluster_send_local_routes(struct cluster *c_ptr, u32 dest)
        }
 }
 
-void cluster_recv_routing_table(struct sk_buff *buf)
+void tipc_cltr_recv_routing_table(struct sk_buff *buf)
 {
        struct tipc_msg *msg = buf_msg(buf);
        struct cluster *c_ptr;
@@ -388,9 +388,9 @@ void cluster_recv_routing_table(struct sk_buff *buf)
        u32 c_num;
        u32 n_num;
 
-       c_ptr = cluster_find(rem_node);
+       c_ptr = tipc_cltr_find(rem_node);
        if (!c_ptr) {
-               c_ptr = cluster_create(rem_node);
+               c_ptr = tipc_cltr_create(rem_node);
                if (!c_ptr) {
                        buf_discard(buf);
                        return;
@@ -412,10 +412,10 @@ void cluster_recv_routing_table(struct sk_buff *buf)
                                u32 addr = tipc_addr(z_num, c_num, n_num);
                                n_ptr = c_ptr->nodes[n_num];
                                if (!n_ptr) {
-                                       n_ptr = node_create(addr);
+                                       n_ptr = tipc_node_create(addr);
                                }
                                if (n_ptr)
-                                       node_add_router(n_ptr, router);
+                                       tipc_node_add_router(n_ptr, router);
                        }
                }
                break;
@@ -428,10 +428,10 @@ void cluster_recv_routing_table(struct sk_buff *buf)
                                u32 addr = tipc_addr(z_num, c_num, slave_num);
                                n_ptr = c_ptr->nodes[slave_num];
                                if (!n_ptr) {
-                                       n_ptr = node_create(addr);
+                                       n_ptr = tipc_node_create(addr);
                                }
                                if (n_ptr)
-                                       node_add_router(n_ptr, router);
+                                       tipc_node_add_router(n_ptr, router);
                        }
                }
                break;
@@ -445,9 +445,9 @@ void cluster_recv_routing_table(struct sk_buff *buf)
                }
                n_ptr = c_ptr->nodes[tipc_node(rem_node)];
                if (!n_ptr)
-                       n_ptr = node_create(rem_node);
+                       n_ptr = tipc_node_create(rem_node);
                if (n_ptr)
-                       node_add_router(n_ptr, router);
+                       tipc_node_add_router(n_ptr, router);
                break;
        case ROUTE_REMOVAL:
                if (!is_slave(tipc_own_addr)) {
@@ -459,7 +459,7 @@ void cluster_recv_routing_table(struct sk_buff *buf)
                }
                n_ptr = c_ptr->nodes[tipc_node(rem_node)];
                if (n_ptr)
-                       node_remove_router(n_ptr, router);
+                       tipc_node_remove_router(n_ptr, router);
                break;
        default:
                assert(!"Illegal routing manager message received\n");
@@ -467,7 +467,7 @@ void cluster_recv_routing_table(struct sk_buff *buf)
        buf_discard(buf);
 }
 
-void cluster_remove_as_router(struct cluster *c_ptr, u32 router)
+void tipc_cltr_remove_as_router(struct cluster *c_ptr, u32 router)
 {
        u32 start_entry;
        u32 tstop;
@@ -486,17 +486,17 @@ void cluster_remove_as_router(struct cluster *c_ptr, u32 router)
 
        for (n_num = start_entry; n_num <= tstop; n_num++) {
                if (c_ptr->nodes[n_num]) {
-                       node_remove_router(c_ptr->nodes[n_num], router);
+                       tipc_node_remove_router(c_ptr->nodes[n_num], router);
                }
        }
 }
 
 /**
- * cluster_multicast - multicast message to local nodes 
+ * tipc_cltr_multicast - multicast message to local nodes 
  */
 
-void cluster_multicast(struct cluster *c_ptr, struct sk_buff *buf, 
-                      u32 lower, u32 upper)
+void tipc_cltr_multicast(struct cluster *c_ptr, struct sk_buff *buf, 
+                        u32 lower, u32 upper)
 {
        struct sk_buff *buf_copy;
        struct node *n_ptr;
@@ -505,9 +505,9 @@ void cluster_multicast(struct cluster *c_ptr, struct sk_buff *buf,
 
        assert(lower <= upper);
        assert(((lower >= 1) && (lower <= tipc_max_nodes)) ||
-              ((lower >= LOWEST_SLAVE) && (lower <= highest_allowed_slave)));
+              ((lower >= LOWEST_SLAVE) && (lower <= tipc_highest_allowed_slave)));
        assert(((upper >= 1) && (upper <= tipc_max_nodes)) ||
-              ((upper >= LOWEST_SLAVE) && (upper <= highest_allowed_slave)));
+              ((upper >= LOWEST_SLAVE) && (upper <= tipc_highest_allowed_slave)));
        assert(in_own_cluster(c_ptr->addr));
 
        tstop = is_slave(upper) ? c_ptr->highest_slave : c_ptr->highest_node;
@@ -515,22 +515,22 @@ void cluster_multicast(struct cluster *c_ptr, struct sk_buff *buf,
                tstop = upper;
        for (n_num = lower; n_num <= tstop; n_num++) {
                n_ptr = c_ptr->nodes[n_num];
-               if (n_ptr && node_has_active_links(n_ptr)) {
+               if (n_ptr && tipc_node_has_active_links(n_ptr)) {
                        buf_copy = skb_copy(buf, GFP_ATOMIC);
                        if (buf_copy == NULL)
                                break;
                        msg_set_destnode(buf_msg(buf_copy), n_ptr->addr);
-                       link_send(buf_copy, n_ptr->addr, n_ptr->addr);
+                       tipc_link_send(buf_copy, n_ptr->addr, n_ptr->addr);
                }
        }
        buf_discard(buf);
 }
 
 /**
- * cluster_broadcast - broadcast message to all nodes within cluster
+ * tipc_cltr_broadcast - broadcast message to all nodes within cluster
  */
 
-void cluster_broadcast(struct sk_buff *buf)
+void tipc_cltr_broadcast(struct sk_buff *buf)
 {
        struct sk_buff *buf_copy;
        struct cluster *c_ptr;
@@ -541,7 +541,7 @@ void cluster_broadcast(struct sk_buff *buf)
        u32 node_type;
 
        if (tipc_mode == TIPC_NET_MODE) {
-               c_ptr = cluster_find(tipc_own_addr);
+               c_ptr = tipc_cltr_find(tipc_own_addr);
                assert(in_own_cluster(c_ptr->addr));    /* For now */
 
                /* Send to standard nodes, then repeat loop sending to slaves */
@@ -550,14 +550,14 @@ void cluster_broadcast(struct sk_buff *buf)
                for (node_type = 1; node_type <= 2; node_type++) {
                        for (n_num = tstart; n_num <= tstop; n_num++) {
                                n_ptr = c_ptr->nodes[n_num];
-                               if (n_ptr && node_has_active_links(n_ptr)) {
+                               if (n_ptr && tipc_node_has_active_links(n_ptr)) {
                                        buf_copy = skb_copy(buf, GFP_ATOMIC);
                                        if (buf_copy == NULL)
                                                goto exit;
                                        msg_set_destnode(buf_msg(buf_copy), 
                                                         n_ptr->addr);
-                                       link_send(buf_copy, n_ptr->addr, 
-                                                 n_ptr->addr);
+                                       tipc_link_send(buf_copy, n_ptr->addr, 
+                                                      n_ptr->addr);
                                }
                        }
                        tstart = LOWEST_SLAVE;
@@ -568,9 +568,9 @@ exit:
        buf_discard(buf);
 }
 
-int cluster_init(void)
+int tipc_cltr_init(void)
 {
-       highest_allowed_slave = LOWEST_SLAVE + tipc_max_slaves;
-       return cluster_create(tipc_own_addr) ? TIPC_OK : -ENOMEM;
+       tipc_highest_allowed_slave = LOWEST_SLAVE + tipc_max_slaves;
+       return tipc_cltr_create(tipc_own_addr) ? TIPC_OK : -ENOMEM;
 }
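
The "deterministic and fair algorithm" mentioned in the tipc_cltr_select_router()/tipc_cltr_select_node() comments boils down to deriving a start slot from the selector (or reference) and scanning upward with wrap-around until a usable node is found. A stand-alone sketch of that pattern follows; it is plain C with invented names, not the kernel code, which additionally distinguishes slave entries and bounds the scan by highest_node:

        #include <stddef.h>

        /* Return the first "up" slot at or after start, wrapping around,
         * or -1 if none is usable.  With start = selector % nslots the
         * same selector always picks the same entry, while different
         * selectors spread the load -- deterministic and fair.
         */
        static int pick_slot(const int *up, size_t nslots, size_t start)
        {
                size_t i;

                for (i = 0; i < nslots; i++) {
                        size_t n = (start + i) % nslots;
                        if (up[n])
                                return (int)n;
                }
                return -1;
        }
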
 
index 1ffb095991df4bac1ea995df319b368d962f4678..9963642e105833dbfb05ac4cff0b2f9bfac5626a 100644 (file)
@@ -60,29 +60,29 @@ struct cluster {
 };
 
 
-extern struct node **local_nodes;
-extern u32 highest_allowed_slave;
-extern struct node_map cluster_bcast_nodes;
+extern struct node **tipc_local_nodes;
+extern u32 tipc_highest_allowed_slave;
+extern struct node_map tipc_cltr_bcast_nodes;
 
-void cluster_remove_as_router(struct cluster *c_ptr, u32 router);
-void cluster_send_ext_routes(struct cluster *c_ptr, u32 dest);
-struct node *cluster_select_node(struct cluster *c_ptr, u32 selector);
-u32 cluster_select_router(struct cluster *c_ptr, u32 ref);
-void cluster_recv_routing_table(struct sk_buff *buf);
-struct cluster *cluster_create(u32 addr);
-void cluster_delete(struct cluster *c_ptr);
-void cluster_attach_node(struct cluster *c_ptr, struct node *n_ptr);
-void cluster_send_slave_routes(struct cluster *c_ptr, u32 dest);
-void cluster_broadcast(struct sk_buff *buf);
-int cluster_init(void);
-u32 cluster_next_node(struct cluster *c_ptr, u32 addr);
-void cluster_bcast_new_route(struct cluster *c_ptr, u32 dest, u32 lo, u32 hi);
-void cluster_send_local_routes(struct cluster *c_ptr, u32 dest);
-void cluster_bcast_lost_route(struct cluster *c_ptr, u32 dest, u32 lo, u32 hi);
+void tipc_cltr_remove_as_router(struct cluster *c_ptr, u32 router);
+void tipc_cltr_send_ext_routes(struct cluster *c_ptr, u32 dest);
+struct node *tipc_cltr_select_node(struct cluster *c_ptr, u32 selector);
+u32 tipc_cltr_select_router(struct cluster *c_ptr, u32 ref);
+void tipc_cltr_recv_routing_table(struct sk_buff *buf);
+struct cluster *tipc_cltr_create(u32 addr);
+void tipc_cltr_delete(struct cluster *c_ptr);
+void tipc_cltr_attach_node(struct cluster *c_ptr, struct node *n_ptr);
+void tipc_cltr_send_slave_routes(struct cluster *c_ptr, u32 dest);
+void tipc_cltr_broadcast(struct sk_buff *buf);
+int tipc_cltr_init(void);
+u32 tipc_cltr_next_node(struct cluster *c_ptr, u32 addr);
+void tipc_cltr_bcast_new_route(struct cluster *c_ptr, u32 dest, u32 lo, u32 hi);
+void tipc_cltr_send_local_routes(struct cluster *c_ptr, u32 dest);
+void tipc_cltr_bcast_lost_route(struct cluster *c_ptr, u32 dest, u32 lo, u32 hi);
 
-static inline struct cluster *cluster_find(u32 addr)
+static inline struct cluster *tipc_cltr_find(u32 addr)
 {
-       struct _zone *z_ptr = zone_find(addr);
+       struct _zone *z_ptr = tipc_zone_find(addr);
 
        if (z_ptr)
                return z_ptr->clusters[1];
index 8ddef4fce2c2db36d8312a3cdde87b74ededa653..3c8e6740e5aee653151580dd67d5df4bcd09a492 100644 (file)
@@ -70,13 +70,13 @@ static int req_tlv_space;           /* request message TLV area size */
 static int rep_headroom;               /* reply message headroom to use */
 
 
-void cfg_link_event(u32 addr, char *name, int up)
+void tipc_cfg_link_event(u32 addr, char *name, int up)
 {
        /* TIPC DOESN'T HANDLE LINK EVENT SUBSCRIPTIONS AT THE MOMENT */
 }
 
 
-struct sk_buff *cfg_reply_alloc(int payload_size)
+struct sk_buff *tipc_cfg_reply_alloc(int payload_size)
 {
        struct sk_buff *buf;
 
@@ -86,14 +86,14 @@ struct sk_buff *cfg_reply_alloc(int payload_size)
        return buf;
 }
 
-int cfg_append_tlv(struct sk_buff *buf, int tlv_type, 
-                  void *tlv_data, int tlv_data_size)
+int tipc_cfg_append_tlv(struct sk_buff *buf, int tlv_type, 
+                       void *tlv_data, int tlv_data_size)
 {
        struct tlv_desc *tlv = (struct tlv_desc *)buf->tail;
        int new_tlv_space = TLV_SPACE(tlv_data_size);
 
        if (skb_tailroom(buf) < new_tlv_space) {
-               dbg("cfg_append_tlv unable to append TLV\n");
+               dbg("tipc_cfg_append_tlv unable to append TLV\n");
                return 0;
        }
        skb_put(buf, new_tlv_space);
@@ -104,28 +104,28 @@ int cfg_append_tlv(struct sk_buff *buf, int tlv_type,
        return 1;
 }
 
-struct sk_buff *cfg_reply_unsigned_type(u16 tlv_type, u32 value)
+struct sk_buff *tipc_cfg_reply_unsigned_type(u16 tlv_type, u32 value)
 {
        struct sk_buff *buf;
        u32 value_net;
 
-       buf = cfg_reply_alloc(TLV_SPACE(sizeof(value)));
+       buf = tipc_cfg_reply_alloc(TLV_SPACE(sizeof(value)));
        if (buf) {
                value_net = htonl(value);
-               cfg_append_tlv(buf, tlv_type, &value_net, 
-                              sizeof(value_net));
+               tipc_cfg_append_tlv(buf, tlv_type, &value_net, 
+                                   sizeof(value_net));
        }
        return buf;
 }
 
-struct sk_buff *cfg_reply_string_type(u16 tlv_type, char *string)
+struct sk_buff *tipc_cfg_reply_string_type(u16 tlv_type, char *string)
 {
        struct sk_buff *buf;
        int string_len = strlen(string) + 1;
 
-       buf = cfg_reply_alloc(TLV_SPACE(string_len));
+       buf = tipc_cfg_reply_alloc(TLV_SPACE(string_len));
        if (buf)
-               cfg_append_tlv(buf, tlv_type, string, string_len);
+               tipc_cfg_append_tlv(buf, tlv_type, string, string_len);
        return buf;
 }
 
@@ -246,7 +246,7 @@ static void cfg_cmd_event(struct tipc_cmd_msg *msg,
        exit:
        rmsg.result_len = htonl(msg_sect[1].iov_len);
        rmsg.retval = htonl(rv);
-       cfg_respond(msg_sect, 2u, orig);
+       tipc_cfg_respond(msg_sect, 2u, orig);
 }
 #endif
 
@@ -255,26 +255,26 @@ static struct sk_buff *cfg_enable_bearer(void)
        struct tipc_bearer_config *args;
 
        if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_BEARER_CONFIG))
-               return cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
+               return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
 
        args = (struct tipc_bearer_config *)TLV_DATA(req_tlv_area);
        if (tipc_enable_bearer(args->name,
                               ntohl(args->detect_scope),
                               ntohl(args->priority)))
-               return cfg_reply_error_string("unable to enable bearer");
+               return tipc_cfg_reply_error_string("unable to enable bearer");
 
-       return cfg_reply_none();
+       return tipc_cfg_reply_none();
 }
 
 static struct sk_buff *cfg_disable_bearer(void)
 {
        if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_BEARER_NAME))
-               return cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
+               return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
 
        if (tipc_disable_bearer((char *)TLV_DATA(req_tlv_area)))
-               return cfg_reply_error_string("unable to disable bearer");
+               return tipc_cfg_reply_error_string("unable to disable bearer");
 
-       return cfg_reply_none();
+       return tipc_cfg_reply_none();
 }
 
 static struct sk_buff *cfg_set_own_addr(void)
@@ -282,25 +282,25 @@ static struct sk_buff *cfg_set_own_addr(void)
        u32 addr;
 
        if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_NET_ADDR))
-               return cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
+               return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
 
        addr = *(u32 *)TLV_DATA(req_tlv_area);
        addr = ntohl(addr);
        if (addr == tipc_own_addr)
-               return cfg_reply_none();
-       if (!addr_node_valid(addr))
-               return cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
-                                             " (node address)");
+               return tipc_cfg_reply_none();
+       if (!tipc_addr_node_valid(addr))
+               return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
+                                                  " (node address)");
        if (tipc_own_addr)
-               return cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
-                                             " (cannot change node address once assigned)");
+               return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
+                                                  " (cannot change node address once assigned)");
 
        spin_unlock_bh(&config_lock);
-       stop_net();
+       tipc_core_stop_net();
        tipc_own_addr = addr;
-       start_net();
+       tipc_core_start_net();
        spin_lock_bh(&config_lock);
-       return cfg_reply_none();
+       return tipc_cfg_reply_none();
 }
 
 static struct sk_buff *cfg_set_remote_mng(void)
@@ -308,12 +308,12 @@ static struct sk_buff *cfg_set_remote_mng(void)
        u32 value;
 
        if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_UNSIGNED))
-               return cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
+               return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
 
        value = *(u32 *)TLV_DATA(req_tlv_area);
        value = ntohl(value);
        tipc_remote_management = (value != 0);
-       return cfg_reply_none();
+       return tipc_cfg_reply_none();
 }
 
 static struct sk_buff *cfg_set_max_publications(void)
@@ -321,15 +321,15 @@ static struct sk_buff *cfg_set_max_publications(void)
        u32 value;
 
        if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_UNSIGNED))
-               return cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
+               return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
 
        value = *(u32 *)TLV_DATA(req_tlv_area);
        value = ntohl(value);
        if (value != delimit(value, 1, 65535))
-               return cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
-                                             " (max publications must be 1-65535)");
+               return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
+                                                  " (max publications must be 1-65535)");
        tipc_max_publications = value;
-       return cfg_reply_none();
+       return tipc_cfg_reply_none();
 }
 
 static struct sk_buff *cfg_set_max_subscriptions(void)
@@ -337,15 +337,15 @@ static struct sk_buff *cfg_set_max_subscriptions(void)
        u32 value;
 
        if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_UNSIGNED))
-               return cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
+               return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
 
        value = *(u32 *)TLV_DATA(req_tlv_area);
        value = ntohl(value);
        if (value != delimit(value, 1, 65535))
-               return cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
-                                             " (max subscriptions must be 1-65535");
+               return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
+                                                  " (max subscriptions must be 1-65535)");
        tipc_max_subscriptions = value;
-       return cfg_reply_none();
+       return tipc_cfg_reply_none();
 }
 
 static struct sk_buff *cfg_set_max_ports(void)
@@ -354,31 +354,31 @@ static struct sk_buff *cfg_set_max_ports(void)
        u32 value;
 
        if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_UNSIGNED))
-               return cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
+               return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
        value = *(u32 *)TLV_DATA(req_tlv_area);
        value = ntohl(value);
        if (value != delimit(value, 127, 65535))
-               return cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
-                                             " (max ports must be 127-65535)");
+               return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
+                                                  " (max ports must be 127-65535)");
 
        if (value == tipc_max_ports)
-               return cfg_reply_none();
+               return tipc_cfg_reply_none();
 
        if (atomic_read(&tipc_user_count) > 2)
-               return cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
-                                             " (cannot change max ports while TIPC users exist)");
+               return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
+                                                  " (cannot change max ports while TIPC users exist)");
 
        spin_unlock_bh(&config_lock);
        orig_mode = tipc_get_mode();
        if (orig_mode == TIPC_NET_MODE)
-               stop_net();
-       stop_core();
+               tipc_core_stop_net();
+       tipc_core_stop();
        tipc_max_ports = value;
-       start_core();
+       tipc_core_start();
        if (orig_mode == TIPC_NET_MODE)
-               start_net();
+               tipc_core_start_net();
        spin_lock_bh(&config_lock);
-       return cfg_reply_none();
+       return tipc_cfg_reply_none();
 }
 
 static struct sk_buff *set_net_max(int value, int *parameter)
@@ -388,13 +388,13 @@ static struct sk_buff *set_net_max(int value, int *parameter)
        if (value != *parameter) {
                orig_mode = tipc_get_mode();
                if (orig_mode == TIPC_NET_MODE)
-                       stop_net();
+                       tipc_core_stop_net();
                *parameter = value;
                if (orig_mode == TIPC_NET_MODE)
-                       start_net();
+                       tipc_core_start_net();
        }
 
-       return cfg_reply_none();
+       return tipc_cfg_reply_none();
 }
 
 static struct sk_buff *cfg_set_max_zones(void)
@@ -402,12 +402,12 @@ static struct sk_buff *cfg_set_max_zones(void)
        u32 value;
 
        if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_UNSIGNED))
-               return cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
+               return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
        value = *(u32 *)TLV_DATA(req_tlv_area);
        value = ntohl(value);
        if (value != delimit(value, 1, 255))
-               return cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
-                                             " (max zones must be 1-255)");
+               return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
+                                                  " (max zones must be 1-255)");
        return set_net_max(value, &tipc_max_zones);
 }
 
@@ -416,13 +416,13 @@ static struct sk_buff *cfg_set_max_clusters(void)
        u32 value;
 
        if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_UNSIGNED))
-               return cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
+               return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
        value = *(u32 *)TLV_DATA(req_tlv_area);
        value = ntohl(value);
        if (value != 1)
-               return cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
-                                             " (max clusters fixed at 1)");
-       return cfg_reply_none();
+               return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
+                                                  " (max clusters fixed at 1)");
+       return tipc_cfg_reply_none();
 }
 
 static struct sk_buff *cfg_set_max_nodes(void)
@@ -430,12 +430,12 @@ static struct sk_buff *cfg_set_max_nodes(void)
        u32 value;
 
        if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_UNSIGNED))
-               return cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
+               return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
        value = *(u32 *)TLV_DATA(req_tlv_area);
        value = ntohl(value);
        if (value != delimit(value, 8, 2047))
-               return cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
-                                             " (max nodes must be 8-2047)");
+               return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
+                                                  " (max nodes must be 8-2047)");
        return set_net_max(value, &tipc_max_nodes);
 }
 
@@ -444,13 +444,13 @@ static struct sk_buff *cfg_set_max_slaves(void)
        u32 value;
 
        if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_UNSIGNED))
-               return cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
+               return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
        value = *(u32 *)TLV_DATA(req_tlv_area);
        value = ntohl(value);
        if (value != 0)
-               return cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
-                                             " (max secondary nodes fixed at 0)");
-       return cfg_reply_none();
+               return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
+                                                  " (max secondary nodes fixed at 0)");
+       return tipc_cfg_reply_none();
 }
 
 static struct sk_buff *cfg_set_netid(void)
@@ -458,22 +458,22 @@ static struct sk_buff *cfg_set_netid(void)
        u32 value;
 
        if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_UNSIGNED))
-               return cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
+               return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
        value = *(u32 *)TLV_DATA(req_tlv_area);
        value = ntohl(value);
        if (value != delimit(value, 1, 9999))
-               return cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
-                                             " (network id must be 1-9999)");
+               return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
+                                                  " (network id must be 1-9999)");
 
        if (tipc_own_addr)
-               return cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
-                                             " (cannot change network id once part of network)");
+               return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
+                                                  " (cannot change network id once part of network)");
        
        return set_net_max(value, &tipc_net_id);
 }
 
-struct sk_buff *cfg_do_cmd(u32 orig_node, u16 cmd, const void *request_area,
-                          int request_space, int reply_headroom)
+struct sk_buff *tipc_cfg_do_cmd(u32 orig_node, u16 cmd, const void *request_area,
+                               int request_space, int reply_headroom)
 {
        struct sk_buff *rep_tlv_buf;
 
@@ -490,19 +490,19 @@ struct sk_buff *cfg_do_cmd(u32 orig_node, u16 cmd, const void *request_area,
        if (likely(orig_node == tipc_own_addr)) {
                /* command is permitted */
        } else if (cmd >= 0x8000) {
-               rep_tlv_buf = cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
-                                                    " (cannot be done remotely)");
+               rep_tlv_buf = tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
+                                                         " (cannot be done remotely)");
                goto exit;
        } else if (!tipc_remote_management) {
-               rep_tlv_buf = cfg_reply_error_string(TIPC_CFG_NO_REMOTE);
+               rep_tlv_buf = tipc_cfg_reply_error_string(TIPC_CFG_NO_REMOTE);
                goto exit;
        }
        else if (cmd >= 0x4000) {
                u32 domain = 0;
 
-               if ((nametbl_translate(TIPC_ZM_SRV, 0, &domain) == 0) ||
+               if ((tipc_nametbl_translate(TIPC_ZM_SRV, 0, &domain) == 0) ||
                    (domain != orig_node)) {
-                       rep_tlv_buf = cfg_reply_error_string(TIPC_CFG_NOT_ZONE_MSTR);
+                       rep_tlv_buf = tipc_cfg_reply_error_string(TIPC_CFG_NOT_ZONE_MSTR);
                        goto exit;
                }
        }
@@ -511,50 +511,50 @@ struct sk_buff *cfg_do_cmd(u32 orig_node, u16 cmd, const void *request_area,
 
        switch (cmd) {
        case TIPC_CMD_NOOP:
-               rep_tlv_buf = cfg_reply_none();
+               rep_tlv_buf = tipc_cfg_reply_none();
                break;
        case TIPC_CMD_GET_NODES:
-               rep_tlv_buf = node_get_nodes(req_tlv_area, req_tlv_space);
+               rep_tlv_buf = tipc_node_get_nodes(req_tlv_area, req_tlv_space);
                break;
        case TIPC_CMD_GET_LINKS:
-               rep_tlv_buf = node_get_links(req_tlv_area, req_tlv_space);
+               rep_tlv_buf = tipc_node_get_links(req_tlv_area, req_tlv_space);
                break;
        case TIPC_CMD_SHOW_LINK_STATS:
-               rep_tlv_buf = link_cmd_show_stats(req_tlv_area, req_tlv_space);
+               rep_tlv_buf = tipc_link_cmd_show_stats(req_tlv_area, req_tlv_space);
                break;
        case TIPC_CMD_RESET_LINK_STATS:
-               rep_tlv_buf = link_cmd_reset_stats(req_tlv_area, req_tlv_space);
+               rep_tlv_buf = tipc_link_cmd_reset_stats(req_tlv_area, req_tlv_space);
                break;
        case TIPC_CMD_SHOW_NAME_TABLE:
-               rep_tlv_buf = nametbl_get(req_tlv_area, req_tlv_space);
+               rep_tlv_buf = tipc_nametbl_get(req_tlv_area, req_tlv_space);
                break;
        case TIPC_CMD_GET_BEARER_NAMES:
-               rep_tlv_buf = bearer_get_names();
+               rep_tlv_buf = tipc_bearer_get_names();
                break;
        case TIPC_CMD_GET_MEDIA_NAMES:
-               rep_tlv_buf = media_get_names();
+               rep_tlv_buf = tipc_media_get_names();
                break;
        case TIPC_CMD_SHOW_PORTS:
-               rep_tlv_buf = port_get_ports();
+               rep_tlv_buf = tipc_port_get_ports();
                break;
 #if 0
        case TIPC_CMD_SHOW_PORT_STATS:
                rep_tlv_buf = port_show_stats(req_tlv_area, req_tlv_space);
                break;
        case TIPC_CMD_RESET_PORT_STATS:
-               rep_tlv_buf = cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED);
+               rep_tlv_buf = tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED);
                break;
 #endif
        case TIPC_CMD_SET_LOG_SIZE:
-               rep_tlv_buf = log_resize(req_tlv_area, req_tlv_space);
+               rep_tlv_buf = tipc_log_resize(req_tlv_area, req_tlv_space);
                break;
        case TIPC_CMD_DUMP_LOG:
-               rep_tlv_buf = log_dump();
+               rep_tlv_buf = tipc_log_dump();
                break;
        case TIPC_CMD_SET_LINK_TOL:
        case TIPC_CMD_SET_LINK_PRI:
        case TIPC_CMD_SET_LINK_WINDOW:
-               rep_tlv_buf = link_cmd_config(req_tlv_area, req_tlv_space, cmd);
+               rep_tlv_buf = tipc_link_cmd_config(req_tlv_area, req_tlv_space, cmd);
                break;
        case TIPC_CMD_ENABLE_BEARER:
                rep_tlv_buf = cfg_enable_bearer();
@@ -593,31 +593,31 @@ struct sk_buff *cfg_do_cmd(u32 orig_node, u16 cmd, const void *request_area,
                rep_tlv_buf = cfg_set_netid();
                break;
        case TIPC_CMD_GET_REMOTE_MNG:
-               rep_tlv_buf = cfg_reply_unsigned(tipc_remote_management);
+               rep_tlv_buf = tipc_cfg_reply_unsigned(tipc_remote_management);
                break;
        case TIPC_CMD_GET_MAX_PORTS:
-               rep_tlv_buf = cfg_reply_unsigned(tipc_max_ports);
+               rep_tlv_buf = tipc_cfg_reply_unsigned(tipc_max_ports);
                break;
        case TIPC_CMD_GET_MAX_PUBL:
-               rep_tlv_buf = cfg_reply_unsigned(tipc_max_publications);
+               rep_tlv_buf = tipc_cfg_reply_unsigned(tipc_max_publications);
                break;
        case TIPC_CMD_GET_MAX_SUBSCR:
-               rep_tlv_buf = cfg_reply_unsigned(tipc_max_subscriptions);
+               rep_tlv_buf = tipc_cfg_reply_unsigned(tipc_max_subscriptions);
                break;
        case TIPC_CMD_GET_MAX_ZONES:
-               rep_tlv_buf = cfg_reply_unsigned(tipc_max_zones);
+               rep_tlv_buf = tipc_cfg_reply_unsigned(tipc_max_zones);
                break;
        case TIPC_CMD_GET_MAX_CLUSTERS:
-               rep_tlv_buf = cfg_reply_unsigned(tipc_max_clusters);
+               rep_tlv_buf = tipc_cfg_reply_unsigned(tipc_max_clusters);
                break;
        case TIPC_CMD_GET_MAX_NODES:
-               rep_tlv_buf = cfg_reply_unsigned(tipc_max_nodes);
+               rep_tlv_buf = tipc_cfg_reply_unsigned(tipc_max_nodes);
                break;
        case TIPC_CMD_GET_MAX_SLAVES:
-               rep_tlv_buf = cfg_reply_unsigned(tipc_max_slaves);
+               rep_tlv_buf = tipc_cfg_reply_unsigned(tipc_max_slaves);
                break;
        case TIPC_CMD_GET_NETID:
-               rep_tlv_buf = cfg_reply_unsigned(tipc_net_id);
+               rep_tlv_buf = tipc_cfg_reply_unsigned(tipc_net_id);
                break;
        default:
                rep_tlv_buf = NULL;
@@ -655,11 +655,11 @@ static void cfg_named_msg_event(void *userdata,
 
        /* Generate reply for request (if can't, return request) */
 
-       rep_buf = cfg_do_cmd(orig->node,
-                            ntohs(req_hdr->tcm_type), 
-                            msg + sizeof(*req_hdr),
-                            size - sizeof(*req_hdr),
-                            BUF_HEADROOM + MAX_H_SIZE + sizeof(*rep_hdr));
+       rep_buf = tipc_cfg_do_cmd(orig->node,
+                                 ntohs(req_hdr->tcm_type), 
+                                 msg + sizeof(*req_hdr),
+                                 size - sizeof(*req_hdr),
+                                 BUF_HEADROOM + MAX_H_SIZE + sizeof(*rep_hdr));
        if (rep_buf) {
                skb_push(rep_buf, sizeof(*rep_hdr));
                rep_hdr = (struct tipc_cfg_msg_hdr *)rep_buf->data;
@@ -675,7 +675,7 @@ static void cfg_named_msg_event(void *userdata,
        tipc_send_buf2port(port_ref, orig, rep_buf, rep_buf->len);
 }
 
-int cfg_init(void)
+int tipc_cfg_init(void)
 {
        struct tipc_name_seq seq;
        int res;
@@ -696,7 +696,7 @@ int cfg_init(void)
 
        seq.type = TIPC_CFG_SRV;
        seq.lower = seq.upper = tipc_own_addr;
-       res = nametbl_publish_rsv(mng.port_ref, TIPC_ZONE_SCOPE, &seq);
+       res = tipc_nametbl_publish_rsv(mng.port_ref, TIPC_ZONE_SCOPE, &seq);
        if (res)
                goto failed;
 
@@ -709,7 +709,7 @@ failed:
        return res;
 }
 
-void cfg_stop(void)
+void tipc_cfg_stop(void)
 {
        if (mng.user_ref) {
                tipc_detach(mng.user_ref);
index 646377d40454a5caa52abba1aeb59ab78d54b57b..7a728f954d84b3b4ab53f78972b120b40c3036b6 100644 (file)
 
 /* ---------------------------------------------------------------------- */
 
-#include <linux/tipc.h>
-#include <linux/tipc_config.h>
+#include "core.h"
 #include "link.h"
 
-struct sk_buff *cfg_reply_alloc(int payload_size);
-int cfg_append_tlv(struct sk_buff *buf, int tlv_type, 
-                  void *tlv_data, int tlv_data_size);
-struct sk_buff *cfg_reply_unsigned_type(u16 tlv_type, u32 value);
-struct sk_buff *cfg_reply_string_type(u16 tlv_type, char *string);
+struct sk_buff *tipc_cfg_reply_alloc(int payload_size);
+int tipc_cfg_append_tlv(struct sk_buff *buf, int tlv_type, 
+                       void *tlv_data, int tlv_data_size);
+struct sk_buff *tipc_cfg_reply_unsigned_type(u16 tlv_type, u32 value);
+struct sk_buff *tipc_cfg_reply_string_type(u16 tlv_type, char *string);
 
-static inline struct sk_buff *cfg_reply_none(void)
+static inline struct sk_buff *tipc_cfg_reply_none(void)
 {
-       return cfg_reply_alloc(0);
+       return tipc_cfg_reply_alloc(0);
 }
 
-static inline struct sk_buff *cfg_reply_unsigned(u32 value)
+static inline struct sk_buff *tipc_cfg_reply_unsigned(u32 value)
 {
-       return cfg_reply_unsigned_type(TIPC_TLV_UNSIGNED, value);
+       return tipc_cfg_reply_unsigned_type(TIPC_TLV_UNSIGNED, value);
 }
 
-static inline struct sk_buff *cfg_reply_error_string(char *string)
+static inline struct sk_buff *tipc_cfg_reply_error_string(char *string)
 {
-       return cfg_reply_string_type(TIPC_TLV_ERROR_STRING, string);
+       return tipc_cfg_reply_string_type(TIPC_TLV_ERROR_STRING, string);
 }
 
-static inline struct sk_buff *cfg_reply_ultra_string(char *string)
+static inline struct sk_buff *tipc_cfg_reply_ultra_string(char *string)
 {
-       return cfg_reply_string_type(TIPC_TLV_ULTRA_STRING, string);
+       return tipc_cfg_reply_string_type(TIPC_TLV_ULTRA_STRING, string);
 }
 
-struct sk_buff *cfg_do_cmd(u32 orig_node, u16 cmd, 
-                          const void *req_tlv_area, int req_tlv_space, 
-                          int headroom);
+struct sk_buff *tipc_cfg_do_cmd(u32 orig_node, u16 cmd, 
+                               const void *req_tlv_area, int req_tlv_space, 
+                               int headroom);
 
-void cfg_link_event(u32 addr, char *name, int up);
-int  cfg_init(void);
-void cfg_stop(void);
+void tipc_cfg_link_event(u32 addr, char *name, int up);
+int  tipc_cfg_init(void);
+void tipc_cfg_stop(void);
 
 #endif
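
As a usage note for the renamed reply helpers declared above, a hypothetical command handler that packs two unsigned TLVs into a single reply could be written as below; only the tipc_cfg_* helpers, TLV_SPACE() and TIPC_TLV_UNSIGNED come from TIPC, the rest is invented for illustration:

        /* Hypothetical: build a reply carrying two unsigned values. */
        static struct sk_buff *example_reply_two_values(u32 a, u32 b)
        {
                struct sk_buff *buf;
                u32 a_net = htonl(a);
                u32 b_net = htonl(b);

                buf = tipc_cfg_reply_alloc(2 * TLV_SPACE(sizeof(u32)));
                if (!buf)
                        return NULL;            /* out of memory */
                tipc_cfg_append_tlv(buf, TIPC_TLV_UNSIGNED, &a_net, sizeof(a_net));
                tipc_cfg_append_tlv(buf, TIPC_TLV_UNSIGNED, &b_net, sizeof(b_net));
                return buf;
        }
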
index e83ac06e31ba9662cbb4b1544f59881f31dfc5c3..3d0a8ee4e1d3986f0f45f12ff68c40ce13083d38 100644 (file)
@@ -37,7 +37,6 @@
 #include <linux/init.h>
 #include <linux/module.h>
 #include <linux/kernel.h>
-#include <linux/version.h>
 #include <linux/random.h>
 
 #include "core.h"
 #include "subscr.h"
 #include "config.h"
 
-int  eth_media_start(void);
-void eth_media_stop(void);
-int  handler_start(void);
-void handler_stop(void);
-int  socket_init(void);
-void socket_stop(void);
-int  netlink_start(void);
-void netlink_stop(void);
+int  tipc_eth_media_start(void);
+void tipc_eth_media_stop(void);
+int  tipc_handler_start(void);
+void tipc_handler_stop(void);
+int  tipc_socket_init(void);
+void tipc_socket_stop(void);
+int  tipc_netlink_start(void);
+void tipc_netlink_stop(void);
 
 #define MOD_NAME "tipc_start: "
 
@@ -113,56 +112,56 @@ int tipc_get_mode(void)
 }
 
 /**
- * stop_net - shut down TIPC networking sub-systems
+ * tipc_core_stop_net - shut down TIPC networking sub-systems
  */
 
-void stop_net(void)
+void tipc_core_stop_net(void)
 {
-       eth_media_stop();
-       tipc_stop_net();
+       tipc_eth_media_stop();
+       tipc_net_stop();
 }
 
 /**
- * start_net - start TIPC networking sub-systems
+ * tipc_core_start_net - start TIPC networking sub-systems
  */
 
-int start_net(void)
+int tipc_core_start_net(void)
 {
        int res;
 
-       if ((res = tipc_start_net()) ||
-           (res = eth_media_start())) {
-               stop_net();
+       if ((res = tipc_net_start()) ||
+           (res = tipc_eth_media_start())) {
+               tipc_core_stop_net();
        }
        return res;
 }
 
 /**
- * stop_core - switch TIPC from SINGLE NODE to NOT RUNNING mode
+ * tipc_core_stop - switch TIPC from SINGLE NODE to NOT RUNNING mode
  */
 
-void stop_core(void)
+void tipc_core_stop(void)
 {
        if (tipc_mode != TIPC_NODE_MODE)
                return;
 
        tipc_mode = TIPC_NOT_RUNNING;
 
-       netlink_stop();
-       handler_stop();
-       cfg_stop();
-       subscr_stop();
-       reg_stop();
-       nametbl_stop();
-       ref_table_stop();
-       socket_stop();
+       tipc_netlink_stop();
+       tipc_handler_stop();
+       tipc_cfg_stop();
+       tipc_subscr_stop();
+       tipc_reg_stop();
+       tipc_nametbl_stop();
+       tipc_ref_table_stop();
+       tipc_socket_stop();
 }
 
 /**
- * start_core - switch TIPC from NOT RUNNING to SINGLE NODE mode
+ * tipc_core_start - switch TIPC from NOT RUNNING to SINGLE NODE mode
  */
 
-int start_core(void)
+int tipc_core_start(void)
 {
        int res;
 
@@ -172,16 +171,16 @@ int start_core(void)
        get_random_bytes(&tipc_random, sizeof(tipc_random));
        tipc_mode = TIPC_NODE_MODE;
 
-       if ((res = handler_start()) || 
-           (res = ref_table_init(tipc_max_ports + tipc_max_subscriptions,
-                                 tipc_random)) ||
-           (res = reg_start()) ||
-           (res = nametbl_init()) ||
-            (res = k_signal((Handler)subscr_start, 0)) ||
-           (res = k_signal((Handler)cfg_init, 0)) || 
-           (res = netlink_start()) ||
-           (res = socket_init())) {
-               stop_core();
+       if ((res = tipc_handler_start()) || 
+           (res = tipc_ref_table_init(tipc_max_ports + tipc_max_subscriptions,
+                                      tipc_random)) ||
+           (res = tipc_reg_start()) ||
+           (res = tipc_nametbl_init()) ||
+            (res = tipc_k_signal((Handler)tipc_subscr_start, 0)) ||
+           (res = tipc_k_signal((Handler)tipc_cfg_init, 0)) || 
+           (res = tipc_netlink_start()) ||
+           (res = tipc_socket_init())) {
+               tipc_core_stop();
        }
        return res;
 }
@@ -191,7 +190,7 @@ static int __init tipc_init(void)
 {
        int res;
 
-       log_reinit(CONFIG_TIPC_LOG);
+       tipc_log_reinit(CONFIG_TIPC_LOG);
        info("Activated (compiled " __DATE__ " " __TIME__ ")\n");
 
        tipc_own_addr = 0;
@@ -205,7 +204,7 @@ static int __init tipc_init(void)
        tipc_max_slaves = delimit(CONFIG_TIPC_SLAVE_NODES, 0, 2047);
        tipc_net_id = 4711;
 
-       if ((res = start_core()))
+       if ((res = tipc_core_start()))
                err("Unable to start in single node mode\n");
        else    
                info("Started in single node mode\n");
@@ -214,10 +213,10 @@ static int __init tipc_init(void)
 
 static void __exit tipc_exit(void)
 {
-       stop_net();
-       stop_core();
+       tipc_core_stop_net();
+       tipc_core_stop();
        info("Deactivated\n");
-       log_stop();
+       tipc_log_stop();
 }
 
 module_init(tipc_init);
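
Taken together, the hunks above leave core.c exporting four lifecycle entry points, which core.h (next file) declares for privileged subsystems. A minimal sketch of the intended calling order follows; it is not taken from the patch, and the caller names bring_up_tipc()/take_down_tipc() are hypothetical.

int bring_up_tipc(void)
{
        int res;

        res = tipc_core_start();                /* NOT RUNNING -> SINGLE NODE mode */
        if (res)
                return res;

        res = tipc_core_start_net();            /* start networking sub-systems */
        if (res)
                tipc_core_stop();               /* caller's choice: fall back to NOT RUNNING */
        return res;
}

void take_down_tipc(void)
{
        tipc_core_stop_net();                   /* shut down networking first */
        tipc_core_stop();                       /* then SINGLE NODE -> NOT RUNNING */
}

Note that tipc_core_start_net() already calls tipc_core_stop_net() on its own failure path, so a caller only has to decide whether to stay in single node mode or stop entirely.
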
index b69b60b2cc866d6c2bb3db751fa8ac19e2692077..1f2e8b27a13f1eaeea0acd73fbd743d3fe985832 100644 (file)
 #ifndef _TIPC_CORE_H
 #define _TIPC_CORE_H
 
+#include <linux/tipc.h>
+#include <linux/tipc_config.h>
+#include <net/tipc/tipc_msg.h>
+#include <net/tipc/tipc_port.h>
+#include <net/tipc/tipc_bearer.h>
 #include <net/tipc/tipc.h>
 #include <linux/types.h>
 #include <linux/kernel.h>
@@ -60,9 +65,9 @@
 #define assert(i)  BUG_ON(!(i))
 
 struct tipc_msg;
-extern struct print_buf *CONS, *LOG;
-extern struct print_buf *TEE(struct print_buf *, struct print_buf *);
-void msg_print(struct print_buf*,struct tipc_msg *,const char*);
+extern struct print_buf *TIPC_CONS, *TIPC_LOG;
+extern struct print_buf *TIPC_TEE(struct print_buf *, struct print_buf *);
+void tipc_msg_print(struct print_buf*,struct tipc_msg *,const char*);
 void tipc_printf(struct print_buf *, const char *fmt, ...);
 void tipc_dump(struct print_buf*,const char *fmt, ...);
 
@@ -79,7 +84,7 @@ void tipc_dump(struct print_buf*,const char *fmt, ...);
 #define info(fmt, arg...) tipc_printf(TIPC_OUTPUT, KERN_NOTICE "TIPC: " fmt, ## arg)
 
 #define dbg(fmt, arg...)  do {if (DBG_OUTPUT) tipc_printf(DBG_OUTPUT, fmt, ## arg);} while(0)
-#define msg_dbg(msg, txt) do {if (DBG_OUTPUT) msg_print(DBG_OUTPUT, msg, txt);} while(0)
+#define msg_dbg(msg, txt) do {if (DBG_OUTPUT) tipc_msg_print(DBG_OUTPUT, msg, txt);} while(0)
 #define dump(fmt, arg...) do {if (DBG_OUTPUT) tipc_dump(DBG_OUTPUT, fmt, ##arg);} while(0)
 
 
@@ -89,15 +94,15 @@ void tipc_dump(struct print_buf*,const char *fmt, ...);
  * here, or on a per .c file basis, by redefining these symbols.  The following
  * print buffer options are available:
  *
- * NULL                        : Output to null print buffer (i.e. print nowhere)
- * CONS                        : Output to system console
- * LOG                 : Output to TIPC log buffer 
- * &buf                : Output to user-defined buffer (struct print_buf *)
- * TEE(&buf_a,&buf_b)  : Output to two print buffers (eg. TEE(CONS,LOG) )
+ * NULL                                : Output to null print buffer (i.e. print nowhere)
+ * TIPC_CONS                   : Output to system console
+ * TIPC_LOG                    : Output to TIPC log buffer 
+ * &buf                                : Output to user-defined buffer (struct print_buf *)
+ * TIPC_TEE(&buf_a,&buf_b)     : Output to two print buffers (eg. TIPC_TEE(TIPC_CONS,TIPC_LOG) )
  */
 
 #ifndef TIPC_OUTPUT
-#define TIPC_OUTPUT TEE(CONS,LOG)
+#define TIPC_OUTPUT TIPC_TEE(TIPC_CONS,TIPC_LOG)
 #endif
 
 #ifndef DBG_OUTPUT
@@ -162,10 +167,10 @@ extern atomic_t tipc_user_count;
  * Routines available to privileged subsystems
  */
 
-extern int  start_core(void);
-extern void stop_core(void);
-extern int  start_net(void);
-extern void stop_net(void);
+extern int  tipc_core_start(void);
+extern void tipc_core_stop(void);
+extern int  tipc_core_start_net(void);
+extern void tipc_core_stop_net(void);
 
 static inline int delimit(int val, int min, int max)
 {
@@ -183,7 +188,7 @@ static inline int delimit(int val, int min, int max)
 
 typedef void (*Handler) (unsigned long);
 
-u32 k_signal(Handler routine, unsigned long argument);
+u32 tipc_k_signal(Handler routine, unsigned long argument);
 
 /**
  * k_init_timer - initialize a timer
index 7ed60a1cfbb80e86f1d768b5f5f0f925ed700249..4f4beefa783037f772cb4c49eb4fe02b253990dd 100644 (file)
@@ -44,10 +44,10 @@ static char print_string[MAX_STRING];
 static spinlock_t print_lock = SPIN_LOCK_UNLOCKED;
 
 static struct print_buf cons_buf = { NULL, 0, NULL, NULL };
-struct print_buf *CONS = &cons_buf;
+struct print_buf *TIPC_CONS = &cons_buf;
 
 static struct print_buf log_buf = { NULL, 0, NULL, NULL };
-struct print_buf *LOG = &log_buf;
+struct print_buf *TIPC_LOG = &log_buf;
 
 
 #define FORMAT(PTR,LEN,FMT) \
@@ -66,15 +66,15 @@ struct print_buf *LOG = &log_buf;
  *    simultaneous use of the print buffer(s) being manipulated.
  * 2) tipc_printf() uses 'print_lock' to prevent simultaneous use of
  *    'print_string' and to protect its print buffer(s).
- * 3) TEE() uses 'print_lock' to protect its print buffer(s).
- * 4) Routines of the form log_XXX() uses 'print_lock' to protect LOG.
+ * 3) TIPC_TEE() uses 'print_lock' to protect its print buffer(s).
+ * 4) Routines of the form log_XXX() uses 'print_lock' to protect TIPC_LOG.
  */
 
 /**
- * printbuf_init - initialize print buffer to empty
+ * tipc_printbuf_init - initialize print buffer to empty
  */
 
-void printbuf_init(struct print_buf *pb, char *raw, u32 sz)
+void tipc_printbuf_init(struct print_buf *pb, char *raw, u32 sz)
 {
        if (!pb || !raw || (sz < (MAX_STRING + 1)))
                return;
@@ -87,26 +87,26 @@ void printbuf_init(struct print_buf *pb, char *raw, u32 sz)
 }
 
 /**
- * printbuf_reset - reinitialize print buffer to empty state
+ * tipc_printbuf_reset - reinitialize print buffer to empty state
  */
 
-void printbuf_reset(struct print_buf *pb)
+void tipc_printbuf_reset(struct print_buf *pb)
 {
        if (pb && pb->buf)
-               printbuf_init(pb, pb->buf, pb->size);
+               tipc_printbuf_init(pb, pb->buf, pb->size);
 }
 
 /**
- * printbuf_empty - test if print buffer is in empty state
+ * tipc_printbuf_empty - test if print buffer is in empty state
  */
 
-int printbuf_empty(struct print_buf *pb)
+int tipc_printbuf_empty(struct print_buf *pb)
 {
        return (!pb || !pb->buf || (pb->crs == pb->buf));
 }
 
 /**
- * printbuf_validate - check for print buffer overflow
+ * tipc_printbuf_validate - check for print buffer overflow
  * 
  * Verifies that a print buffer has captured all data written to it. 
  * If data has been lost, linearize buffer and prepend an error message
@@ -114,7 +114,7 @@ int printbuf_empty(struct print_buf *pb)
  * Returns length of print buffer data string (including trailing NULL)
  */
 
-int printbuf_validate(struct print_buf *pb)
+int tipc_printbuf_validate(struct print_buf *pb)
 {
         char *err = "             *** PRINT BUFFER WRAPPED AROUND ***\n";
         char *cp_buf;
@@ -126,13 +126,13 @@ int printbuf_validate(struct print_buf *pb)
        if (pb->buf[pb->size - 1] == '\0') {
                 cp_buf = kmalloc(pb->size, GFP_ATOMIC);
                 if (cp_buf != NULL){
-                        printbuf_init(&cb, cp_buf, pb->size);
-                        printbuf_move(&cb, pb);
-                        printbuf_move(pb, &cb);
+                        tipc_printbuf_init(&cb, cp_buf, pb->size);
+                        tipc_printbuf_move(&cb, pb);
+                        tipc_printbuf_move(pb, &cb);
                         kfree(cp_buf);
                         memcpy(pb->buf, err, strlen(err));
                 } else {
-                        printbuf_reset(pb);
+                        tipc_printbuf_reset(pb);
                         tipc_printf(pb, err);
                 }
        }
@@ -140,13 +140,13 @@ int printbuf_validate(struct print_buf *pb)
 }
 
 /**
- * printbuf_move - move print buffer contents to another print buffer
+ * tipc_printbuf_move - move print buffer contents to another print buffer
  * 
  * Current contents of destination print buffer (if any) are discarded.
  * Source print buffer becomes empty if a successful move occurs.
  */
 
-void printbuf_move(struct print_buf *pb_to, struct print_buf *pb_from)
+void tipc_printbuf_move(struct print_buf *pb_to, struct print_buf *pb_from)
 {
        int len;
 
@@ -156,12 +156,12 @@ void printbuf_move(struct print_buf *pb_to, struct print_buf *pb_from)
                return;
 
        if (!pb_from || !pb_from->buf) {
-               printbuf_reset(pb_to);
+               tipc_printbuf_reset(pb_to);
                return;
        }
 
        if (pb_to->size < pb_from->size) {
-               printbuf_reset(pb_to);
+               tipc_printbuf_reset(pb_to);
                tipc_printf(pb_to, "*** PRINT BUFFER OVERFLOW ***");
                return;
        }
@@ -179,7 +179,7 @@ void printbuf_move(struct print_buf *pb_to, struct print_buf *pb_from)
        strcpy(pb_to->crs, pb_from->buf);
        pb_to->crs += len;
 
-       printbuf_reset(pb_from);
+       tipc_printbuf_reset(pb_from);
 }
 
 /**
@@ -199,7 +199,7 @@ void tipc_printf(struct print_buf *pb, const char *fmt, ...)
                strcpy(print_string, "*** STRING TOO LONG ***");
 
        while (pb) {
-               if (pb == CONS)
+               if (pb == TIPC_CONS)
                        printk(print_string);
                else if (pb->buf) {
                        chars_left = pb->buf + pb->size - pb->crs - 1;
@@ -223,10 +223,10 @@ void tipc_printf(struct print_buf *pb, const char *fmt, ...)
 }
 
 /**
- * TEE - perform next output operation on both print buffers  
+ * TIPC_TEE - perform next output operation on both print buffers  
  */
 
-struct print_buf *TEE(struct print_buf *b0, struct print_buf *b1)
+struct print_buf *TIPC_TEE(struct print_buf *b0, struct print_buf *b1)
 {
        struct print_buf *pb = b0;
 
@@ -294,96 +294,96 @@ void tipc_dump(struct print_buf *pb, const char *fmt, ...)
        int len;
 
        spin_lock_bh(&print_lock);
-       FORMAT(CONS->buf, len, fmt);
-       printk(CONS->buf);
+       FORMAT(TIPC_CONS->buf, len, fmt);
+       printk(TIPC_CONS->buf);
 
        for (; pb; pb = pb->next) {
-               if (pb == CONS)
+               if (pb == TIPC_CONS)
                        continue;
                printk("\n---- Start of dump,%s log ----\n\n", 
-                      (pb == LOG) ? "global" : "local");
+                      (pb == TIPC_LOG) ? "global" : "local");
                printbuf_dump(pb);
-               printbuf_reset(pb);
+               tipc_printbuf_reset(pb);
                printk("\n-------- End of dump --------\n");
        }
        spin_unlock_bh(&print_lock);
 }
 
 /**
- * log_stop - free up TIPC log print buffer 
+ * tipc_log_stop - free up TIPC log print buffer 
  */
 
-void log_stop(void)
+void tipc_log_stop(void)
 {
        spin_lock_bh(&print_lock);
-       if (LOG->buf) {
-               kfree(LOG->buf);
-               LOG->buf = NULL;
+       if (TIPC_LOG->buf) {
+               kfree(TIPC_LOG->buf);
+               TIPC_LOG->buf = NULL;
        }
        spin_unlock_bh(&print_lock);
 }
 
 /**
- * log_reinit - set TIPC log print buffer to specified size
+ * tipc_log_reinit - set TIPC log print buffer to specified size
  */
 
-void log_reinit(int log_size)
+void tipc_log_reinit(int log_size)
 {
-       log_stop();
+       tipc_log_stop();
 
        if (log_size) {
                if (log_size <= MAX_STRING)
                        log_size = MAX_STRING + 1;
                spin_lock_bh(&print_lock);
-               printbuf_init(LOG, kmalloc(log_size, GFP_ATOMIC), log_size);
+               tipc_printbuf_init(TIPC_LOG, kmalloc(log_size, GFP_ATOMIC), log_size);
                spin_unlock_bh(&print_lock);
        }
 }
 
 /**
- * log_resize - reconfigure size of TIPC log buffer
+ * tipc_log_resize - reconfigure size of TIPC log buffer
  */
 
-struct sk_buff *log_resize(const void *req_tlv_area, int req_tlv_space)
+struct sk_buff *tipc_log_resize(const void *req_tlv_area, int req_tlv_space)
 {
        u32 value;
 
        if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_UNSIGNED))
-               return cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
+               return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
 
        value = *(u32 *)TLV_DATA(req_tlv_area);
        value = ntohl(value);
        if (value != delimit(value, 0, 32768))
-               return cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
-                                             " (log size must be 0-32768)");
-       log_reinit(value);
-       return cfg_reply_none();
+               return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
+                                                  " (log size must be 0-32768)");
+       tipc_log_reinit(value);
+       return tipc_cfg_reply_none();
 }
 
 /**
- * log_dump - capture TIPC log buffer contents in configuration message
+ * tipc_log_dump - capture TIPC log buffer contents in configuration message
  */
 
-struct sk_buff *log_dump(void)
+struct sk_buff *tipc_log_dump(void)
 {
        struct sk_buff *reply;
 
        spin_lock_bh(&print_lock);
-       if (!LOG->buf)
-               reply = cfg_reply_ultra_string("log not activated\n");
-       else if (printbuf_empty(LOG))
-               reply = cfg_reply_ultra_string("log is empty\n");
+       if (!TIPC_LOG->buf)
+               reply = tipc_cfg_reply_ultra_string("log not activated\n");
+       else if (tipc_printbuf_empty(TIPC_LOG))
+               reply = tipc_cfg_reply_ultra_string("log is empty\n");
        else {
                struct tlv_desc *rep_tlv;
                struct print_buf pb;
                int str_len;
 
-               str_len = min(LOG->size, 32768u);
-               reply = cfg_reply_alloc(TLV_SPACE(str_len));
+               str_len = min(TIPC_LOG->size, 32768u);
+               reply = tipc_cfg_reply_alloc(TLV_SPACE(str_len));
                if (reply) {
                        rep_tlv = (struct tlv_desc *)reply->data;
-                       printbuf_init(&pb, TLV_DATA(rep_tlv), str_len);
-                       printbuf_move(&pb, LOG);
+                       tipc_printbuf_init(&pb, TLV_DATA(rep_tlv), str_len);
+                       tipc_printbuf_move(&pb, TIPC_LOG);
                        str_len = strlen(TLV_DATA(rep_tlv)) + 1;
                        skb_put(reply, TLV_SPACE(str_len));
                        TLV_SET(rep_tlv, TIPC_TLV_ULTRA_STRING, NULL, str_len);
index c6b2a64c224f07d4a3cfddd2d3e758a28a81e0a1..227f050d2a52742552a8af61650f1b12bc135c6c 100644 (file)
@@ -44,16 +44,16 @@ struct print_buf {
        struct print_buf *next;
 };
 
-void printbuf_init(struct print_buf *pb, char *buf, u32 sz);
-void printbuf_reset(struct print_buf *pb);
-int  printbuf_empty(struct print_buf *pb);
-int  printbuf_validate(struct print_buf *pb);
-void printbuf_move(struct print_buf *pb_to, struct print_buf *pb_from);
+void tipc_printbuf_init(struct print_buf *pb, char *buf, u32 sz);
+void tipc_printbuf_reset(struct print_buf *pb);
+int  tipc_printbuf_empty(struct print_buf *pb);
+int  tipc_printbuf_validate(struct print_buf *pb);
+void tipc_printbuf_move(struct print_buf *pb_to, struct print_buf *pb_from);
 
-void log_reinit(int log_size);
-void log_stop(void);
+void tipc_log_reinit(int log_size);
+void tipc_log_stop(void);
 
-struct sk_buff *log_resize(const void *req_tlv_area, int req_tlv_space);
-struct sk_buff *log_dump(void);
+struct sk_buff *tipc_log_resize(const void *req_tlv_area, int req_tlv_space);
+struct sk_buff *tipc_log_dump(void);
 
 #endif
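
With the renamed primitives above, a caller-supplied print buffer is handled roughly as in the sketch below. This is a hedged illustration, not code from the patch: core.h and dbg.h are assumed to be included, the 1024-byte size is arbitrary (it only needs to exceed MAX_STRING), and capture_example() is a made-up name.

static char raw[1024];                          /* arbitrary size, assumed > MAX_STRING */
static struct print_buf pb;

static void capture_example(void)
{
        int len;

        tipc_printbuf_init(&pb, raw, sizeof(raw));      /* empty buffer over 'raw' */
        tipc_printf(&pb, "own addr = %u\n", tipc_own_addr);

        if (!tipc_printbuf_empty(&pb)) {
                len = tipc_printbuf_validate(&pb);      /* linearize, flag any overflow */
                dbg("captured %d bytes (incl. trailing NUL)\n", len);
        }
        tipc_printbuf_reset(&pb);                       /* back to the empty state */
}
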
index b106ef1621cce4c7c2d33e0a173d98f4c41b11df..53ba4630c10d86bd8591424f80ab178020bc45a3 100644 (file)
@@ -93,7 +93,7 @@ int disc_create_link(const struct tipc_link_create *argv)
  * disc_lost_link(): A link has lost contact
  */
 
-void disc_link_event(u32 addr, char *name, int up) 
+void tipc_disc_link_event(u32 addr, char *name, int up) 
 {
        if (in_own_cluster(addr))
                return;
@@ -103,17 +103,17 @@ void disc_link_event(u32 addr, char *name, int up)
 }
 
 /** 
- * disc_init_msg - initialize a link setup message
+ * tipc_disc_init_msg - initialize a link setup message
  * @type: message type (request or response)
  * @req_links: number of links associated with message
  * @dest_domain: network domain of node(s) which should respond to message
  * @b_ptr: ptr to bearer issuing message
  */
 
-struct sk_buff *disc_init_msg(u32 type,
-                             u32 req_links,
-                             u32 dest_domain,
-                             struct bearer *b_ptr)
+struct sk_buff *tipc_disc_init_msg(u32 type,
+                                  u32 req_links,
+                                  u32 dest_domain,
+                                  struct bearer *b_ptr)
 {
        struct sk_buff *buf = buf_acquire(DSC_H_SIZE);
        struct tipc_msg *msg;
@@ -132,11 +132,11 @@ struct sk_buff *disc_init_msg(u32 type,
 }
 
 /**
- * disc_recv_msg - handle incoming link setup message (request or response)
+ * tipc_disc_recv_msg - handle incoming link setup message (request or response)
  * @buf: buffer containing message
  */
 
-void disc_recv_msg(struct sk_buff *buf)
+void tipc_disc_recv_msg(struct sk_buff *buf)
 {
        struct bearer *b_ptr = (struct bearer *)TIPC_SKB_CB(buf)->handle;
        struct link *link;
@@ -153,9 +153,9 @@ void disc_recv_msg(struct sk_buff *buf)
 
        if (net_id != tipc_net_id)
                return;
-       if (!addr_domain_valid(dest))
+       if (!tipc_addr_domain_valid(dest))
                return;
-       if (!addr_node_valid(orig))
+       if (!tipc_addr_node_valid(orig))
                return;
        if (orig == tipc_own_addr)
                return;
@@ -169,11 +169,11 @@ void disc_recv_msg(struct sk_buff *buf)
                /* Always accept link here */
                struct sk_buff *rbuf;
                struct tipc_media_addr *addr;
-               struct node *n_ptr = node_find(orig);
+               struct node *n_ptr = tipc_node_find(orig);
                int link_up;
                dbg(" in own cluster\n");
                if (n_ptr == NULL) {
-                       n_ptr = node_create(orig);
+                       n_ptr = tipc_node_create(orig);
                }
                if (n_ptr == NULL) {
                        warn("Memory squeeze; Failed to create node\n");
@@ -183,7 +183,7 @@ void disc_recv_msg(struct sk_buff *buf)
                link = n_ptr->links[b_ptr->identity];
                if (!link) {
                        dbg("creating link\n");
-                       link = link_create(b_ptr, orig, &media_addr);
+                       link = tipc_link_create(b_ptr, orig, &media_addr);
                        if (!link) {
                                spin_unlock_bh(&n_ptr->lock);                
                                return;
@@ -196,13 +196,13 @@ void disc_recv_msg(struct sk_buff *buf)
                        warn("New bearer address for %s\n", 
                             addr_string_fill(addr_string, orig));
                        memcpy(addr, &media_addr, sizeof(*addr));
-                       link_reset(link);     
+                       tipc_link_reset(link);     
                }
-               link_up = link_is_up(link);
+               link_up = tipc_link_is_up(link);
                spin_unlock_bh(&n_ptr->lock);                
                if ((type == DSC_RESP_MSG) || link_up)
                        return;
-               rbuf = disc_init_msg(DSC_RESP_MSG, 1, orig, b_ptr);
+               rbuf = tipc_disc_init_msg(DSC_RESP_MSG, 1, orig, b_ptr);
                if (rbuf != NULL) {
                        msg_dbg(buf_msg(rbuf),"SEND:");
                        b_ptr->media->send_msg(rbuf, &b_ptr->publ, &media_addr);
@@ -212,11 +212,11 @@ void disc_recv_msg(struct sk_buff *buf)
 }
 
 /**
- * disc_stop_link_req - stop sending periodic link setup requests
+ * tipc_disc_stop_link_req - stop sending periodic link setup requests
  * @req: ptr to link request structure
  */
 
-void disc_stop_link_req(struct link_req *req) 
+void tipc_disc_stop_link_req(struct link_req *req) 
 {
        if (!req)
                return;
@@ -228,11 +228,11 @@ void disc_stop_link_req(struct link_req *req)
 } 
 
 /**
- * disc_update_link_req - update frequency of periodic link setup requests
+ * tipc_disc_update_link_req - update frequency of periodic link setup requests
  * @req: ptr to link request structure
  */
 
-void disc_update_link_req(struct link_req *req) 
+void tipc_disc_update_link_req(struct link_req *req) 
 {
        if (!req)
                return;
@@ -282,7 +282,7 @@ static void disc_timeout(struct link_req *req)
 }
 
 /**
- * disc_init_link_req - start sending periodic link setup requests
+ * tipc_disc_init_link_req - start sending periodic link setup requests
  * @b_ptr: ptr to bearer issuing requests
  * @dest: destination address for request messages
  * @dest_domain: network domain of node(s) which should respond to message
@@ -291,10 +291,10 @@ static void disc_timeout(struct link_req *req)
  * Returns pointer to link request structure, or NULL if unable to create.
  */
 
-struct link_req *disc_init_link_req(struct bearer *b_ptr, 
-                                   const struct tipc_media_addr *dest,
-                                   u32 dest_domain,
-                                   u32 req_links) 
+struct link_req *tipc_disc_init_link_req(struct bearer *b_ptr, 
+                                        const struct tipc_media_addr *dest,
+                                        u32 dest_domain,
+                                        u32 req_links) 
 {
        struct link_req *req;
 
@@ -302,7 +302,7 @@ struct link_req *disc_init_link_req(struct bearer *b_ptr,
        if (!req)
                return NULL;
 
-       req->buf = disc_init_msg(DSC_REQ_MSG, req_links, dest_domain, b_ptr);
+       req->buf = tipc_disc_init_msg(DSC_REQ_MSG, req_links, dest_domain, b_ptr);
        if (!req->buf) {
                kfree(req);
                return NULL;
index 2a6114d91626030e1b1538a8e5f3715f22b5b01f..0454fd1ae7f357cd0f5596981a04da632b7a7a53 100644 (file)
 #ifndef _TIPC_DISCOVER_H
 #define _TIPC_DISCOVER_H
 
-#include <linux/tipc.h>
+#include "core.h"
 
 struct link_req;
 
-struct link_req *disc_init_link_req(struct bearer *b_ptr, 
-                                   const struct tipc_media_addr *dest,
-                                   u32 dest_domain,
-                                   u32 req_links);
-void disc_update_link_req(struct link_req *req);
-void disc_stop_link_req(struct link_req *req);
+struct link_req *tipc_disc_init_link_req(struct bearer *b_ptr, 
+                                        const struct tipc_media_addr *dest,
+                                        u32 dest_domain,
+                                        u32 req_links);
+void tipc_disc_update_link_req(struct link_req *req);
+void tipc_disc_stop_link_req(struct link_req *req);
 
-void disc_recv_msg(struct sk_buff *buf);
+void tipc_disc_recv_msg(struct sk_buff *buf);
 
-void disc_link_event(u32 addr, char *name, int up);
+void tipc_disc_link_event(u32 addr, char *name, int up);
 #if 0
 int  disc_create_link(const struct tipc_link_create *argv);
 #endif
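
The three routines above cover a bearer's discovery lifecycle. A rough sketch, not from the patch: b_ptr, bcast, dest_domain and req_links are assumed to be supplied by the bearer-enabling code, and disc_lifecycle_sketch() is an invented name.

static void disc_lifecycle_sketch(struct bearer *b_ptr,
                                  struct tipc_media_addr *bcast,
                                  u32 dest_domain, u32 req_links)
{
        struct link_req *req;

        /* bearer enabled: begin sending periodic link setup requests */
        req = tipc_disc_init_link_req(b_ptr, bcast, dest_domain, req_links);
        if (!req)
                return;                         /* discovery could not be started */

        /* some link state changed: let the request rate be re-evaluated */
        tipc_disc_update_link_req(req);

        /* bearer being disabled: stop the periodic requests */
        tipc_disc_stop_link_req(req);
}
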
index 34d0462db3aa139d1c25d4ab734b015214eb7db7..1f8d83b9c8b46440e1ad8bbce9f0fd61385bfef2 100644 (file)
 #include <net/tipc/tipc_bearer.h>
 #include <net/tipc/tipc_msg.h>
 #include <linux/netdevice.h>
-#include <linux/version.h>
 
 #define MAX_ETH_BEARERS                2
-#define TIPC_PROTOCOL          0x88ca
-#define ETH_LINK_PRIORITY      10
+#define ETH_LINK_PRIORITY      TIPC_DEF_LINK_PRI
 #define ETH_LINK_TOLERANCE     TIPC_DEF_LINK_TOL
-
+#define ETH_LINK_WINDOW                TIPC_DEF_LINK_WIN
 
 /**
  * struct eth_bearer - Ethernet bearer data structure
@@ -78,7 +76,7 @@ static int send_msg(struct sk_buff *buf, struct tipc_bearer *tb_ptr,
                clone->nh.raw = clone->data;
                dev = ((struct eth_bearer *)(tb_ptr->usr_handle))->dev;
                clone->dev = dev;
-               dev->hard_header(clone, dev, TIPC_PROTOCOL,
+               dev->hard_header(clone, dev, ETH_P_TIPC,
                                 &dest->dev_addr.eth_addr,
                                 dev->dev_addr, clone->len);
                dev_queue_xmit(clone);
@@ -141,7 +139,7 @@ static int enable_bearer(struct tipc_bearer *tb_ptr)
                return -EDQUOT;
        if (!eb_ptr->dev) {
                eb_ptr->dev = dev;
-               eb_ptr->tipc_packet_type.type = __constant_htons(TIPC_PROTOCOL);
+               eb_ptr->tipc_packet_type.type = __constant_htons(ETH_P_TIPC);
                eb_ptr->tipc_packet_type.dev = dev;
                eb_ptr->tipc_packet_type.func = recv_msg;
                eb_ptr->tipc_packet_type.af_packet_priv = eb_ptr;
@@ -240,13 +238,13 @@ static char *eth_addr2str(struct tipc_media_addr *a, char *str_buf, int str_size
 }
 
 /**
- * eth_media_start - activate Ethernet bearer support
+ * tipc_eth_media_start - activate Ethernet bearer support
  *
  * Register Ethernet media type with TIPC bearer code.  Also register
  * with OS for notifications about device state changes.
  */
 
-int eth_media_start(void)
+int tipc_eth_media_start(void)
 {                       
        struct tipc_media_addr bcast_addr;
        int res;
@@ -260,7 +258,7 @@ int eth_media_start(void)
        res = tipc_register_media(TIPC_MEDIA_TYPE_ETH, "eth",
                                  enable_bearer, disable_bearer, send_msg, 
                                  eth_addr2str, &bcast_addr, ETH_LINK_PRIORITY, 
-                                 ETH_LINK_TOLERANCE, TIPC_DEF_LINK_WIN);
+                                 ETH_LINK_TOLERANCE, ETH_LINK_WINDOW);
        if (res)
                return res;
 
@@ -273,10 +271,10 @@ int eth_media_start(void)
 }
 
 /**
- * eth_media_stop - deactivate Ethernet bearer support
+ * tipc_eth_media_stop - deactivate Ethernet bearer support
  */
 
-void eth_media_stop(void)
+void tipc_eth_media_stop(void)
 {
        int i;
 
index f320010f8a656583146acafd93282a160b464838..966f70a1b60800012c14a09e7377eeea2414c0d0 100644 (file)
@@ -52,7 +52,7 @@ static void process_signal_queue(unsigned long dummy);
 static DECLARE_TASKLET_DISABLED(tipc_tasklet, process_signal_queue, 0);
 
 
-unsigned int k_signal(Handler routine, unsigned long argument)
+unsigned int tipc_k_signal(Handler routine, unsigned long argument)
 {
        struct queue_item *item;
 
@@ -93,7 +93,7 @@ static void process_signal_queue(unsigned long dummy)
        spin_unlock_bh(&qitem_lock);
 }
 
-int handler_start(void)
+int tipc_handler_start(void)
 {
        tipc_queue_item_cache = 
                kmem_cache_create("tipc_queue_items", sizeof(struct queue_item),
@@ -107,7 +107,7 @@ int handler_start(void)
        return 0;
 }
 
-void handler_stop(void)
+void tipc_handler_stop(void)
 {
        struct list_head *l, *n;
        struct queue_item *item; 
index 7265f4be47664a29bc084b963c096695086ea64a..511872afa459d1f48291205ee71306fb9b350b44 100644 (file)
@@ -148,12 +148,12 @@ static void link_print(struct link *l_ptr, struct print_buf *buf,
 #define LINK_LOG_BUF_SIZE 0
 
 #define dbg_link(fmt, arg...)  do {if (LINK_LOG_BUF_SIZE) tipc_printf(&l_ptr->print_buf, fmt, ## arg); } while(0)
-#define dbg_link_msg(msg, txt) do {if (LINK_LOG_BUF_SIZE) msg_print(&l_ptr->print_buf, msg, txt); } while(0)
+#define dbg_link_msg(msg, txt) do {if (LINK_LOG_BUF_SIZE) tipc_msg_print(&l_ptr->print_buf, msg, txt); } while(0)
 #define dbg_link_state(txt) do {if (LINK_LOG_BUF_SIZE) link_print(l_ptr, &l_ptr->print_buf, txt); } while(0)
 #define dbg_link_dump() do { \
        if (LINK_LOG_BUF_SIZE) { \
                tipc_printf(LOG, "\n\nDumping link <%s>:\n", l_ptr->name); \
-               printbuf_move(LOG, &l_ptr->print_buf); \
+               tipc_printbuf_move(LOG, &l_ptr->print_buf); \
        } \
 } while (0)
 
@@ -252,14 +252,14 @@ static inline u32 link_last_sent(struct link *l_ptr)
  *  Simple non-inlined link routines (i.e. referenced outside this file)
  */
 
-int link_is_up(struct link *l_ptr)
+int tipc_link_is_up(struct link *l_ptr)
 {
        if (!l_ptr)
                return 0;
        return (link_working_working(l_ptr) || link_working_unknown(l_ptr));
 }
 
-int link_is_active(struct link *l_ptr)
+int tipc_link_is_active(struct link *l_ptr)
 {
        return ((l_ptr->owner->active_links[0] == l_ptr) ||
                (l_ptr->owner->active_links[1] == l_ptr));
@@ -338,15 +338,15 @@ static int link_name_validate(const char *name, struct link_name *name_parts)
  * link_timeout - handle expiration of link timer
  * @l_ptr: pointer to link
  * 
- * This routine must not grab "net_lock" to avoid a potential deadlock conflict
- * with link_delete().  (There is no risk that the node will be deleted by
- * another thread because link_delete() always cancels the link timer before
- * node_delete() is called.)
+ * This routine must not grab "tipc_net_lock" to avoid a potential deadlock conflict
+ * with tipc_link_delete().  (There is no risk that the node will be deleted by
+ * another thread because tipc_link_delete() always cancels the link timer before
+ * tipc_node_delete() is called.)
  */
 
 static void link_timeout(struct link *l_ptr)
 {
-       node_lock(l_ptr->owner);
+       tipc_node_lock(l_ptr->owner);
 
        /* update counters used in statistical profiling of send traffic */
 
@@ -391,9 +391,9 @@ static void link_timeout(struct link *l_ptr)
        link_state_event(l_ptr, TIMEOUT_EVT);
 
        if (l_ptr->next_out)
-               link_push_queue(l_ptr);
+               tipc_link_push_queue(l_ptr);
 
-       node_unlock(l_ptr->owner);
+       tipc_node_unlock(l_ptr->owner);
 }
 
 static inline void link_set_timer(struct link *l_ptr, u32 time)
@@ -402,7 +402,7 @@ static inline void link_set_timer(struct link *l_ptr, u32 time)
 }
 
 /**
- * link_create - create a new link
+ * tipc_link_create - create a new link
  * @b_ptr: pointer to associated bearer
  * @peer: network address of node at other end of link
  * @media_addr: media address to use when sending messages over link
@@ -410,8 +410,8 @@ static inline void link_set_timer(struct link *l_ptr, u32 time)
  * Returns pointer to link.
  */
 
-struct link *link_create(struct bearer *b_ptr, const u32 peer,
-                        const struct tipc_media_addr *media_addr)
+struct link *tipc_link_create(struct bearer *b_ptr, const u32 peer,
+                             const struct tipc_media_addr *media_addr)
 {
        struct link *l_ptr;
        struct tipc_msg *msg;
@@ -449,7 +449,7 @@ struct link *link_create(struct bearer *b_ptr, const u32 peer,
        strcpy((char *)msg_data(msg), if_name);
 
        l_ptr->priority = b_ptr->priority;
-       link_set_queue_limits(l_ptr, b_ptr->media->window);
+       tipc_link_set_queue_limits(l_ptr, b_ptr->media->window);
 
        link_init_max_pkt(l_ptr);
 
@@ -458,7 +458,7 @@ struct link *link_create(struct bearer *b_ptr, const u32 peer,
 
        link_reset_statistics(l_ptr);
 
-       l_ptr->owner = node_attach_link(l_ptr);
+       l_ptr->owner = tipc_node_attach_link(l_ptr);
        if (!l_ptr->owner) {
                kfree(l_ptr);
                return NULL;
@@ -472,52 +472,52 @@ struct link *link_create(struct bearer *b_ptr, const u32 peer,
                        warn("Memory squeeze; Failed to create link\n");
                        return NULL;
                }
-               printbuf_init(&l_ptr->print_buf, pb, LINK_LOG_BUF_SIZE);
+               tipc_printbuf_init(&l_ptr->print_buf, pb, LINK_LOG_BUF_SIZE);
        }
 
-       k_signal((Handler)link_start, (unsigned long)l_ptr);
+       tipc_k_signal((Handler)tipc_link_start, (unsigned long)l_ptr);
 
-       dbg("link_create(): tolerance = %u,cont intv = %u, abort_limit = %u\n",
+       dbg("tipc_link_create(): tolerance = %u,cont intv = %u, abort_limit = %u\n",
            l_ptr->tolerance, l_ptr->continuity_interval, l_ptr->abort_limit);
        
        return l_ptr;
 }
 
 /** 
- * link_delete - delete a link
+ * tipc_link_delete - delete a link
  * @l_ptr: pointer to link
  * 
- * Note: 'net_lock' is write_locked, bearer is locked.
+ * Note: 'tipc_net_lock' is write_locked, bearer is locked.
  * This routine must not grab the node lock until after link timer cancellation
  * to avoid a potential deadlock situation.  
  */
 
-void link_delete(struct link *l_ptr)
+void tipc_link_delete(struct link *l_ptr)
 {
        if (!l_ptr) {
                err("Attempt to delete non-existent link\n");
                return;
        }
 
-       dbg("link_delete()\n");
+       dbg("tipc_link_delete()\n");
 
        k_cancel_timer(&l_ptr->timer);
        
-       node_lock(l_ptr->owner);
-       link_reset(l_ptr);
-       node_detach_link(l_ptr->owner, l_ptr);
-       link_stop(l_ptr);
+       tipc_node_lock(l_ptr->owner);
+       tipc_link_reset(l_ptr);
+       tipc_node_detach_link(l_ptr->owner, l_ptr);
+       tipc_link_stop(l_ptr);
        list_del_init(&l_ptr->link_list);
        if (LINK_LOG_BUF_SIZE)
                kfree(l_ptr->print_buf.buf);
-       node_unlock(l_ptr->owner);
+       tipc_node_unlock(l_ptr->owner);
        k_term_timer(&l_ptr->timer);
        kfree(l_ptr);
 }
 
-void link_start(struct link *l_ptr)
+void tipc_link_start(struct link *l_ptr)
 {
-       dbg("link_start %x\n", l_ptr);
+       dbg("tipc_link_start %x\n", l_ptr);
        link_state_event(l_ptr, STARTING_EVT);
 }
 
@@ -535,8 +535,8 @@ static int link_schedule_port(struct link *l_ptr, u32 origport, u32 sz)
 {
        struct port *p_ptr;
 
-       spin_lock_bh(&port_list_lock);
-       p_ptr = port_lock(origport);
+       spin_lock_bh(&tipc_port_list_lock);
+       p_ptr = tipc_port_lock(origport);
        if (p_ptr) {
                if (!p_ptr->wakeup)
                        goto exit;
@@ -548,13 +548,13 @@ static int link_schedule_port(struct link *l_ptr, u32 origport, u32 sz)
                list_add_tail(&p_ptr->wait_list, &l_ptr->waiting_ports);
                l_ptr->stats.link_congs++;
 exit:
-               port_unlock(p_ptr);
+               tipc_port_unlock(p_ptr);
        }
-       spin_unlock_bh(&port_list_lock);
+       spin_unlock_bh(&tipc_port_list_lock);
        return -ELINKCONG;
 }
 
-void link_wakeup_ports(struct link *l_ptr, int all)
+void tipc_link_wakeup_ports(struct link *l_ptr, int all)
 {
        struct port *p_ptr;
        struct port *temp_p_ptr;
@@ -564,7 +564,7 @@ void link_wakeup_ports(struct link *l_ptr, int all)
                win = 100000;
        if (win <= 0)
                return;
-       if (!spin_trylock_bh(&port_list_lock))
+       if (!spin_trylock_bh(&tipc_port_list_lock))
                return;
        if (link_congested(l_ptr))
                goto exit;
@@ -583,7 +583,7 @@ void link_wakeup_ports(struct link *l_ptr, int all)
        }
 
 exit:
-       spin_unlock_bh(&port_list_lock);
+       spin_unlock_bh(&tipc_port_list_lock);
 }
 
 /** 
@@ -606,11 +606,11 @@ static void link_release_outqueue(struct link *l_ptr)
 }
 
 /**
- * link_reset_fragments - purge link's inbound message fragments queue
+ * tipc_link_reset_fragments - purge link's inbound message fragments queue
  * @l_ptr: pointer to link
  */
 
-void link_reset_fragments(struct link *l_ptr)
+void tipc_link_reset_fragments(struct link *l_ptr)
 {
        struct sk_buff *buf = l_ptr->defragm_buf;
        struct sk_buff *next;
@@ -624,11 +624,11 @@ void link_reset_fragments(struct link *l_ptr)
 }
 
 /** 
- * link_stop - purge all inbound and outbound messages associated with link
+ * tipc_link_stop - purge all inbound and outbound messages associated with link
  * @l_ptr: pointer to link
  */
 
-void link_stop(struct link *l_ptr)
+void tipc_link_stop(struct link *l_ptr)
 {
        struct sk_buff *buf;
        struct sk_buff *next;
@@ -647,7 +647,7 @@ void link_stop(struct link *l_ptr)
                buf = next;
        }
 
-       link_reset_fragments(l_ptr);
+       tipc_link_reset_fragments(l_ptr);
 
        buf_discard(l_ptr->proto_msg_queue);
        l_ptr->proto_msg_queue = NULL;
@@ -677,7 +677,7 @@ static void link_send_event(void (*fcn)(u32 a, char *n, int up),
        ev->up = up;
        ev->fcn = fcn;
        memcpy(ev->name, l_ptr->name, TIPC_MAX_LINK_NAME);
-       k_signal((Handler)link_recv_event, (unsigned long)ev);
+       tipc_k_signal((Handler)link_recv_event, (unsigned long)ev);
 }
 
 #else
@@ -686,7 +686,7 @@ static void link_send_event(void (*fcn)(u32 a, char *n, int up),
 
 #endif
 
-void link_reset(struct link *l_ptr)
+void tipc_link_reset(struct link *l_ptr)
 {
        struct sk_buff *buf;
        u32 prev_state = l_ptr->state;
@@ -706,13 +706,13 @@ void link_reset(struct link *l_ptr)
        if ((prev_state == RESET_UNKNOWN) || (prev_state == RESET_RESET))
                return;
 
-       node_link_down(l_ptr->owner, l_ptr);
-       bearer_remove_dest(l_ptr->b_ptr, l_ptr->addr);
+       tipc_node_link_down(l_ptr->owner, l_ptr);
+       tipc_bearer_remove_dest(l_ptr->b_ptr, l_ptr->addr);
 #if 0
-       tipc_printf(CONS, "\nReset link <%s>\n", l_ptr->name);
+       tipc_printf(TIPC_CONS, "\nReset link <%s>\n", l_ptr->name);
        dbg_link_dump();
 #endif
-       if (node_has_active_links(l_ptr->owner) &&
+       if (tipc_node_has_active_links(l_ptr->owner) &&
            l_ptr->owner->permit_changeover) {
                l_ptr->reset_checkpoint = checkpoint;
                l_ptr->exp_msg_count = START_CHANGEOVER;
@@ -730,7 +730,7 @@ void link_reset(struct link *l_ptr)
                buf = next;
        }
        if (!list_empty(&l_ptr->waiting_ports))
-               link_wakeup_ports(l_ptr, 1);
+               tipc_link_wakeup_ports(l_ptr, 1);
 
        l_ptr->retransm_queue_head = 0;
        l_ptr->retransm_queue_size = 0;
@@ -747,20 +747,20 @@ void link_reset(struct link *l_ptr)
        l_ptr->stale_count = 0;
        link_reset_statistics(l_ptr);
 
-       link_send_event(cfg_link_event, l_ptr, 0);
+       link_send_event(tipc_cfg_link_event, l_ptr, 0);
        if (!in_own_cluster(l_ptr->addr))
-               link_send_event(disc_link_event, l_ptr, 0);
+               link_send_event(tipc_disc_link_event, l_ptr, 0);
 }
 
 
 static void link_activate(struct link *l_ptr)
 {
        l_ptr->next_in_no = 1;
-       node_link_up(l_ptr->owner, l_ptr);
-       bearer_add_dest(l_ptr->b_ptr, l_ptr->addr);
-       link_send_event(cfg_link_event, l_ptr, 1);
+       tipc_node_link_up(l_ptr->owner, l_ptr);
+       tipc_bearer_add_dest(l_ptr->b_ptr, l_ptr->addr);
+       link_send_event(tipc_cfg_link_event, l_ptr, 1);
        if (!in_own_cluster(l_ptr->addr))
-               link_send_event(disc_link_event, l_ptr, 1);
+               link_send_event(tipc_disc_link_event, l_ptr, 1);
 }
 
 /**
@@ -799,13 +799,13 @@ static void link_state_event(struct link *l_ptr, unsigned event)
                        dbg_link("TIM ");
                        if (l_ptr->next_in_no != l_ptr->checkpoint) {
                                l_ptr->checkpoint = l_ptr->next_in_no;
-                               if (bclink_acks_missing(l_ptr->owner)) {
-                                       link_send_proto_msg(l_ptr, STATE_MSG, 
-                                                           0, 0, 0, 0, 0);
+                               if (tipc_bclink_acks_missing(l_ptr->owner)) {
+                                       tipc_link_send_proto_msg(l_ptr, STATE_MSG, 
+                                                                0, 0, 0, 0, 0);
                                        l_ptr->fsm_msg_cnt++;
                                } else if (l_ptr->max_pkt < l_ptr->max_pkt_target) {
-                                       link_send_proto_msg(l_ptr, STATE_MSG, 
-                                                           1, 0, 0, 0, 0);
+                                       tipc_link_send_proto_msg(l_ptr, STATE_MSG, 
+                                                                1, 0, 0, 0, 0);
                                        l_ptr->fsm_msg_cnt++;
                                }
                                link_set_timer(l_ptr, cont_intv);
@@ -814,16 +814,16 @@ static void link_state_event(struct link *l_ptr, unsigned event)
                        dbg_link(" -> WU\n");
                        l_ptr->state = WORKING_UNKNOWN;
                        l_ptr->fsm_msg_cnt = 0;
-                       link_send_proto_msg(l_ptr, STATE_MSG, 1, 0, 0, 0, 0);
+                       tipc_link_send_proto_msg(l_ptr, STATE_MSG, 1, 0, 0, 0, 0);
                        l_ptr->fsm_msg_cnt++;
                        link_set_timer(l_ptr, cont_intv / 4);
                        break;
                case RESET_MSG:
                        dbg_link("RES -> RR\n");
-                       link_reset(l_ptr);
+                       tipc_link_reset(l_ptr);
                        l_ptr->state = RESET_RESET;
                        l_ptr->fsm_msg_cnt = 0;
-                       link_send_proto_msg(l_ptr, ACTIVATE_MSG, 0, 0, 0, 0, 0);
+                       tipc_link_send_proto_msg(l_ptr, ACTIVATE_MSG, 0, 0, 0, 0, 0);
                        l_ptr->fsm_msg_cnt++;
                        link_set_timer(l_ptr, cont_intv);
                        break;
@@ -844,10 +844,10 @@ static void link_state_event(struct link *l_ptr, unsigned event)
                        break;
                case RESET_MSG:
                        dbg_link("RES -> RR\n");
-                       link_reset(l_ptr);
+                       tipc_link_reset(l_ptr);
                        l_ptr->state = RESET_RESET;
                        l_ptr->fsm_msg_cnt = 0;
-                       link_send_proto_msg(l_ptr, ACTIVATE_MSG, 0, 0, 0, 0, 0);
+                       tipc_link_send_proto_msg(l_ptr, ACTIVATE_MSG, 0, 0, 0, 0, 0);
                        l_ptr->fsm_msg_cnt++;
                        link_set_timer(l_ptr, cont_intv);
                        break;
@@ -858,9 +858,9 @@ static void link_state_event(struct link *l_ptr, unsigned event)
                                l_ptr->state = WORKING_WORKING;
                                l_ptr->fsm_msg_cnt = 0;
                                l_ptr->checkpoint = l_ptr->next_in_no;
-                               if (bclink_acks_missing(l_ptr->owner)) {
-                                       link_send_proto_msg(l_ptr, STATE_MSG,
-                                                           0, 0, 0, 0, 0);
+                               if (tipc_bclink_acks_missing(l_ptr->owner)) {
+                                       tipc_link_send_proto_msg(l_ptr, STATE_MSG,
+                                                                0, 0, 0, 0, 0);
                                        l_ptr->fsm_msg_cnt++;
                                }
                                link_set_timer(l_ptr, cont_intv);
@@ -868,18 +868,18 @@ static void link_state_event(struct link *l_ptr, unsigned event)
                                dbg_link("Probing %u/%u,timer = %u ms)\n",
                                         l_ptr->fsm_msg_cnt, l_ptr->abort_limit,
                                         cont_intv / 4);
-                               link_send_proto_msg(l_ptr, STATE_MSG, 
-                                                   1, 0, 0, 0, 0);
+                               tipc_link_send_proto_msg(l_ptr, STATE_MSG, 
+                                                        1, 0, 0, 0, 0);
                                l_ptr->fsm_msg_cnt++;
                                link_set_timer(l_ptr, cont_intv / 4);
                        } else {        /* Link has failed */
                                dbg_link("-> RU (%u probes unanswered)\n",
                                         l_ptr->fsm_msg_cnt);
-                               link_reset(l_ptr);
+                               tipc_link_reset(l_ptr);
                                l_ptr->state = RESET_UNKNOWN;
                                l_ptr->fsm_msg_cnt = 0;
-                               link_send_proto_msg(l_ptr, RESET_MSG,
-                                                   0, 0, 0, 0, 0);
+                               tipc_link_send_proto_msg(l_ptr, RESET_MSG,
+                                                        0, 0, 0, 0, 0);
                                l_ptr->fsm_msg_cnt++;
                                link_set_timer(l_ptr, cont_intv);
                        }
@@ -904,7 +904,7 @@ static void link_state_event(struct link *l_ptr, unsigned event)
                        l_ptr->state = WORKING_WORKING;
                        l_ptr->fsm_msg_cnt = 0;
                        link_activate(l_ptr);
-                       link_send_proto_msg(l_ptr, STATE_MSG, 1, 0, 0, 0, 0);
+                       tipc_link_send_proto_msg(l_ptr, STATE_MSG, 1, 0, 0, 0, 0);
                        l_ptr->fsm_msg_cnt++;
                        link_set_timer(l_ptr, cont_intv);
                        break;
@@ -913,7 +913,7 @@ static void link_state_event(struct link *l_ptr, unsigned event)
                        dbg_link(" -> RR\n");
                        l_ptr->state = RESET_RESET;
                        l_ptr->fsm_msg_cnt = 0;
-                       link_send_proto_msg(l_ptr, ACTIVATE_MSG, 1, 0, 0, 0, 0);
+                       tipc_link_send_proto_msg(l_ptr, ACTIVATE_MSG, 1, 0, 0, 0, 0);
                        l_ptr->fsm_msg_cnt++;
                        link_set_timer(l_ptr, cont_intv);
                        break;
@@ -923,7 +923,7 @@ static void link_state_event(struct link *l_ptr, unsigned event)
                        /* fall through */
                case TIMEOUT_EVT:
                        dbg_link("TIM \n");
-                       link_send_proto_msg(l_ptr, RESET_MSG, 0, 0, 0, 0, 0);
+                       tipc_link_send_proto_msg(l_ptr, RESET_MSG, 0, 0, 0, 0, 0);
                        l_ptr->fsm_msg_cnt++;
                        link_set_timer(l_ptr, cont_intv);
                        break;
@@ -947,7 +947,7 @@ static void link_state_event(struct link *l_ptr, unsigned event)
                        l_ptr->state = WORKING_WORKING;
                        l_ptr->fsm_msg_cnt = 0;
                        link_activate(l_ptr);
-                       link_send_proto_msg(l_ptr, STATE_MSG, 1, 0, 0, 0, 0);
+                       tipc_link_send_proto_msg(l_ptr, STATE_MSG, 1, 0, 0, 0, 0);
                        l_ptr->fsm_msg_cnt++;
                        link_set_timer(l_ptr, cont_intv);
                        break;
@@ -956,7 +956,7 @@ static void link_state_event(struct link *l_ptr, unsigned event)
                        break;
                case TIMEOUT_EVT:
                        dbg_link("TIM\n");
-                       link_send_proto_msg(l_ptr, ACTIVATE_MSG, 0, 0, 0, 0, 0);
+                       tipc_link_send_proto_msg(l_ptr, ACTIVATE_MSG, 0, 0, 0, 0, 0);
                        l_ptr->fsm_msg_cnt++;
                        link_set_timer(l_ptr, cont_intv);
                        dbg_link("fsm_msg_cnt %u\n", l_ptr->fsm_msg_cnt);
@@ -1023,12 +1023,12 @@ static inline void link_add_to_outqueue(struct link *l_ptr,
 }
 
 /* 
- * link_send_buf() is the 'full path' for messages, called from 
+ * tipc_link_send_buf() is the 'full path' for messages, called from 
  * inside TIPC when the 'fast path' in tipc_send_buf
  * has failed, and from link_send()
  */
 
-int link_send_buf(struct link *l_ptr, struct sk_buff *buf)
+int tipc_link_send_buf(struct link *l_ptr, struct sk_buff *buf)
 {
        struct tipc_msg *msg = buf_msg(buf);
        u32 size = msg_size(msg);
@@ -1051,7 +1051,7 @@ int link_send_buf(struct link *l_ptr, struct sk_buff *buf)
                buf_discard(buf);
                if (imp > CONN_MANAGER) {
                        warn("Resetting <%s>, send queue full", l_ptr->name);
-                       link_reset(l_ptr);
+                       tipc_link_reset(l_ptr);
                }
                return dsz;
        }
@@ -1059,21 +1059,21 @@ int link_send_buf(struct link *l_ptr, struct sk_buff *buf)
        /* Fragmentation needed ? */
 
        if (size > max_packet)
-               return link_send_long_buf(l_ptr, buf);
+               return tipc_link_send_long_buf(l_ptr, buf);
 
        /* Packet can be queued or sent: */
 
        if (queue_size > l_ptr->stats.max_queue_sz)
                l_ptr->stats.max_queue_sz = queue_size;
 
-       if (likely(!bearer_congested(l_ptr->b_ptr, l_ptr) && 
+       if (likely(!tipc_bearer_congested(l_ptr->b_ptr, l_ptr) && 
                   !link_congested(l_ptr))) {
                link_add_to_outqueue(l_ptr, buf, msg);
 
-               if (likely(bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr))) {
+               if (likely(tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr))) {
                        l_ptr->unacked_window = 0;
                } else {
-                       bearer_schedule(l_ptr->b_ptr, l_ptr);
+                       tipc_bearer_schedule(l_ptr->b_ptr, l_ptr);
                        l_ptr->stats.bearer_congs++;
                        l_ptr->next_out = buf;
                }
@@ -1088,7 +1088,7 @@ int link_send_buf(struct link *l_ptr, struct sk_buff *buf)
 
                if (l_ptr->next_out && 
                    link_bundle_buf(l_ptr, l_ptr->last_out, buf)) {
-                       bearer_resolve_congestion(l_ptr->b_ptr, l_ptr);
+                       tipc_bearer_resolve_congestion(l_ptr->b_ptr, l_ptr);
                        return dsz;
                }
 
@@ -1114,38 +1114,38 @@ int link_send_buf(struct link *l_ptr, struct sk_buff *buf)
        if (!l_ptr->next_out)
                l_ptr->next_out = buf;
        link_add_to_outqueue(l_ptr, buf, msg);
-       bearer_resolve_congestion(l_ptr->b_ptr, l_ptr);
+       tipc_bearer_resolve_congestion(l_ptr->b_ptr, l_ptr);
        return dsz;
 }
 
 /* 
- * link_send(): same as link_send_buf(), but the link to use has 
+ * tipc_link_send(): same as tipc_link_send_buf(), but the link to use has 
  * not been selected yet, and the the owner node is not locked
  * Called by TIPC internal users, e.g. the name distributor
  */
 
-int link_send(struct sk_buff *buf, u32 dest, u32 selector)
+int tipc_link_send(struct sk_buff *buf, u32 dest, u32 selector)
 {
        struct link *l_ptr;
        struct node *n_ptr;
        int res = -ELINKCONG;
 
-       read_lock_bh(&net_lock);
-       n_ptr = node_select(dest, selector);
+       read_lock_bh(&tipc_net_lock);
+       n_ptr = tipc_node_select(dest, selector);
        if (n_ptr) {
-               node_lock(n_ptr);
+               tipc_node_lock(n_ptr);
                l_ptr = n_ptr->active_links[selector & 1];
-               dbg("link_send: found link %x for dest %x\n", l_ptr, dest);
+               dbg("tipc_link_send: found link %x for dest %x\n", l_ptr, dest);
                if (l_ptr) {
-                       res = link_send_buf(l_ptr, buf);
+                       res = tipc_link_send_buf(l_ptr, buf);
                }
-               node_unlock(n_ptr);
+               tipc_node_unlock(n_ptr);
        } else {
                dbg("Attempt to send msg to unknown node:\n");
                msg_dbg(buf_msg(buf),">>>");
                buf_discard(buf);
        }
-       read_unlock_bh(&net_lock);
+       read_unlock_bh(&tipc_net_lock);
        return res;
 }
 
@@ -1166,14 +1166,14 @@ static inline int link_send_buf_fast(struct link *l_ptr, struct sk_buff *buf,
                if (likely(msg_size(msg) <= link_max_pkt(l_ptr))) {
                        if (likely(list_empty(&l_ptr->b_ptr->cong_links))) {
                                link_add_to_outqueue(l_ptr, buf, msg);
-                               if (likely(bearer_send(l_ptr->b_ptr, buf,
-                                                      &l_ptr->media_addr))) {
+                               if (likely(tipc_bearer_send(l_ptr->b_ptr, buf,
+                                                           &l_ptr->media_addr))) {
                                        l_ptr->unacked_window = 0;
                                        msg_dbg(msg,"SENT_FAST:");
                                        return res;
                                }
                                dbg("failed sent fast...\n");
-                               bearer_schedule(l_ptr->b_ptr, l_ptr);
+                               tipc_bearer_schedule(l_ptr->b_ptr, l_ptr);
                                l_ptr->stats.bearer_congs++;
                                l_ptr->next_out = buf;
                                return res;
@@ -1182,7 +1182,7 @@ static inline int link_send_buf_fast(struct link *l_ptr, struct sk_buff *buf,
                else
                        *used_max_pkt = link_max_pkt(l_ptr);
        }
-       return link_send_buf(l_ptr, buf);  /* All other cases */
+       return tipc_link_send_buf(l_ptr, buf);  /* All other cases */
 }
 
 /* 
@@ -1200,24 +1200,24 @@ int tipc_send_buf_fast(struct sk_buff *buf, u32 destnode)
        u32 dummy;
 
        if (destnode == tipc_own_addr)
-               return port_recv_msg(buf);
+               return tipc_port_recv_msg(buf);
 
-       read_lock_bh(&net_lock);
-       n_ptr = node_select(destnode, selector);
+       read_lock_bh(&tipc_net_lock);
+       n_ptr = tipc_node_select(destnode, selector);
        if (likely(n_ptr)) {
-               node_lock(n_ptr);
+               tipc_node_lock(n_ptr);
                l_ptr = n_ptr->active_links[selector];
                dbg("send_fast: buf %x selected %x, destnode = %x\n",
                    buf, l_ptr, destnode);
                if (likely(l_ptr)) {
                        res = link_send_buf_fast(l_ptr, buf, &dummy);
-                       node_unlock(n_ptr);
-                       read_unlock_bh(&net_lock);
+                       tipc_node_unlock(n_ptr);
+                       read_unlock_bh(&tipc_net_lock);
                        return res;
                }
-               node_unlock(n_ptr);
+               tipc_node_unlock(n_ptr);
        }
-       read_unlock_bh(&net_lock);
+       read_unlock_bh(&tipc_net_lock);
        res = msg_data_sz(buf_msg(buf));
        tipc_reject_msg(buf, TIPC_ERR_NO_NODE);
        return res;
@@ -1225,15 +1225,15 @@ int tipc_send_buf_fast(struct sk_buff *buf, u32 destnode)
 
 
 /* 
- * link_send_sections_fast: Entry for messages where the 
+ * tipc_link_send_sections_fast: Entry for messages where the 
  * destination processor is known and the header is complete,
  * except for total message length. 
  * Returns user data length or errno.
  */
-int link_send_sections_fast(struct port *sender, 
-                           struct iovec const *msg_sect,
-                           const u32 num_sect, 
-                           u32 destaddr)
+int tipc_link_send_sections_fast(struct port *sender, 
+                                struct iovec const *msg_sect,
+                                const u32 num_sect, 
+                                u32 destaddr)
 {
        struct tipc_msg *hdr = &sender->publ.phdr;
        struct link *l_ptr;
@@ -1253,10 +1253,10 @@ again:
        res = msg_build(hdr, msg_sect, num_sect, sender->max_pkt,
                        !sender->user_port, &buf);
 
-       read_lock_bh(&net_lock);
-       node = node_select(destaddr, selector);
+       read_lock_bh(&tipc_net_lock);
+       node = tipc_node_select(destaddr, selector);
        if (likely(node)) {
-               node_lock(node);
+               tipc_node_lock(node);
                l_ptr = node->active_links[selector];
                if (likely(l_ptr)) {
                        if (likely(buf)) {
@@ -1265,8 +1265,8 @@ again:
                                if (unlikely(res < 0))
                                        buf_discard(buf);
 exit:
-                               node_unlock(node);
-                               read_unlock_bh(&net_lock);
+                               tipc_node_unlock(node);
+                               read_unlock_bh(&tipc_net_lock);
                                return res;
                        }
 
@@ -1290,8 +1290,8 @@ exit:
                         */
 
                        sender->max_pkt = link_max_pkt(l_ptr);
-                       node_unlock(node);
-                       read_unlock_bh(&net_lock);
+                       tipc_node_unlock(node);
+                       read_unlock_bh(&tipc_net_lock);
 
 
                        if ((msg_hdr_sz(hdr) + res) <= sender->max_pkt)
@@ -1300,17 +1300,17 @@ exit:
                        return link_send_sections_long(sender, msg_sect,
                                                       num_sect, destaddr);
                }
-               node_unlock(node);
+               tipc_node_unlock(node);
        }
-       read_unlock_bh(&net_lock);
+       read_unlock_bh(&tipc_net_lock);
 
        /* Couldn't find a link to the destination node */
 
        if (buf)
                return tipc_reject_msg(buf, TIPC_ERR_NO_NODE);
        if (res >= 0)
-               return port_reject_sections(sender, hdr, msg_sect, num_sect,
-                                           TIPC_ERR_NO_NODE);
+               return tipc_port_reject_sections(sender, hdr, msg_sect, num_sect,
+                                                TIPC_ERR_NO_NODE);
        return res;
 }
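
tipc_link_send_sections_fast above builds the message optimistically against the sender's cached maximum packet size, then re-checks the active link's limit under the node lock; if the limit shrank it refreshes the cache and retries, otherwise it either sends the single buffer or hands off to the fragmenting path. The following is a compact stand-alone sketch of that retry pattern only; all names (sender, fits, link_max_pkt_now, send_sections_fast) and the numbers are local stand-ins, not the TIPC functions or constants, and allocation failure and port congestion are not modelled.

#include <stdio.h>

struct sender { unsigned cached_max_pkt; };

/* Pretend the active link now allows smaller packets than the cached value. */
static unsigned link_max_pkt_now(void) { return 1000; }

static int fits(unsigned max_pkt, unsigned hdr, unsigned data)
{
	return hdr + data <= max_pkt;
}

static void send_sections_fast(struct sender *s, unsigned hdr, unsigned data)
{
	for (;;) {
		int ok = fits(s->cached_max_pkt, hdr, data);	/* optimistic build     */
		unsigned now = link_max_pkt_now();		/* re-read "under lock" */

		if (now < s->cached_max_pkt) {			/* limit shrank: retry  */
			s->cached_max_pkt = now;
			continue;
		}
		puts(ok ? "sent as a single packet"
			: "handing off to the fragmenting path");
		return;
	}
}

int main(void)
{
	struct sender s = { 1500 };
	send_sections_fast(&s, 24, 1200);
	return 0;
}
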
 
@@ -1444,17 +1444,17 @@ error:
         * Now we have a buffer chain. Select a link and check
         * that packet size is still OK
         */
-       node = node_select(destaddr, sender->publ.ref & 1);
+       node = tipc_node_select(destaddr, sender->publ.ref & 1);
        if (likely(node)) {
-               node_lock(node);
+               tipc_node_lock(node);
                l_ptr = node->active_links[sender->publ.ref & 1];
                if (!l_ptr) {
-                       node_unlock(node);
+                       tipc_node_unlock(node);
                        goto reject;
                }
                if (link_max_pkt(l_ptr) < max_pkt) {
                        sender->max_pkt = link_max_pkt(l_ptr);
-                       node_unlock(node);
+                       tipc_node_unlock(node);
                        for (; buf_chain; buf_chain = buf) {
                                buf = buf_chain->next;
                                buf_discard(buf_chain);
@@ -1467,8 +1467,8 @@ reject:
                        buf = buf_chain->next;
                        buf_discard(buf_chain);
                }
-               return port_reject_sections(sender, hdr, msg_sect, num_sect,
-                                           TIPC_ERR_NO_NODE);
+               return tipc_port_reject_sections(sender, hdr, msg_sect, num_sect,
+                                                TIPC_ERR_NO_NODE);
        }
 
        /* Append whole chain to send queue: */
@@ -1491,15 +1491,15 @@ reject:
 
        /* Send it, if possible: */
 
-       link_push_queue(l_ptr);
-       node_unlock(node);
+       tipc_link_push_queue(l_ptr);
+       tipc_node_unlock(node);
        return dsz;
 }
 
 /* 
- * link_push_packet: Push one unsent packet to the media
+ * tipc_link_push_packet: Push one unsent packet to the media
  */
-u32 link_push_packet(struct link *l_ptr)
+u32 tipc_link_push_packet(struct link *l_ptr)
 {
        struct sk_buff *buf = l_ptr->first_out;
        u32 r_q_size = l_ptr->retransm_queue_size;
@@ -1526,7 +1526,7 @@ u32 link_push_packet(struct link *l_ptr)
        if (r_q_size && buf && !skb_cloned(buf)) {
                msg_set_ack(buf_msg(buf), mod(l_ptr->next_in_no - 1));
                msg_set_bcast_ack(buf_msg(buf), l_ptr->owner->bclink.last_in); 
-               if (bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) {
+               if (tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) {
                        msg_dbg(buf_msg(buf), ">DEF-RETR>");
                        l_ptr->retransm_queue_head = mod(++r_q_head);
                        l_ptr->retransm_queue_size = --r_q_size;
@@ -1545,7 +1545,7 @@ u32 link_push_packet(struct link *l_ptr)
        if (buf) {
                msg_set_ack(buf_msg(buf), mod(l_ptr->next_in_no - 1));
                msg_set_bcast_ack(buf_msg(buf),l_ptr->owner->bclink.last_in); 
-               if (bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) {
+               if (tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) {
                        msg_dbg(buf_msg(buf), ">DEF-PROT>");
                        l_ptr->unacked_window = 0;
                        buf_discard(buf);
@@ -1569,7 +1569,7 @@ u32 link_push_packet(struct link *l_ptr)
                if (mod(next - first) < l_ptr->queue_limit[0]) {
                        msg_set_ack(msg, mod(l_ptr->next_in_no - 1));
                        msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in); 
-                       if (bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) {
+                       if (tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) {
                                if (msg_user(msg) == MSG_BUNDLER)
                                        msg_set_type(msg, CLOSED_MSG);
                                msg_dbg(msg, ">PUSH-DATA>");
@@ -1589,29 +1589,29 @@ u32 link_push_packet(struct link *l_ptr)
  * push_queue(): push out the unsent messages of a link where
  *               congestion has abated. Node is locked
  */
-void link_push_queue(struct link *l_ptr)
+void tipc_link_push_queue(struct link *l_ptr)
 {
        u32 res;
 
-       if (bearer_congested(l_ptr->b_ptr, l_ptr))
+       if (tipc_bearer_congested(l_ptr->b_ptr, l_ptr))
                return;
 
        do {
-               res = link_push_packet(l_ptr);
+               res = tipc_link_push_packet(l_ptr);
        }
        while (res == TIPC_OK);
        if (res == PUSH_FAILED)
-               bearer_schedule(l_ptr->b_ptr, l_ptr);
+               tipc_bearer_schedule(l_ptr->b_ptr, l_ptr);
 }
 
-void link_retransmit(struct link *l_ptr, struct sk_buff *buf, 
-                    u32 retransmits)
+void tipc_link_retransmit(struct link *l_ptr, struct sk_buff *buf, 
+                         u32 retransmits)
 {
        struct tipc_msg *msg;
 
        dbg("Retransmitting %u in link %x\n", retransmits, l_ptr);
 
-       if (bearer_congested(l_ptr->b_ptr, l_ptr) && buf && !skb_cloned(buf)) {
+       if (tipc_bearer_congested(l_ptr->b_ptr, l_ptr) && buf && !skb_cloned(buf)) {
                msg_dbg(buf_msg(buf), ">NO_RETR->BCONG>");
                dbg_print_link(l_ptr, "   ");
                l_ptr->retransm_queue_head = msg_seqno(buf_msg(buf));
@@ -1622,15 +1622,15 @@ void link_retransmit(struct link *l_ptr, struct sk_buff *buf,
                msg = buf_msg(buf);
                msg_set_ack(msg, mod(l_ptr->next_in_no - 1));
                msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in); 
-               if (bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) {
+               if (tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) {
                         /* Catch if retransmissions fail repeatedly: */
                         if (l_ptr->last_retransmitted == msg_seqno(msg)) {
                                 if (++l_ptr->stale_count > 100) {
-                                        msg_print(CONS, buf_msg(buf), ">RETR>");
+                                        tipc_msg_print(TIPC_CONS, buf_msg(buf), ">RETR>");
                                         info("...Retransmitted %u times\n",
                                             l_ptr->stale_count);
-                                        link_print(l_ptr, CONS, "Resetting Link\n");;
-                                        link_reset(l_ptr);
+                                        link_print(l_ptr, TIPC_CONS, "Resetting Link\n");;
+                                        tipc_link_reset(l_ptr);
                                         break;
                                 }
                         } else {
@@ -1643,7 +1643,7 @@ void link_retransmit(struct link *l_ptr, struct sk_buff *buf,
                        retransmits--;
                        l_ptr->stats.retransmitted++;
                } else {
-                       bearer_schedule(l_ptr->b_ptr, l_ptr);
+                       tipc_bearer_schedule(l_ptr->b_ptr, l_ptr);
                        l_ptr->stats.bearer_congs++;
                        l_ptr->retransm_queue_head = msg_seqno(buf_msg(buf));
                        l_ptr->retransm_queue_size = retransmits;
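
The retransmit path above guards against a wedged peer: if the very same sequence number keeps being retransmitted, a stale counter grows, and once it passes the threshold checked above the link is reset instead of being retried forever. Here is a small stand-alone sketch of that guard; the struct, function names and the exact bookkeeping are illustrative stand-ins, not the TIPC fields.

#include <stdio.h>

struct link_guard {
	unsigned last_seqno;
	unsigned stale_count;
};

/* Returns 1 if the caller should give up and reset the link. */
static int note_retransmit(struct link_guard *g, unsigned seqno)
{
	if (g->stale_count && g->last_seqno == seqno) {
		if (++g->stale_count > 100)	/* same packet again and again */
			return 1;
	} else {
		g->last_seqno = seqno;		/* progress: restart the count */
		g->stale_count = 1;
	}
	return 0;
}

int main(void)
{
	struct link_guard g = { 0, 0 };
	unsigned i;

	for (i = 0; i < 200; i++) {
		if (note_retransmit(&g, 42)) {	/* retransmitting seqno 42 forever */
			printf("resetting link after %u stale retransmits\n",
			       g.stale_count);
			break;
		}
	}
	return 0;
}
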
@@ -1663,9 +1663,9 @@ static void link_recv_non_seq(struct sk_buff *buf)
        struct tipc_msg *msg = buf_msg(buf);
 
        if (msg_user(msg) ==  LINK_CONFIG)
-               disc_recv_msg(buf);
+               tipc_disc_recv_msg(buf);
        else
-               bclink_recv_pkt(buf);
+               tipc_bclink_recv_pkt(buf);
 }
 
 /** 
@@ -1692,7 +1692,7 @@ static struct sk_buff *link_insert_deferred_queue(struct link *l_ptr,
 
 void tipc_recv_msg(struct sk_buff *head, struct tipc_bearer *tb_ptr)
 {
-       read_lock_bh(&net_lock);
+       read_lock_bh(&tipc_net_lock);
        while (head) {
                struct bearer *b_ptr;
                struct node *n_ptr;
@@ -1720,22 +1720,22 @@ void tipc_recv_msg(struct sk_buff *head, struct tipc_bearer *tb_ptr)
                        link_recv_non_seq(buf);
                        continue;
                }
-               n_ptr = node_find(msg_prevnode(msg));
+               n_ptr = tipc_node_find(msg_prevnode(msg));
                if (unlikely(!n_ptr))
                        goto cont;
 
-               node_lock(n_ptr);
+               tipc_node_lock(n_ptr);
                l_ptr = n_ptr->links[b_ptr->identity];
                if (unlikely(!l_ptr)) {
-                       node_unlock(n_ptr);
+                       tipc_node_unlock(n_ptr);
                        goto cont;
                }
                /* 
                 * Release acked messages 
                 */
                if (less(n_ptr->bclink.acked, msg_bcast_ack(msg))) {
-                       if (node_is_up(n_ptr) && n_ptr->bclink.supported)
-                               bclink_acknowledge(n_ptr, msg_bcast_ack(msg));
+                       if (tipc_node_is_up(n_ptr) && n_ptr->bclink.supported)
+                               tipc_bclink_acknowledge(n_ptr, msg_bcast_ack(msg));
                }
 
                crs = l_ptr->first_out;
@@ -1752,12 +1752,12 @@ void tipc_recv_msg(struct sk_buff *head, struct tipc_bearer *tb_ptr)
                        l_ptr->out_queue_size -= released;
                }
                if (unlikely(l_ptr->next_out))
-                       link_push_queue(l_ptr);
+                       tipc_link_push_queue(l_ptr);
                if (unlikely(!list_empty(&l_ptr->waiting_ports)))
-                       link_wakeup_ports(l_ptr, 0);
+                       tipc_link_wakeup_ports(l_ptr, 0);
                if (unlikely(++l_ptr->unacked_window >= TIPC_MIN_LINK_WIN)) {
                        l_ptr->stats.sent_acks++;
-                       link_send_proto_msg(l_ptr, STATE_MSG, 0, 0, 0, 0, 0);
+                       tipc_link_send_proto_msg(l_ptr, STATE_MSG, 0, 0, 0, 0, 0);
                }
 
 protocol_check:
@@ -1770,8 +1770,8 @@ protocol_check:
                                if (likely(msg_is_dest(msg, tipc_own_addr))) {
 deliver:
                                        if (likely(msg_isdata(msg))) {
-                                               node_unlock(n_ptr);
-                                               port_recv_msg(buf);
+                                               tipc_node_unlock(n_ptr);
+                                               tipc_port_recv_msg(buf);
                                                continue;
                                        }
                                        switch (msg_user(msg)) {
@@ -1779,34 +1779,32 @@ deliver:
                                                l_ptr->stats.recv_bundles++;
                                                l_ptr->stats.recv_bundled += 
                                                        msg_msgcnt(msg);
-                                               node_unlock(n_ptr);
-                                               link_recv_bundle(buf);
+                                               tipc_node_unlock(n_ptr);
+                                               tipc_link_recv_bundle(buf);
                                                continue;
                                        case ROUTE_DISTRIBUTOR:
-                                               node_unlock(n_ptr);
-                                               cluster_recv_routing_table(buf);
+                                               tipc_node_unlock(n_ptr);
+                                               tipc_cltr_recv_routing_table(buf);
                                                continue;
                                        case NAME_DISTRIBUTOR:
-                                               node_unlock(n_ptr);
-                                               named_recv(buf);
+                                               tipc_node_unlock(n_ptr);
+                                               tipc_named_recv(buf);
                                                continue;
                                        case CONN_MANAGER:
-                                               node_unlock(n_ptr);
-                                               port_recv_proto_msg(buf);
+                                               tipc_node_unlock(n_ptr);
+                                               tipc_port_recv_proto_msg(buf);
                                                continue;
                                        case MSG_FRAGMENTER:
                                                l_ptr->stats.recv_fragments++;
-                                               if (link_recv_fragment(
-                                                       &l_ptr->defragm_buf, 
-                                                       &buf, &msg)) {
+                                               if (tipc_link_recv_fragment(&l_ptr->defragm_buf, 
+                                                                           &buf, &msg)) {
                                                        l_ptr->stats.recv_fragmented++;
                                                        goto deliver;
                                                }
                                                break;
                                        case CHANGEOVER_PROTOCOL:
                                                type = msg_type(msg);
-                                               if (link_recv_changeover_msg(
-                                                       &l_ptr, &buf)) {
+                                               if (link_recv_changeover_msg(&l_ptr, &buf)) {
                                                        msg = buf_msg(buf);
                                                        seq_no = msg_seqno(msg);
                                                        TIPC_SKB_CB(buf)->handle 
@@ -1818,20 +1816,20 @@ deliver:
                                                break;
                                        }
                                }
-                               node_unlock(n_ptr);
-                               net_route_msg(buf);
+                               tipc_node_unlock(n_ptr);
+                               tipc_net_route_msg(buf);
                                continue;
                        }
                        link_handle_out_of_seq_msg(l_ptr, buf);
                        head = link_insert_deferred_queue(l_ptr, head);
-                       node_unlock(n_ptr);
+                       tipc_node_unlock(n_ptr);
                        continue;
                }
 
                if (msg_user(msg) == LINK_PROTOCOL) {
                        link_recv_proto_msg(l_ptr, buf);
                        head = link_insert_deferred_queue(l_ptr, head);
-                       node_unlock(n_ptr);
+                       tipc_node_unlock(n_ptr);
                        continue;
                }
                msg_dbg(msg,"NSEQ<REC<");
@@ -1842,14 +1840,14 @@ deliver:
                        msg_dbg(msg,"RECV-REINS:");
                        buf->next = head;
                        head = buf;
-                       node_unlock(n_ptr);
+                       tipc_node_unlock(n_ptr);
                        continue;
                }
-               node_unlock(n_ptr);
+               tipc_node_unlock(n_ptr);
 cont:
                buf_discard(buf);
        }
-       read_unlock_bh(&net_lock);
+       read_unlock_bh(&tipc_net_lock);
 }
 
 /* 
@@ -1858,9 +1856,9 @@ cont:
  * Returns the increase of the queue length,i.e. 0 or 1
  */
 
-u32 link_defer_pkt(struct sk_buff **head,
-                  struct sk_buff **tail,
-                  struct sk_buff *buf)
+u32 tipc_link_defer_pkt(struct sk_buff **head,
+                       struct sk_buff **tail,
+                       struct sk_buff *buf)
 {
        struct sk_buff *prev = 0;
        struct sk_buff *crs = *head;
@@ -1939,12 +1937,12 @@ static void link_handle_out_of_seq_msg(struct link *l_ptr,
                return;
        }
 
-       if (link_defer_pkt(&l_ptr->oldest_deferred_in,
-                          &l_ptr->newest_deferred_in, buf)) {
+       if (tipc_link_defer_pkt(&l_ptr->oldest_deferred_in,
+                               &l_ptr->newest_deferred_in, buf)) {
                l_ptr->deferred_inqueue_sz++;
                l_ptr->stats.deferred_recv++;
                if ((l_ptr->deferred_inqueue_sz % 16) == 1)
-                       link_send_proto_msg(l_ptr, STATE_MSG, 0, 0, 0, 0, 0);
+                       tipc_link_send_proto_msg(l_ptr, STATE_MSG, 0, 0, 0, 0, 0);
        } else
                l_ptr->stats.duplicates++;
 }
@@ -1952,8 +1950,8 @@ static void link_handle_out_of_seq_msg(struct link *l_ptr,
 /*
  * Send protocol message to the other endpoint.
  */
-void link_send_proto_msg(struct link *l_ptr, u32 msg_typ, int probe_msg,
-                        u32 gap, u32 tolerance, u32 priority, u32 ack_mtu)
+void tipc_link_send_proto_msg(struct link *l_ptr, u32 msg_typ, int probe_msg,
+                             u32 gap, u32 tolerance, u32 priority, u32 ack_mtu)
 {
        struct sk_buff *buf = 0;
        struct tipc_msg *msg = l_ptr->pmsg;
@@ -1964,12 +1962,12 @@ void link_send_proto_msg(struct link *l_ptr, u32 msg_typ, int probe_msg,
        msg_set_type(msg, msg_typ);
        msg_set_net_plane(msg, l_ptr->b_ptr->net_plane);
        msg_set_bcast_ack(msg, mod(l_ptr->owner->bclink.last_in)); 
-       msg_set_last_bcast(msg, bclink_get_last_sent());
+       msg_set_last_bcast(msg, tipc_bclink_get_last_sent());
 
        if (msg_typ == STATE_MSG) {
                u32 next_sent = mod(l_ptr->next_out_no);
 
-               if (!link_is_up(l_ptr))
+               if (!tipc_link_is_up(l_ptr))
                        return;
                if (l_ptr->next_out)
                        next_sent = msg_seqno(buf_msg(l_ptr->next_out));
@@ -2013,7 +2011,7 @@ void link_send_proto_msg(struct link *l_ptr, u32 msg_typ, int probe_msg,
                msg_set_max_pkt(msg, l_ptr->max_pkt_target);
        }
 
-       if (node_has_redundant_links(l_ptr->owner)) {
+       if (tipc_node_has_redundant_links(l_ptr->owner)) {
                msg_set_redundant_link(msg);
        } else {
                msg_clear_redundant_link(msg);
@@ -2026,7 +2024,7 @@ void link_send_proto_msg(struct link *l_ptr, u32 msg_typ, int probe_msg,
 
        /* Congestion? */
 
-       if (bearer_congested(l_ptr->b_ptr, l_ptr)) {
+       if (tipc_bearer_congested(l_ptr->b_ptr, l_ptr)) {
                if (!l_ptr->proto_msg_queue) {
                        l_ptr->proto_msg_queue =
                                buf_acquire(sizeof(l_ptr->proto_msg));
@@ -2050,14 +2048,14 @@ void link_send_proto_msg(struct link *l_ptr, u32 msg_typ, int probe_msg,
        memcpy(buf->data, (unchar *)msg, sizeof(l_ptr->proto_msg));
         msg_set_size(buf_msg(buf), msg_size);
 
-       if (bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) {
+       if (tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) {
                l_ptr->unacked_window = 0;
                buf_discard(buf);
                return;
        }
 
        /* New congestion */
-       bearer_schedule(l_ptr->b_ptr, l_ptr);
+       tipc_bearer_schedule(l_ptr->b_ptr, l_ptr);
        l_ptr->proto_msg_queue = buf;
        l_ptr->stats.bearer_congs++;
 }
@@ -2131,7 +2129,7 @@ static void link_recv_proto_msg(struct link *l_ptr, struct sk_buff *buf)
                l_ptr->peer_bearer_id = msg_bearer_id(msg);
 
                /* Synchronize broadcast sequence numbers */
-               if (!node_has_redundant_links(l_ptr->owner)) {
+               if (!tipc_node_has_redundant_links(l_ptr->owner)) {
                        l_ptr->owner->bclink.last_in = mod(msg_last_bcast(msg));
                }
                break;
@@ -2145,7 +2143,7 @@ static void link_recv_proto_msg(struct link *l_ptr, struct sk_buff *buf)
                        warn("Changing prio <%s>: %u->%u\n",
                             l_ptr->name, l_ptr->priority, msg_linkprio(msg));
                        l_ptr->priority = msg_linkprio(msg);
-                       link_reset(l_ptr); /* Enforce change to take effect */
+                       tipc_link_reset(l_ptr); /* Enforce change to take effect */
                        break;
                }
                link_state_event(l_ptr, TRAFFIC_MSG_EVT);
@@ -2176,17 +2174,17 @@ static void link_recv_proto_msg(struct link *l_ptr, struct sk_buff *buf)
 
                /* Protocol message before retransmits, reduce loss risk */
 
-               bclink_check_gap(l_ptr->owner, msg_last_bcast(msg));
+               tipc_bclink_check_gap(l_ptr->owner, msg_last_bcast(msg));
 
                if (rec_gap || (msg_probe(msg))) {
-                       link_send_proto_msg(l_ptr, STATE_MSG,
-                                           0, rec_gap, 0, 0, max_pkt_ack);
+                       tipc_link_send_proto_msg(l_ptr, STATE_MSG,
+                                                0, rec_gap, 0, 0, max_pkt_ack);
                }
                if (msg_seq_gap(msg)) {
                        msg_dbg(msg, "With Gap:");
                        l_ptr->stats.recv_nacks++;
-                       link_retransmit(l_ptr, l_ptr->first_out,
-                                       msg_seq_gap(msg));
+                       tipc_link_retransmit(l_ptr, l_ptr->first_out,
+                                            msg_seq_gap(msg));
                }
                break;
        default:
@@ -2198,20 +2196,20 @@ exit:
 
 
 /*
- * link_tunnel(): Send one message via a link belonging to 
+ * tipc_link_tunnel(): Send one message via a link belonging to 
  * another bearer. Owner node is locked.
  */
-void link_tunnel(struct link *l_ptr, 
-           struct tipc_msg *tunnel_hdr, 
-           struct tipc_msg  *msg,
-           u32 selector)
+void tipc_link_tunnel(struct link *l_ptr, 
+                     struct tipc_msg *tunnel_hdr, 
+                     struct tipc_msg  *msg,
+                     u32 selector)
 {
        struct link *tunnel;
        struct sk_buff *buf;
        u32 length = msg_size(msg);
 
        tunnel = l_ptr->owner->active_links[selector & 1];
-       if (!link_is_up(tunnel))
+       if (!tipc_link_is_up(tunnel))
                return;
        msg_set_size(tunnel_hdr, length + INT_H_SIZE);
        buf = buf_acquire(length + INT_H_SIZE);
@@ -2222,7 +2220,7 @@ void link_tunnel(struct link *l_ptr,
        dbg("%c->%c:", l_ptr->b_ptr->net_plane, tunnel->b_ptr->net_plane);
        msg_dbg(buf_msg(buf), ">SEND>");
        assert(tunnel);
-       link_send_buf(tunnel, buf);
+       tipc_link_send_buf(tunnel, buf);
 }
 
 
@@ -2232,12 +2230,12 @@ void link_tunnel(struct link *l_ptr,
  *               Owner node is locked.
  */
 
-void link_changeover(struct link *l_ptr)
+void tipc_link_changeover(struct link *l_ptr)
 {
        u32 msgcount = l_ptr->out_queue_size;
        struct sk_buff *crs = l_ptr->first_out;
        struct link *tunnel = l_ptr->owner->active_links[0];
-       int split_bundles = node_has_redundant_links(l_ptr->owner);
+       int split_bundles = tipc_node_has_redundant_links(l_ptr->owner);
        struct tipc_msg tunnel_hdr;
 
        if (!tunnel)
@@ -2261,7 +2259,7 @@ void link_changeover(struct link *l_ptr)
                        dbg("%c->%c:", l_ptr->b_ptr->net_plane,
                            tunnel->b_ptr->net_plane);
                        msg_dbg(&tunnel_hdr, "EMPTY>SEND>");
-                       link_send_buf(tunnel, buf);
+                       tipc_link_send_buf(tunnel, buf);
                } else {
                        warn("Memory squeeze; link changeover failed\n");
                }
@@ -2277,20 +2275,20 @@ void link_changeover(struct link *l_ptr)
 
                        while (msgcount--) {
                                msg_set_seqno(m,msg_seqno(msg));
-                               link_tunnel(l_ptr, &tunnel_hdr, m,
-                                           msg_link_selector(m));
+                               tipc_link_tunnel(l_ptr, &tunnel_hdr, m,
+                                                msg_link_selector(m));
                                pos += align(msg_size(m));
                                m = (struct tipc_msg *)pos;
                        }
                } else {
-                       link_tunnel(l_ptr, &tunnel_hdr, msg,
-                                   msg_link_selector(msg));
+                       tipc_link_tunnel(l_ptr, &tunnel_hdr, msg,
+                                        msg_link_selector(msg));
                }
                crs = crs->next;
        }
 }
 
-void link_send_duplicate(struct link *l_ptr, struct link *tunnel)
+void tipc_link_send_duplicate(struct link *l_ptr, struct link *tunnel)
 {
        struct sk_buff *iter;
        struct tipc_msg tunnel_hdr;
@@ -2320,8 +2318,8 @@ void link_send_duplicate(struct link *l_ptr, struct link *tunnel)
                dbg("%c->%c:", l_ptr->b_ptr->net_plane,
                    tunnel->b_ptr->net_plane);
                msg_dbg(buf_msg(outbuf), ">SEND>");
-               link_send_buf(tunnel, outbuf);
-               if (!link_is_up(l_ptr))
+               tipc_link_send_buf(tunnel, outbuf);
+               if (!tipc_link_is_up(l_ptr))
                        return;
                iter = iter->next;
        }
@@ -2393,9 +2391,9 @@ static int link_recv_changeover_msg(struct link **l_ptr,
 
        /* First original message ?: */
 
-       if (link_is_up(dest_link)) {
+       if (tipc_link_is_up(dest_link)) {
                msg_dbg(tunnel_msg, "UP/FIRST/<REC<");
-               link_reset(dest_link);
+               tipc_link_reset(dest_link);
                dest_link->exp_msg_count = msg_count;
                if (!msg_count)
                        goto exit;
@@ -2436,7 +2434,7 @@ exit:
 /*
  *  Bundler functionality:
  */
-void link_recv_bundle(struct sk_buff *buf)
+void tipc_link_recv_bundle(struct sk_buff *buf)
 {
        u32 msgcount = msg_msgcnt(buf_msg(buf));
        u32 pos = INT_H_SIZE;
@@ -2456,7 +2454,7 @@ void link_recv_bundle(struct sk_buff *buf)
                };
                pos += align(msg_size(buf_msg(obuf)));
                msg_dbg(buf_msg(obuf), "     /");
-               net_route_msg(obuf);
+               tipc_net_route_msg(obuf);
        }
        buf_discard(buf);
 }
@@ -2467,11 +2465,11 @@ void link_recv_bundle(struct sk_buff *buf)
 
 
 /* 
- * link_send_long_buf: Entry for buffers needing fragmentation.
+ * tipc_link_send_long_buf: Entry for buffers needing fragmentation.
  * The buffer is complete, inclusive total message length. 
  * Returns user data length.
  */
-int link_send_long_buf(struct link *l_ptr, struct sk_buff *buf)
+int tipc_link_send_long_buf(struct link *l_ptr, struct sk_buff *buf)
 {
        struct tipc_msg *inmsg = buf_msg(buf);
        struct tipc_msg fragm_hdr;
@@ -2521,8 +2519,8 @@ int link_send_long_buf(struct link *l_ptr, struct sk_buff *buf)
                /*  Send queued messages first, if any: */
 
                l_ptr->stats.sent_fragments++;
-               link_send_buf(l_ptr, fragm);
-               if (!link_is_up(l_ptr))
+               tipc_link_send_buf(l_ptr, fragm);
+               if (!tipc_link_is_up(l_ptr))
                        return dsz;
                msg_set_fragm_no(&fragm_hdr, ++fragm_no);
                rest -= fragm_sz;
@@ -2582,11 +2580,11 @@ static inline void incr_timer_cnt(struct sk_buff *buf)
 }
 
 /* 
- * link_recv_fragment(): Called with node lock on. Returns 
+ * tipc_link_recv_fragment(): Called with node lock on. Returns 
  * the reassembled buffer if message is complete.
  */
-int link_recv_fragment(struct sk_buff **pending, struct sk_buff **fb, 
-                      struct tipc_msg **m)
+int tipc_link_recv_fragment(struct sk_buff **pending, struct sk_buff **fb, 
+                           struct tipc_msg **m)
 {
        struct sk_buff *prev = 0;
        struct sk_buff *fbuf = *fb;
@@ -2714,7 +2712,7 @@ static void link_set_supervision_props(struct link *l_ptr, u32 tolerance)
 }
 
 
-void link_set_queue_limits(struct link *l_ptr, u32 window)
+void tipc_link_set_queue_limits(struct link *l_ptr, u32 window)
 {
        /* Data messages from this node, inclusive FIRST_FRAGM */
        l_ptr->queue_limit[DATA_LOW] = window;
@@ -2739,7 +2737,7 @@ void link_set_queue_limits(struct link *l_ptr, u32 window)
  * @name - ptr to link name string
  * @node - ptr to area to be filled with ptr to associated node
  * 
- * Caller must hold 'net_lock' to ensure node and bearer are not deleted;
+ * Caller must hold 'tipc_net_lock' to ensure node and bearer are not deleted;
  * this also prevents link deletion.
  * 
  * Returns pointer to link (or 0 if invalid link name).
@@ -2754,11 +2752,11 @@ static struct link *link_find_link(const char *name, struct node **node)
        if (!link_name_validate(name, &link_name_parts))
                return 0;
 
-       b_ptr = bearer_find_interface(link_name_parts.if_local);
+       b_ptr = tipc_bearer_find_interface(link_name_parts.if_local);
        if (!b_ptr)
                return 0;
 
-       *node = node_find(link_name_parts.addr_peer); 
+       *node = tipc_node_find(link_name_parts.addr_peer); 
        if (!*node)
                return 0;
 
@@ -2769,8 +2767,8 @@ static struct link *link_find_link(const char *name, struct node **node)
        return l_ptr;
 }
 
-struct sk_buff *link_cmd_config(const void *req_tlv_area, int req_tlv_space, 
-                               u16 cmd)
+struct sk_buff *tipc_link_cmd_config(const void *req_tlv_area, int req_tlv_space, 
+                                    u16 cmd)
 {
        struct tipc_link_config *args;
         u32 new_value;
@@ -2779,61 +2777,62 @@ struct sk_buff *link_cmd_config(const void *req_tlv_area, int req_tlv_space,
         int res;
 
        if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_LINK_CONFIG))
-               return cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
+               return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
 
        args = (struct tipc_link_config *)TLV_DATA(req_tlv_area);
        new_value = ntohl(args->value);
 
-       if (!strcmp(args->name, bc_link_name)) {
+       if (!strcmp(args->name, tipc_bclink_name)) {
                if ((cmd == TIPC_CMD_SET_LINK_WINDOW) &&
-                   (bclink_set_queue_limits(new_value) == 0))
-                       return cfg_reply_none();
-               return cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
-                                             " (cannot change setting on broadcast link)");
+                   (tipc_bclink_set_queue_limits(new_value) == 0))
+                       return tipc_cfg_reply_none();
+               return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
+                                                  " (cannot change setting on broadcast link)");
        }
 
-       read_lock_bh(&net_lock);
+       read_lock_bh(&tipc_net_lock);
        l_ptr = link_find_link(args->name, &node); 
        if (!l_ptr) {
-               read_unlock_bh(&net_lock);
-               return cfg_reply_error_string("link not found");
+               read_unlock_bh(&tipc_net_lock);
+               return tipc_cfg_reply_error_string("link not found");
        }
 
-       node_lock(node);
+       tipc_node_lock(node);
        res = -EINVAL;
        switch (cmd) {
        case TIPC_CMD_SET_LINK_TOL: 
                if ((new_value >= TIPC_MIN_LINK_TOL) && 
                    (new_value <= TIPC_MAX_LINK_TOL)) {
                        link_set_supervision_props(l_ptr, new_value);
-                       link_send_proto_msg(l_ptr, STATE_MSG, 
-                                           0, 0, new_value, 0, 0);
+                       tipc_link_send_proto_msg(l_ptr, STATE_MSG, 
+                                                0, 0, new_value, 0, 0);
                        res = TIPC_OK;
                }
                break;
        case TIPC_CMD_SET_LINK_PRI: 
-               if (new_value < TIPC_NUM_LINK_PRI) {
+               if ((new_value >= TIPC_MIN_LINK_PRI) &&
+                   (new_value <= TIPC_MAX_LINK_PRI)) {
                        l_ptr->priority = new_value;
-                       link_send_proto_msg(l_ptr, STATE_MSG, 
-                                           0, 0, 0, new_value, 0);
+                       tipc_link_send_proto_msg(l_ptr, STATE_MSG, 
+                                                0, 0, 0, new_value, 0);
                        res = TIPC_OK;
                }
                break;
        case TIPC_CMD_SET_LINK_WINDOW: 
                if ((new_value >= TIPC_MIN_LINK_WIN) && 
                    (new_value <= TIPC_MAX_LINK_WIN)) {
-                       link_set_queue_limits(l_ptr, new_value);
+                       tipc_link_set_queue_limits(l_ptr, new_value);
                        res = TIPC_OK;
                }
                break;
        }
-       node_unlock(node);
+       tipc_node_unlock(node);
 
-       read_unlock_bh(&net_lock);
+       read_unlock_bh(&tipc_net_lock);
        if (res)
-               return cfg_reply_error_string("cannot change link setting");
+               return tipc_cfg_reply_error_string("cannot change link setting");
 
-       return cfg_reply_none();
+       return tipc_cfg_reply_none();
 }
 
 /**
@@ -2848,34 +2847,34 @@ static void link_reset_statistics(struct link *l_ptr)
        l_ptr->stats.recv_info = l_ptr->next_in_no;
 }
 
-struct sk_buff *link_cmd_reset_stats(const void *req_tlv_area, int req_tlv_space)
+struct sk_buff *tipc_link_cmd_reset_stats(const void *req_tlv_area, int req_tlv_space)
 {
        char *link_name;
        struct link *l_ptr; 
        struct node *node;
 
        if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_LINK_NAME))
-               return cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
+               return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
 
        link_name = (char *)TLV_DATA(req_tlv_area);
-       if (!strcmp(link_name, bc_link_name)) {
-               if (bclink_reset_stats())
-                       return cfg_reply_error_string("link not found");
-               return cfg_reply_none();
+       if (!strcmp(link_name, tipc_bclink_name)) {
+               if (tipc_bclink_reset_stats())
+                       return tipc_cfg_reply_error_string("link not found");
+               return tipc_cfg_reply_none();
        }
 
-       read_lock_bh(&net_lock);
+       read_lock_bh(&tipc_net_lock);
        l_ptr = link_find_link(link_name, &node); 
        if (!l_ptr) {
-               read_unlock_bh(&net_lock);
-               return cfg_reply_error_string("link not found");
+               read_unlock_bh(&tipc_net_lock);
+               return tipc_cfg_reply_error_string("link not found");
        }
 
-       node_lock(node);
+       tipc_node_lock(node);
        link_reset_statistics(l_ptr);
-       node_unlock(node);
-       read_unlock_bh(&net_lock);
-       return cfg_reply_none();
+       tipc_node_unlock(node);
+       read_unlock_bh(&tipc_net_lock);
+       return tipc_cfg_reply_none();
 }
 
 /**
@@ -2888,7 +2887,7 @@ static u32 percent(u32 count, u32 total)
 }
 
 /**
- * link_stats - print link statistics
+ * tipc_link_stats - print link statistics
  * @name: link name
  * @buf: print buffer area
  * @buf_size: size of print buffer area
@@ -2896,7 +2895,7 @@ static u32 percent(u32 count, u32 total)
  * Returns length of print buffer data string (or 0 if error)
  */
 
-static int link_stats(const char *name, char *buf, const u32 buf_size)
+static int tipc_link_stats(const char *name, char *buf, const u32 buf_size)
 {
        struct print_buf pb;
        struct link *l_ptr; 
@@ -2904,22 +2903,22 @@ static int link_stats(const char *name, char *buf, const u32 buf_size)
        char *status;
        u32 profile_total = 0;
 
-       if (!strcmp(name, bc_link_name))
-               return bclink_stats(buf, buf_size);
+       if (!strcmp(name, tipc_bclink_name))
+               return tipc_bclink_stats(buf, buf_size);
 
-       printbuf_init(&pb, buf, buf_size);
+       tipc_printbuf_init(&pb, buf, buf_size);
 
-       read_lock_bh(&net_lock);
+       read_lock_bh(&tipc_net_lock);
        l_ptr = link_find_link(name, &node); 
        if (!l_ptr) {
-               read_unlock_bh(&net_lock);
+               read_unlock_bh(&tipc_net_lock);
                return 0;
        }
-       node_lock(node);
+       tipc_node_lock(node);
 
-       if (link_is_active(l_ptr))
+       if (tipc_link_is_active(l_ptr))
                status = "ACTIVE";
-       else if (link_is_up(l_ptr))
+       else if (tipc_link_is_up(l_ptr))
                status = "STANDBY";
        else
                status = "DEFUNCT";
@@ -2975,33 +2974,33 @@ static int link_stats(const char *name, char *buf, const u32 buf_size)
                    ? (l_ptr->stats.accu_queue_sz / l_ptr->stats.queue_sz_counts)
                    : 0);
 
-       node_unlock(node);
-       read_unlock_bh(&net_lock);
-       return printbuf_validate(&pb);
+       tipc_node_unlock(node);
+       read_unlock_bh(&tipc_net_lock);
+       return tipc_printbuf_validate(&pb);
 }
 
 #define MAX_LINK_STATS_INFO 2000
 
-struct sk_buff *link_cmd_show_stats(const void *req_tlv_area, int req_tlv_space)
+struct sk_buff *tipc_link_cmd_show_stats(const void *req_tlv_area, int req_tlv_space)
 {
        struct sk_buff *buf;
        struct tlv_desc *rep_tlv;
        int str_len;
 
        if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_LINK_NAME))
-               return cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
+               return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
 
-       buf = cfg_reply_alloc(TLV_SPACE(MAX_LINK_STATS_INFO));
+       buf = tipc_cfg_reply_alloc(TLV_SPACE(MAX_LINK_STATS_INFO));
        if (!buf)
                return NULL;
 
        rep_tlv = (struct tlv_desc *)buf->data;
 
-       str_len = link_stats((char *)TLV_DATA(req_tlv_area),
-                            (char *)TLV_DATA(rep_tlv), MAX_LINK_STATS_INFO);
+       str_len = tipc_link_stats((char *)TLV_DATA(req_tlv_area),
+                                 (char *)TLV_DATA(rep_tlv), MAX_LINK_STATS_INFO);
        if (!str_len) {
                buf_discard(buf);
-               return cfg_reply_error_string("link not found");
+               return tipc_cfg_reply_error_string("link not found");
        }
 
        skb_put(buf, TLV_SPACE(str_len));
@@ -3020,20 +3019,20 @@ int link_control(const char *name, u32 op, u32 val)
        u32 a;
 
        a = link_name2addr(name, &bearer_id);
-       read_lock_bh(&net_lock);
-       node = node_find(a);
+       read_lock_bh(&tipc_net_lock);
+       node = tipc_node_find(a);
        if (node) {
-               node_lock(node);
+               tipc_node_lock(node);
                l_ptr = node->links[bearer_id];
                if (l_ptr) {
                        if (op == TIPC_REMOVE_LINK) {
                                struct bearer *b_ptr = l_ptr->b_ptr;
                                spin_lock_bh(&b_ptr->publ.lock);
-                               link_delete(l_ptr);
+                               tipc_link_delete(l_ptr);
                                spin_unlock_bh(&b_ptr->publ.lock);
                        }
                        if (op == TIPC_CMD_BLOCK_LINK) {
-                               link_reset(l_ptr);
+                               tipc_link_reset(l_ptr);
                                l_ptr->blocked = 1;
                        }
                        if (op == TIPC_CMD_UNBLOCK_LINK) {
@@ -3041,22 +3040,22 @@ int link_control(const char *name, u32 op, u32 val)
                        }
                        res = TIPC_OK;
                }
-               node_unlock(node);
+               tipc_node_unlock(node);
        }
-       read_unlock_bh(&net_lock);
+       read_unlock_bh(&tipc_net_lock);
        return res;
 }
 #endif
 
 /**
- * link_get_max_pkt - get maximum packet size to use when sending to destination
+ * tipc_link_get_max_pkt - get maximum packet size to use when sending to destination
  * @dest: network address of destination node
  * @selector: used to select from set of active links
  * 
  * If no active link can be found, uses default maximum packet size.
  */
 
-u32 link_get_max_pkt(u32 dest, u32 selector)
+u32 tipc_link_get_max_pkt(u32 dest, u32 selector)
 {
        struct node *n_ptr;
        struct link *l_ptr;
@@ -3065,16 +3064,16 @@ u32 link_get_max_pkt(u32 dest, u32 selector)
        if (dest == tipc_own_addr)
                return MAX_MSG_SIZE;
 
-       read_lock_bh(&net_lock);        
-       n_ptr = node_select(dest, selector);
+       read_lock_bh(&tipc_net_lock);        
+       n_ptr = tipc_node_select(dest, selector);
        if (n_ptr) {
-               node_lock(n_ptr);
+               tipc_node_lock(n_ptr);
                l_ptr = n_ptr->active_links[selector & 1];
                if (l_ptr)
                        res = link_max_pkt(l_ptr);
-               node_unlock(n_ptr);
+               tipc_node_unlock(n_ptr);
        }
-       read_unlock_bh(&net_lock);       
+       read_unlock_bh(&tipc_net_lock);       
        return res;
 }
 
index c2553f0737570aef827524c83e69f1408b6740dd..2d3c157f707d179e8961dceb6b36fa104e38d4f4 100644
@@ -221,44 +221,43 @@ struct link {
 
 struct port;
 
-struct link *link_create(struct bearer *b_ptr, const u32 peer,
-                        const struct tipc_media_addr *media_addr);
-void link_delete(struct link *l_ptr);
-void link_changeover(struct link *l_ptr);
-void link_send_duplicate(struct link *l_ptr, struct link *dest);
-void link_reset_fragments(struct link *l_ptr);
-int link_is_up(struct link *l_ptr);
-int link_is_active(struct link *l_ptr);
-void link_start(struct link *l_ptr);
-u32 link_push_packet(struct link *l_ptr);
-void link_stop(struct link *l_ptr);
-struct sk_buff *link_cmd_config(const void *req_tlv_area, int req_tlv_space, u16 cmd);
-struct sk_buff *link_cmd_show_stats(const void *req_tlv_area, int req_tlv_space);
-struct sk_buff *link_cmd_reset_stats(const void *req_tlv_area, int req_tlv_space);
-void link_reset(struct link *l_ptr);
-int link_send(struct sk_buff *buf, u32 dest, u32 selector);
-int link_send_buf(struct link *l_ptr, struct sk_buff *buf);
-u32 link_get_max_pkt(u32 dest,u32 selector);
-int link_send_sections_fast(struct port* sender, 
-                           struct iovec const *msg_sect,
-                           const u32 num_sect, 
-                           u32 destnode);
-
-int link_send_long_buf(struct link *l_ptr, struct sk_buff *buf);
-void link_tunnel(struct link *l_ptr, struct tipc_msg *tnl_hdr,
-                struct tipc_msg *msg, u32 selector);
-void link_recv_bundle(struct sk_buff *buf);
-int  link_recv_fragment(struct sk_buff **pending,
-                       struct sk_buff **fb,
-                       struct tipc_msg **msg);
-void link_send_proto_msg(struct link *l_ptr, u32 msg_typ, int prob, u32 gap, 
-                        u32 tolerance, u32 priority, u32 acked_mtu);
-void link_push_queue(struct link *l_ptr);
-u32 link_defer_pkt(struct sk_buff **head, struct sk_buff **tail,
+struct link *tipc_link_create(struct bearer *b_ptr, const u32 peer,
+                             const struct tipc_media_addr *media_addr);
+void tipc_link_delete(struct link *l_ptr);
+void tipc_link_changeover(struct link *l_ptr);
+void tipc_link_send_duplicate(struct link *l_ptr, struct link *dest);
+void tipc_link_reset_fragments(struct link *l_ptr);
+int tipc_link_is_up(struct link *l_ptr);
+int tipc_link_is_active(struct link *l_ptr);
+void tipc_link_start(struct link *l_ptr);
+u32 tipc_link_push_packet(struct link *l_ptr);
+void tipc_link_stop(struct link *l_ptr);
+struct sk_buff *tipc_link_cmd_config(const void *req_tlv_area, int req_tlv_space, u16 cmd);
+struct sk_buff *tipc_link_cmd_show_stats(const void *req_tlv_area, int req_tlv_space);
+struct sk_buff *tipc_link_cmd_reset_stats(const void *req_tlv_area, int req_tlv_space);
+void tipc_link_reset(struct link *l_ptr);
+int tipc_link_send(struct sk_buff *buf, u32 dest, u32 selector);
+int tipc_link_send_buf(struct link *l_ptr, struct sk_buff *buf);
+u32 tipc_link_get_max_pkt(u32 dest,u32 selector);
+int tipc_link_send_sections_fast(struct port* sender, 
+                                struct iovec const *msg_sect,
+                                const u32 num_sect, 
+                                u32 destnode);
+int tipc_link_send_long_buf(struct link *l_ptr, struct sk_buff *buf);
+void tipc_link_tunnel(struct link *l_ptr, struct tipc_msg *tnl_hdr,
+                     struct tipc_msg *msg, u32 selector);
+void tipc_link_recv_bundle(struct sk_buff *buf);
+int  tipc_link_recv_fragment(struct sk_buff **pending,
+                            struct sk_buff **fb,
+                            struct tipc_msg **msg);
+void tipc_link_send_proto_msg(struct link *l_ptr, u32 msg_typ, int prob, u32 gap, 
+                             u32 tolerance, u32 priority, u32 acked_mtu);
+void tipc_link_push_queue(struct link *l_ptr);
+u32 tipc_link_defer_pkt(struct sk_buff **head, struct sk_buff **tail,
                   struct sk_buff *buf);
-void link_wakeup_ports(struct link *l_ptr, int all);
-void link_set_queue_limits(struct link *l_ptr, u32 window);
-void link_retransmit(struct link *l_ptr, struct sk_buff *start, u32 retransmits);
+void tipc_link_wakeup_ports(struct link *l_ptr, int all);
+void tipc_link_set_queue_limits(struct link *l_ptr, u32 window);
+void tipc_link_retransmit(struct link *l_ptr, struct sk_buff *start, u32 retransmits);
 
 /*
  * Link sequence number manipulation routines (uses modulo 2**16 arithmetic)
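
The sequence-number helpers referred to in the comment above (and used throughout link.c as mod() and less()) compare 16-bit counters that wrap, so ordering must be computed modulo 2**16 rather than with a plain `<`. A stand-alone sketch of typical wrap-safe comparisons of this kind follows; the definitions are illustrative, so consult the header itself for the exact TIPC helpers.

#include <stdint.h>
#include <stdio.h>

/* Keep only the low 16 bits, as link sequence numbers occupy a 16-bit field. */
static uint32_t mod16(uint32_t x)
{
	return x & 0xffffu;
}

/* a <= b in modulo-2**16 space: the forward distance from a to b is "short". */
static int less_eq(uint32_t a, uint32_t b)
{
	return mod16(b - a) < 32768u;
}

static int less(uint32_t a, uint32_t b)
{
	return less_eq(a, b) && (mod16(a) != mod16(b));
}

int main(void)
{
	/* 65535 wraps to 0, so it counts as "before" 3 even though 65535 > 3. */
	printf("less(65535, 3) = %d\n", less(65535, 3));	/* prints 1 */
	printf("less(3, 65535) = %d\n", less(3, 65535));	/* prints 0 */
	return 0;
}
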
index 03dbc55cb04c4cf9d52d2e289feed0d0ed50dd09..3bd345a344e52eae9a5bb783f60219eefc4f440d 100644
 #include "bearer.h"
 
 
-void msg_set_media_addr(struct tipc_msg *m, struct tipc_media_addr *a)
-{
-       memcpy(&((int *)m)[5], a, sizeof(*a));
-}
-
-void msg_get_media_addr(struct tipc_msg *m, struct tipc_media_addr *a)
-{
-       memcpy(a, &((int*)m)[5], sizeof(*a));
-}
-
-
-void msg_print(struct print_buf *buf, struct tipc_msg *msg, const char *str)
+void tipc_msg_print(struct print_buf *buf, struct tipc_msg *msg, const char *str)
 {
        u32 usr = msg_user(msg);
        tipc_printf(buf, str);
@@ -318,7 +307,7 @@ void msg_print(struct print_buf *buf, struct tipc_msg *msg, const char *str)
                tipc_printf(buf, ":REQL(%u):", msg_req_links(msg));
                tipc_printf(buf, ":DDOM(%x):", msg_dest_domain(msg));
                tipc_printf(buf, ":NETID(%u):", msg_bc_netid(msg));
-               media_addr_printf(buf, orig);
+               tipc_media_addr_printf(buf, orig);
        }
        if (msg_user(msg) == BCAST_PROTOCOL) {
                tipc_printf(buf, "BCNACK:AFTER(%u):", msg_bcgap_after(msg));
@@ -326,9 +315,9 @@ void msg_print(struct print_buf *buf, struct tipc_msg *msg, const char *str)
        }
        tipc_printf(buf, "\n");
        if ((usr == CHANGEOVER_PROTOCOL) && (msg_msgcnt(msg))) {
-               msg_print(buf,msg_get_wrapped(msg),"      /");
+               tipc_msg_print(buf,msg_get_wrapped(msg),"      /");
        }
        if ((usr == MSG_FRAGMENTER) && (msg_type(msg) == FIRST_FRAGMENT)) {
-               msg_print(buf,msg_get_wrapped(msg),"      /");
+               tipc_msg_print(buf,msg_get_wrapped(msg),"      /");
        }
 }
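
tipc_msg_print above recurses into the payload when a message wraps another message (changeover tunnelling, or the first fragment of a long message), dumping the inner header with an indented prefix. A stand-alone sketch of that recursive dump pattern, with simplified stand-in types rather than the real tipc_msg layout:

#include <stdio.h>

struct hdr {
	const char *user;		/* e.g. "CHANGEOVER_PROTOCOL" */
	unsigned seqno;
	const struct hdr *wrapped;	/* inner header, if any        */
};

static void print_hdr(const struct hdr *h, const char *prefix)
{
	printf("%s%s:SEQ(%u)\n", prefix, h->user, h->seqno);
	if (h->wrapped)
		print_hdr(h->wrapped, "      /");	/* indent like the dump above */
}

int main(void)
{
	struct hdr inner = { "DATA", 17, NULL };
	struct hdr outer = { "CHANGEOVER_PROTOCOL", 42, &inner };

	print_hdr(&outer, ">SEND>");
	return 0;
}
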
index 662c81862a0c6211441900bc3a428fdf2ce4ae5b..6699aaf7bd4cdefa305ef8eb95d19fc5ed987241 100644
@@ -37,7 +37,7 @@
 #ifndef _TIPC_MSG_H
 #define _TIPC_MSG_H
 
-#include <net/tipc/tipc_msg.h>
+#include "core.h"
 
 #define TIPC_VERSION              2
 #define DATA_LOW                  TIPC_LOW_IMPORTANCE
@@ -805,14 +805,14 @@ static inline int msg_build(struct tipc_msg *hdr,
        return -EFAULT;
 }
 
+static inline void msg_set_media_addr(struct tipc_msg *m, struct tipc_media_addr *a)
+{
+       memcpy(&((int *)m)[5], a, sizeof(*a));
+}
 
-struct tipc_media_addr;
-
-extern void msg_set_media_addr(struct tipc_msg *m,
-                              struct tipc_media_addr *a);
-
-extern void msg_get_media_addr(struct tipc_msg *m,
-                              struct tipc_media_addr *a);
-
+static inline void msg_get_media_addr(struct tipc_msg *m, struct tipc_media_addr *a)
+{
+       memcpy(a, &((int*)m)[5], sizeof(*a));
+}
 
 #endif
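
The two inlines moved into this header above stash a bearer's media address directly in 32-bit word 5 of the message header and read it back with a mirrored memcpy. Below is a stand-alone round-trip sketch of that overlay; the struct sizes and names are simplified stand-ins chosen for illustration, not the real tipc_media_addr or tipc_msg layout.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct media_addr { uint8_t value[20]; };	/* stand-in for tipc_media_addr */
struct msg        { uint32_t hdr[10]; };	/* stand-in for tipc_msg header */

static void set_media_addr(struct msg *m, const struct media_addr *a)
{
	memcpy(&m->hdr[5], a, sizeof(*a));	/* overlays header words 5..9 */
}

static void get_media_addr(const struct msg *m, struct media_addr *a)
{
	memcpy(a, &m->hdr[5], sizeof(*a));
}

int main(void)
{
	struct msg m = { {0} };
	struct media_addr in = { {0xaa, 0xbb} }, out;

	set_media_addr(&m, &in);
	get_media_addr(&m, &out);
	printf("%s\n", memcmp(&in, &out, sizeof(in)) == 0 ? "roundtrip ok" : "mismatch");
	return 0;
}
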
index 41cbaf1a4a7301c915a08c52da9ae92b2c14e1b5..830f9099904182daa81dc9e2b76151c81570546e 100644
@@ -114,10 +114,10 @@ static struct sk_buff *named_prepare_buf(u32 type, u32 size, u32 dest)
 }
 
 /**
- * named_publish - tell other nodes about a new publication by this node
+ * tipc_named_publish - tell other nodes about a new publication by this node
  */
 
-void named_publish(struct publication *publ)
+void tipc_named_publish(struct publication *publ)
 {
        struct sk_buff *buf;
        struct distr_item *item;
@@ -133,15 +133,15 @@ void named_publish(struct publication *publ)
 
        item = (struct distr_item *)msg_data(buf_msg(buf));
        publ_to_item(item, publ);
-       dbg("named_withdraw: broadcasting publish msg\n");
-       cluster_broadcast(buf);
+       dbg("tipc_named_withdraw: broadcasting publish msg\n");
+       tipc_cltr_broadcast(buf);
 }
 
 /**
- * named_withdraw - tell other nodes about a withdrawn publication by this node
+ * tipc_named_withdraw - tell other nodes about a withdrawn publication by this node
  */
 
-void named_withdraw(struct publication *publ)
+void tipc_named_withdraw(struct publication *publ)
 {
        struct sk_buff *buf;
        struct distr_item *item;
@@ -157,15 +157,15 @@ void named_withdraw(struct publication *publ)
 
        item = (struct distr_item *)msg_data(buf_msg(buf));
        publ_to_item(item, publ);
-       dbg("named_withdraw: broadcasting withdraw msg\n");
-       cluster_broadcast(buf);
+       dbg("tipc_named_withdraw: broadcasting withdraw msg\n");
+       tipc_cltr_broadcast(buf);
 }
 
 /**
- * named_node_up - tell specified node about all publications by this node
+ * tipc_named_node_up - tell specified node about all publications by this node
  */
 
-void named_node_up(unsigned long node)
+void tipc_named_node_up(unsigned long node)
 {
        struct publication *publ;
        struct distr_item *item = 0;
@@ -175,7 +175,7 @@ void named_node_up(unsigned long node)
        u32 max_item_buf;
 
        assert(in_own_cluster(node));
-       read_lock_bh(&nametbl_lock); 
+       read_lock_bh(&tipc_nametbl_lock); 
        max_item_buf = TIPC_MAX_USER_MSG_SIZE / ITEM_SIZE;
        max_item_buf *= ITEM_SIZE;
        rest = publ_cnt * ITEM_SIZE;
@@ -196,15 +196,15 @@ void named_node_up(unsigned long node)
                left -= ITEM_SIZE;
                if (!left) {
                        msg_set_link_selector(buf_msg(buf), node);
-                       dbg("named_node_up: sending publish msg to "
+                       dbg("tipc_named_node_up: sending publish msg to "
                            "<%u.%u.%u>\n", tipc_zone(node), 
                            tipc_cluster(node), tipc_node(node));
-                       link_send(buf, node, node);
+                       tipc_link_send(buf, node, node);
                        buf = 0;
                }
        }
 exit:
-       read_unlock_bh(&nametbl_lock); 
+       read_unlock_bh(&tipc_nametbl_lock); 
 }
 
 /**
@@ -221,73 +221,73 @@ exit:
 static void node_is_down(struct publication *publ)
 {
        struct publication *p;
-        write_lock_bh(&nametbl_lock);
+        write_lock_bh(&tipc_nametbl_lock);
        dbg("node_is_down: withdrawing %u, %u, %u\n", 
            publ->type, publ->lower, publ->upper);
         publ->key += 1222345;
-       p = nametbl_remove_publ(publ->type, publ->lower, 
-                               publ->node, publ->ref, publ->key);
+       p = tipc_nametbl_remove_publ(publ->type, publ->lower, 
+                                    publ->node, publ->ref, publ->key);
         assert(p == publ);
-       write_unlock_bh(&nametbl_lock);
+       write_unlock_bh(&tipc_nametbl_lock);
        if (publ)
                kfree(publ);
 }
 
 /**
- * named_recv - process name table update message sent by another node
+ * tipc_named_recv - process name table update message sent by another node
  */
 
-void named_recv(struct sk_buff *buf)
+void tipc_named_recv(struct sk_buff *buf)
 {
        struct publication *publ;
        struct tipc_msg *msg = buf_msg(buf);
        struct distr_item *item = (struct distr_item *)msg_data(msg);
        u32 count = msg_data_sz(msg) / ITEM_SIZE;
 
-       write_lock_bh(&nametbl_lock); 
+       write_lock_bh(&tipc_nametbl_lock); 
        while (count--) {
                if (msg_type(msg) == PUBLICATION) {
-                       dbg("named_recv: got publication for %u, %u, %u\n", 
+                       dbg("tipc_named_recv: got publication for %u, %u, %u\n", 
                            ntohl(item->type), ntohl(item->lower),
                            ntohl(item->upper));
-                       publ = nametbl_insert_publ(ntohl(item->type), 
-                                                  ntohl(item->lower),
-                                                  ntohl(item->upper),
-                                                  TIPC_CLUSTER_SCOPE,
-                                                  msg_orignode(msg), 
-                                                  ntohl(item->ref),
-                                                  ntohl(item->key));
+                       publ = tipc_nametbl_insert_publ(ntohl(item->type), 
+                                                       ntohl(item->lower),
+                                                       ntohl(item->upper),
+                                                       TIPC_CLUSTER_SCOPE,
+                                                       msg_orignode(msg), 
+                                                       ntohl(item->ref),
+                                                       ntohl(item->key));
                        if (publ) {
-                               nodesub_subscribe(&publ->subscr, 
-                                                 msg_orignode(msg), 
-                                                 publ,
-                                                 (net_ev_handler)node_is_down);
+                               tipc_nodesub_subscribe(&publ->subscr, 
+                                                      msg_orignode(msg), 
+                                                      publ,
+                                                      (net_ev_handler)node_is_down);
                        }
                } else if (msg_type(msg) == WITHDRAWAL) {
-                       dbg("named_recv: got withdrawl for %u, %u, %u\n", 
+                       dbg("tipc_named_recv: got withdrawl for %u, %u, %u\n", 
                            ntohl(item->type), ntohl(item->lower),
                            ntohl(item->upper));
-                       publ = nametbl_remove_publ(ntohl(item->type),
-                                                  ntohl(item->lower),
-                                                  msg_orignode(msg),
-                                                  ntohl(item->ref),
-                                                  ntohl(item->key));
+                       publ = tipc_nametbl_remove_publ(ntohl(item->type),
+                                                       ntohl(item->lower),
+                                                       msg_orignode(msg),
+                                                       ntohl(item->ref),
+                                                       ntohl(item->key));
 
                        if (publ) {
-                               nodesub_unsubscribe(&publ->subscr);
+                               tipc_nodesub_unsubscribe(&publ->subscr);
                                kfree(publ);
                        }
                } else {
-                       warn("named_recv: unknown msg\n");
+                       warn("tipc_named_recv: unknown msg\n");
                }
                item++;
        }
-       write_unlock_bh(&nametbl_lock); 
+       write_unlock_bh(&tipc_nametbl_lock); 
        buf_discard(buf);
 }
 
 /**
- * named_reinit - re-initialize local publication list
+ * tipc_named_reinit - re-initialize local publication list
  * 
  * This routine is called whenever TIPC networking is (re)enabled.
  * All existing publications by this node that have "cluster" or "zone" scope
@@ -295,15 +295,15 @@ void named_recv(struct sk_buff *buf)
  * (If the node's address is unchanged, the update loop terminates immediately.)
  */
 
-void named_reinit(void)
+void tipc_named_reinit(void)
 {
        struct publication *publ;
 
-       write_lock_bh(&nametbl_lock); 
+       write_lock_bh(&tipc_nametbl_lock); 
        list_for_each_entry(publ, &publ_root, local_list) {
                if (publ->node == tipc_own_addr)
                        break;
                publ->node = tipc_own_addr;
        }
-       write_unlock_bh(&nametbl_lock); 
+       write_unlock_bh(&tipc_nametbl_lock); 
 }
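The reinit loop above stops as soon as it meets a publication that already carries the node's current address. Below is a minimal userspace sketch of that same early-exit update pattern; the struct and function names are illustrative assumptions, not the kernel's struct publication or its list primitives.

/* Hedged illustration only: a simplified singly linked list standing in
 * for the kernel's publ_root list. The loop rewrites the origin address
 * and breaks at the first entry that is already up to date, as
 * tipc_named_reinit does. */
#include <stdint.h>

struct publ_example {                   /* hypothetical, not struct publication */
	uint32_t node;
	struct publ_example *next;
};

static void reinit_example(struct publ_example *head, uint32_t own_addr)
{
	struct publ_example *p;

	for (p = head; p; p = p->next) {
		if (p->node == own_addr)
			break;          /* remainder of the list is already current */
		p->node = own_addr;
	}
}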
index a04bdeac84ea04a1b79b3333566594ecf78e609e..843da0172f4e9ec974cfb5ea48d5240ad0fe9131 100644 (file)
 
 #include "name_table.h"
 
-void named_publish(struct publication *publ);
-void named_withdraw(struct publication *publ);
-void named_node_up(unsigned long node);
-void named_recv(struct sk_buff *buf);
-void named_reinit(void);
+void tipc_named_publish(struct publication *publ);
+void tipc_named_withdraw(struct publication *publ);
+void tipc_named_node_up(unsigned long node);
+void tipc_named_recv(struct sk_buff *buf);
+void tipc_named_reinit(void);
 
 #endif
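For orientation, here is a small, self-contained sketch of the byte-order handling the receive path above depends on: every field of a distributed name item travels in network byte order and is converted with ntohl() before the name table is consulted. The item layout shown is a hypothetical stand-in, not TIPC's actual structure.

#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>

struct item_example {                   /* assumed layout, for illustration only */
	uint32_t type;
	uint32_t lower;
	uint32_t upper;
	uint32_t ref;
	uint32_t key;
};

/* Walk an array of items received off the wire, converting each field
 * from network to host byte order before acting on it. */
static void dump_items(const struct item_example *item, unsigned int count)
{
	unsigned int i;

	for (i = 0; i < count; i++, item++)
		printf("<%u,%u,%u> ref %u key %u\n",
		       ntohl(item->type), ntohl(item->lower),
		       ntohl(item->upper), ntohl(item->ref),
		       ntohl(item->key));
}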
index 972c83eb83b4a0b49bca407cec867f0a098b674e..3f4b23bd08f74032935aa384825fadc755242f86 100644 (file)
@@ -99,9 +99,9 @@ struct name_table {
        u32 local_publ_count;
 };
 
-struct name_table table = { NULL } ;
+static struct name_table table = { NULL } ;
 static atomic_t rsv_publ_ok = ATOMIC_INIT(0);
-rwlock_t nametbl_lock = RW_LOCK_UNLOCKED;
+rwlock_t tipc_nametbl_lock = RW_LOCK_UNLOCKED;
 
 
 static inline int hash(int x)
@@ -139,10 +139,10 @@ static struct publication *publ_create(u32 type, u32 lower, u32 upper,
 }
 
 /**
- * subseq_alloc - allocate a specified number of sub-sequence structures
+ * tipc_subseq_alloc - allocate a specified number of sub-sequence structures
  */
 
-struct sub_seq *subseq_alloc(u32 cnt)
+struct sub_seq *tipc_subseq_alloc(u32 cnt)
 {
        u32 sz = cnt * sizeof(struct sub_seq);
        struct sub_seq *sseq = (struct sub_seq *)kmalloc(sz, GFP_ATOMIC);
@@ -153,16 +153,16 @@ struct sub_seq *subseq_alloc(u32 cnt)
 }
 
 /**
- * nameseq_create - create a name sequence structure for the specified 'type'
+ * tipc_nameseq_create - create a name sequence structure for the specified 'type'
  * 
  * Allocates a single sub-sequence structure and sets it to all 0's.
  */
 
-struct name_seq *nameseq_create(u32 type, struct hlist_head *seq_head)
+struct name_seq *tipc_nameseq_create(u32 type, struct hlist_head *seq_head)
 {
        struct name_seq *nseq = 
                (struct name_seq *)kmalloc(sizeof(*nseq), GFP_ATOMIC);
-       struct sub_seq *sseq = subseq_alloc(1);
+       struct sub_seq *sseq = tipc_subseq_alloc(1);
 
        if (!nseq || !sseq) {
                warn("Memory squeeze; failed to create name sequence\n");
@@ -175,7 +175,7 @@ struct name_seq *nameseq_create(u32 type, struct hlist_head *seq_head)
        nseq->lock = SPIN_LOCK_UNLOCKED;
        nseq->type = type;
        nseq->sseqs = sseq;
-       dbg("nameseq_create() nseq = %x type %u, ssseqs %x, ff: %u\n",
+       dbg("tipc_nameseq_create() nseq = %x type %u, sseqs %x, ff: %u\n",
            nseq, type, nseq->sseqs, nseq->first_free);
        nseq->alloc = 1;
        INIT_HLIST_NODE(&nseq->ns_list);
@@ -240,10 +240,10 @@ static u32 nameseq_locate_subseq(struct name_seq *nseq, u32 instance)
 }
 
 /**
- * nameseq_insert_publ - 
+ * tipc_nameseq_insert_publ - 
  */
 
-struct publication *nameseq_insert_publ(struct name_seq *nseq,
+struct publication *tipc_nameseq_insert_publ(struct name_seq *nseq,
                                        u32 type, u32 lower, u32 upper,
                                        u32 scope, u32 node, u32 port, u32 key)
 {
@@ -285,7 +285,7 @@ struct publication *nameseq_insert_publ(struct name_seq *nseq,
 
                if (nseq->first_free == nseq->alloc) {
                        struct sub_seq *sseqs = nseq->sseqs;
-                       nseq->sseqs = subseq_alloc(nseq->alloc * 2);
+                       nseq->sseqs = tipc_subseq_alloc(nseq->alloc * 2);
                        if (nseq->sseqs != NULL) {
                                memcpy(nseq->sseqs, sseqs,
                                       nseq->alloc * sizeof (struct sub_seq));
@@ -354,23 +354,23 @@ struct publication *nameseq_insert_publ(struct name_seq *nseq,
         */
        list_for_each_entry_safe(s, st, &nseq->subscriptions, nameseq_list) {
                dbg("calling report_overlap()\n");
-               subscr_report_overlap(s,
-                                     publ->lower,
-                                     publ->upper,
-                                     TIPC_PUBLISHED,
-                                     publ->ref, 
-                                     publ->node,
-                                     created_subseq);
+               tipc_subscr_report_overlap(s,
+                                          publ->lower,
+                                          publ->upper,
+                                          TIPC_PUBLISHED,
+                                          publ->ref, 
+                                          publ->node,
+                                          created_subseq);
        }
        return publ;
 }
 
 /**
- * nameseq_remove_publ -
+ * tipc_nameseq_remove_publ -
  */
 
-struct publication *nameseq_remove_publ(struct name_seq *nseq, u32 inst,
-                                       u32 node, u32 ref, u32 key)
+struct publication *tipc_nameseq_remove_publ(struct name_seq *nseq, u32 inst,
+                                            u32 node, u32 ref, u32 key)
 {
        struct publication *publ;
        struct publication *prev;
@@ -470,24 +470,24 @@ struct publication *nameseq_remove_publ(struct name_seq *nseq, u32 inst,
         * Any subscriptions waiting ? 
         */
        list_for_each_entry_safe(s, st, &nseq->subscriptions, nameseq_list) {
-               subscr_report_overlap(s,
-                                     publ->lower,
-                                     publ->upper,
-                                     TIPC_WITHDRAWN, 
-                                     publ->ref, 
-                                     publ->node,
-                                     removed_subseq);
+               tipc_subscr_report_overlap(s,
+                                          publ->lower,
+                                          publ->upper,
+                                          TIPC_WITHDRAWN, 
+                                          publ->ref, 
+                                          publ->node,
+                                          removed_subseq);
        }
        return publ;
 }
 
 /**
- * nameseq_subscribe: attach a subscription, and issue
+ * tipc_nameseq_subscribe: attach a subscription, and issue
  * the prescribed number of events if there is any sub-
  * sequence overlapping with the requested sequence
  */
 
-void nameseq_subscribe(struct name_seq *nseq, struct subscription *s)
+void tipc_nameseq_subscribe(struct name_seq *nseq, struct subscription *s)
 {
        struct sub_seq *sseq = nseq->sseqs;
 
@@ -498,18 +498,18 @@ void nameseq_subscribe(struct name_seq *nseq, struct subscription *s)
 
        while (sseq != &nseq->sseqs[nseq->first_free]) {
                struct publication *zl = sseq->zone_list;
-               if (zl && subscr_overlap(s,sseq->lower,sseq->upper)) {
+               if (zl && tipc_subscr_overlap(s,sseq->lower,sseq->upper)) {
                        struct publication *crs = zl;
                        int must_report = 1;
 
                        do {
-                               subscr_report_overlap(s, 
-                                                      sseq->lower, 
-                                                      sseq->upper,
-                                                      TIPC_PUBLISHED,
-                                                      crs->ref,
-                                                      crs->node,
-                                                      must_report);
+                               tipc_subscr_report_overlap(s, 
+                                                          sseq->lower, 
+                                                          sseq->upper,
+                                                          TIPC_PUBLISHED,
+                                                          crs->ref,
+                                                          crs->node,
+                                                          must_report);
                                must_report = 0;
                                crs = crs->zone_list_next;
                        } while (crs != zl);
@@ -538,8 +538,8 @@ static struct name_seq *nametbl_find_seq(u32 type)
        return 0;
 };
 
-struct publication *nametbl_insert_publ(u32 type, u32 lower, u32 upper,
-                   u32 scope, u32 node, u32 port, u32 key)
+struct publication *tipc_nametbl_insert_publ(u32 type, u32 lower, u32 upper,
+                                            u32 scope, u32 node, u32 port, u32 key)
 {
        struct name_seq *seq = nametbl_find_seq(type);
 
@@ -552,19 +552,19 @@ struct publication *nametbl_insert_publ(u32 type, u32 lower, u32 upper,
 
        dbg("Publishing <%u,%u,%u> from %x\n", type, lower, upper, node);
        if (!seq) {
-               seq = nameseq_create(type, &table.types[hash(type)]);
-               dbg("nametbl_insert_publ: created %x\n", seq);
+               seq = tipc_nameseq_create(type, &table.types[hash(type)]);
+               dbg("tipc_nametbl_insert_publ: created %x\n", seq);
        }
        if (!seq)
                return 0;
 
        assert(seq->type == type);
-       return nameseq_insert_publ(seq, type, lower, upper,
-                                  scope, node, port, key);
+       return tipc_nameseq_insert_publ(seq, type, lower, upper,
+                                       scope, node, port, key);
 }
 
-struct publication *nametbl_remove_publ(u32 type, u32 lower, 
-                                       u32 node, u32 ref, u32 key)
+struct publication *tipc_nametbl_remove_publ(u32 type, u32 lower, 
+                                            u32 node, u32 ref, u32 key)
 {
        struct publication *publ;
        struct name_seq *seq = nametbl_find_seq(type);
@@ -573,7 +573,7 @@ struct publication *nametbl_remove_publ(u32 type, u32 lower,
                return 0;
 
        dbg("Withdrawing <%u,%u> from %x\n", type, lower, node);
-       publ = nameseq_remove_publ(seq, lower, node, ref, key);
+       publ = tipc_nameseq_remove_publ(seq, lower, node, ref, key);
 
        if (!seq->first_free && list_empty(&seq->subscriptions)) {
                hlist_del_init(&seq->ns_list);
@@ -584,14 +584,14 @@ struct publication *nametbl_remove_publ(u32 type, u32 lower,
 }
 
 /*
- * nametbl_translate(): Translate tipc_name -> tipc_portid.
+ * tipc_nametbl_translate(): Translate tipc_name -> tipc_portid.
  *                      Very time-critical.
  *
  * Note: on entry 'destnode' is the search domain used during translation;
  *       on exit it passes back the node address of the matching port (if any)
  */
 
-u32 nametbl_translate(u32 type, u32 instance, u32 *destnode)
+u32 tipc_nametbl_translate(u32 type, u32 instance, u32 *destnode)
 {
        struct sub_seq *sseq;
        struct publication *publ = 0;
@@ -601,7 +601,7 @@ u32 nametbl_translate(u32 type, u32 instance, u32 *destnode)
        if (!in_scope(*destnode, tipc_own_addr))
                return 0;
 
-       read_lock_bh(&nametbl_lock);
+       read_lock_bh(&tipc_nametbl_lock);
        seq = nametbl_find_seq(type);
        if (unlikely(!seq))
                goto not_found;
@@ -619,7 +619,7 @@ found:
                        ref = publ->ref;
                        *destnode = publ->node;
                        spin_unlock_bh(&seq->lock);
-                       read_unlock_bh(&nametbl_lock);
+                       read_unlock_bh(&tipc_nametbl_lock);
                        return ref;
                }
                publ = sseq->cluster_list;
@@ -657,12 +657,12 @@ found:
        spin_unlock_bh(&seq->lock);
 not_found:
        *destnode = 0;
-       read_unlock_bh(&nametbl_lock);
+       read_unlock_bh(&tipc_nametbl_lock);
        return 0;
 }
 
 /**
- * nametbl_mc_translate - find multicast destinations
+ * tipc_nametbl_mc_translate - find multicast destinations
  * 
  * Creates list of all local ports that overlap the given multicast address;
  * also determines if any off-node ports overlap.
@@ -674,15 +674,15 @@ not_found:
  * Returns non-zero if any off-node ports overlap
  */
 
-int nametbl_mc_translate(u32 type, u32 lower, u32 upper, u32 limit,
-                        struct port_list *dports)
+int tipc_nametbl_mc_translate(u32 type, u32 lower, u32 upper, u32 limit,
+                             struct port_list *dports)
 {
        struct name_seq *seq;
        struct sub_seq *sseq;
        struct sub_seq *sseq_stop;
        int res = 0;
 
-       read_lock_bh(&nametbl_lock);
+       read_lock_bh(&tipc_nametbl_lock);
        seq = nametbl_find_seq(type);
        if (!seq)
                goto exit;
@@ -700,7 +700,7 @@ int nametbl_mc_translate(u32 type, u32 lower, u32 upper, u32 limit,
                if (publ && (publ->scope <= limit))
                        do {
                                if (publ->node == tipc_own_addr)
-                                       port_list_add(dports, publ->ref);
+                                       tipc_port_list_add(dports, publ->ref);
                                else
                                        res = 1;
                                publ = publ->cluster_list_next;
@@ -709,15 +709,15 @@ int nametbl_mc_translate(u32 type, u32 lower, u32 upper, u32 limit,
 
        spin_unlock_bh(&seq->lock);
 exit:
-       read_unlock_bh(&nametbl_lock);
+       read_unlock_bh(&tipc_nametbl_lock);
        return res;
 }
 
 /**
- * nametbl_publish_rsv - publish port name using a reserved name type
+ * tipc_nametbl_publish_rsv - publish port name using a reserved name type
  */
 
-int nametbl_publish_rsv(u32 ref, unsigned int scope, 
+int tipc_nametbl_publish_rsv(u32 ref, unsigned int scope, 
                        struct tipc_name_seq const *seq)
 {
        int res;
@@ -729,10 +729,10 @@ int nametbl_publish_rsv(u32 ref, unsigned int scope,
 }
 
 /**
- * nametbl_publish - add name publication to network name tables
+ * tipc_nametbl_publish - add name publication to network name tables
  */
 
-struct publication *nametbl_publish(u32 type, u32 lower, u32 upper, 
+struct publication *tipc_nametbl_publish(u32 type, u32 lower, u32 upper, 
                                    u32 scope, u32 port_ref, u32 key)
 {
        struct publication *publ;
@@ -748,77 +748,77 @@ struct publication *nametbl_publish(u32 type, u32 lower, u32 upper,
                return 0;
        }
 
-       write_lock_bh(&nametbl_lock);
+       write_lock_bh(&tipc_nametbl_lock);
        table.local_publ_count++;
-       publ = nametbl_insert_publ(type, lower, upper, scope,
+       publ = tipc_nametbl_insert_publ(type, lower, upper, scope,
                                   tipc_own_addr, port_ref, key);
        if (publ && (scope != TIPC_NODE_SCOPE)) {
-               named_publish(publ);
+               tipc_named_publish(publ);
        }
-       write_unlock_bh(&nametbl_lock);
+       write_unlock_bh(&tipc_nametbl_lock);
        return publ;
 }
 
 /**
- * nametbl_withdraw - withdraw name publication from network name tables
+ * tipc_nametbl_withdraw - withdraw name publication from network name tables
  */
 
-int nametbl_withdraw(u32 type, u32 lower, u32 ref, u32 key)
+int tipc_nametbl_withdraw(u32 type, u32 lower, u32 ref, u32 key)
 {
        struct publication *publ;
 
-       dbg("nametbl_withdraw:<%d,%d,%d>\n", type, lower, key);
-       write_lock_bh(&nametbl_lock);
-       publ = nametbl_remove_publ(type, lower, tipc_own_addr, ref, key);
+       dbg("tipc_nametbl_withdraw:<%d,%d,%d>\n", type, lower, key);
+       write_lock_bh(&tipc_nametbl_lock);
+       publ = tipc_nametbl_remove_publ(type, lower, tipc_own_addr, ref, key);
        if (publ) {
                table.local_publ_count--;
                if (publ->scope != TIPC_NODE_SCOPE)
-                       named_withdraw(publ);
-               write_unlock_bh(&nametbl_lock);
+                       tipc_named_withdraw(publ);
+               write_unlock_bh(&tipc_nametbl_lock);
                list_del_init(&publ->pport_list);
                kfree(publ);
                return 1;
        }
-       write_unlock_bh(&nametbl_lock);
+       write_unlock_bh(&tipc_nametbl_lock);
        return 0;
 }
 
 /**
- * nametbl_subscribe - add a subscription object to the name table
+ * tipc_nametbl_subscribe - add a subscription object to the name table
  */
 
 void
-nametbl_subscribe(struct subscription *s)
+tipc_nametbl_subscribe(struct subscription *s)
 {
        u32 type = s->seq.type;
        struct name_seq *seq;
 
-        write_lock_bh(&nametbl_lock);
+        write_lock_bh(&tipc_nametbl_lock);
        seq = nametbl_find_seq(type);
        if (!seq) {
-               seq = nameseq_create(type, &table.types[hash(type)]);
+               seq = tipc_nameseq_create(type, &table.types[hash(type)]);
        }
         if (seq){
                 spin_lock_bh(&seq->lock);
-                dbg("nametbl_subscribe:found %x for <%u,%u,%u>\n",
+                dbg("tipc_nametbl_subscribe:found %x for <%u,%u,%u>\n",
                     seq, type, s->seq.lower, s->seq.upper);
                 assert(seq->type == type);
-                nameseq_subscribe(seq, s);
+                tipc_nameseq_subscribe(seq, s);
                 spin_unlock_bh(&seq->lock);
         }
-        write_unlock_bh(&nametbl_lock);
+        write_unlock_bh(&tipc_nametbl_lock);
 }
 
 /**
- * nametbl_unsubscribe - remove a subscription object from name table
+ * tipc_nametbl_unsubscribe - remove a subscription object from name table
  */
 
 void
-nametbl_unsubscribe(struct subscription *s)
+tipc_nametbl_unsubscribe(struct subscription *s)
 {
        struct name_seq *seq;
 
-        write_lock_bh(&nametbl_lock);
+        write_lock_bh(&tipc_nametbl_lock);
         seq = nametbl_find_seq(s->seq.type);
        if (seq != NULL){
                 spin_lock_bh(&seq->lock);
@@ -830,7 +830,7 @@ nametbl_unsubscribe(struct subscription *s)
                         kfree(seq);
                 }
         }
-        write_unlock_bh(&nametbl_lock);
+        write_unlock_bh(&tipc_nametbl_lock);
 }
 
 
@@ -983,17 +983,17 @@ static void nametbl_list(struct print_buf *buf, u32 depth_info,
        }
 }
 
-void nametbl_print(struct print_buf *buf, const char *str)
+void tipc_nametbl_print(struct print_buf *buf, const char *str)
 {
        tipc_printf(buf, str);
-       read_lock_bh(&nametbl_lock);
+       read_lock_bh(&tipc_nametbl_lock);
        nametbl_list(buf, 0, 0, 0, 0);
-       read_unlock_bh(&nametbl_lock);
+       read_unlock_bh(&tipc_nametbl_lock);
 }
 
 #define MAX_NAME_TBL_QUERY 32768
 
-struct sk_buff *nametbl_get(const void *req_tlv_area, int req_tlv_space)
+struct sk_buff *tipc_nametbl_get(const void *req_tlv_area, int req_tlv_space)
 {
        struct sk_buff *buf;
        struct tipc_name_table_query *argv;
@@ -1002,20 +1002,20 @@ struct sk_buff *nametbl_get(const void *req_tlv_area, int req_tlv_space)
        int str_len;
 
        if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_NAME_TBL_QUERY))
-               return cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
+               return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
 
-       buf = cfg_reply_alloc(TLV_SPACE(MAX_NAME_TBL_QUERY));
+       buf = tipc_cfg_reply_alloc(TLV_SPACE(MAX_NAME_TBL_QUERY));
        if (!buf)
                return NULL;
 
        rep_tlv = (struct tlv_desc *)buf->data;
-       printbuf_init(&b, TLV_DATA(rep_tlv), MAX_NAME_TBL_QUERY);
+       tipc_printbuf_init(&b, TLV_DATA(rep_tlv), MAX_NAME_TBL_QUERY);
        argv = (struct tipc_name_table_query *)TLV_DATA(req_tlv_area);
-       read_lock_bh(&nametbl_lock);
+       read_lock_bh(&tipc_nametbl_lock);
        nametbl_list(&b, ntohl(argv->depth), ntohl(argv->type), 
                     ntohl(argv->lowbound), ntohl(argv->upbound));
-       read_unlock_bh(&nametbl_lock);
-       str_len = printbuf_validate(&b);
+       read_unlock_bh(&tipc_nametbl_lock);
+       str_len = tipc_printbuf_validate(&b);
 
        skb_put(buf, TLV_SPACE(str_len));
        TLV_SET(rep_tlv, TIPC_TLV_ULTRA_STRING, NULL, str_len);
@@ -1023,12 +1023,12 @@ struct sk_buff *nametbl_get(const void *req_tlv_area, int req_tlv_space)
        return buf;
 }
 
-void nametbl_dump(void)
+void tipc_nametbl_dump(void)
 {
-       nametbl_list(CONS, 0, 0, 0, 0);
+       nametbl_list(TIPC_CONS, 0, 0, 0, 0);
 }
 
-int nametbl_init(void)
+int tipc_nametbl_init(void)
 {
        int array_size = sizeof(struct hlist_head) * tipc_nametbl_size;
 
@@ -1036,14 +1036,14 @@ int nametbl_init(void)
        if (!table.types)
                return -ENOMEM;
 
-       write_lock_bh(&nametbl_lock);
+       write_lock_bh(&tipc_nametbl_lock);
        memset(table.types, 0, array_size);
        table.local_publ_count = 0;
-       write_unlock_bh(&nametbl_lock);
+       write_unlock_bh(&tipc_nametbl_lock);
        return 0;
 }
 
-void nametbl_stop(void)
+void tipc_nametbl_stop(void)
 {
        struct hlist_head *seq_head;
        struct hlist_node *seq_node;
@@ -1054,7 +1054,7 @@ void nametbl_stop(void)
        if (!table.types)
                return;
 
-       write_lock_bh(&nametbl_lock);
+       write_lock_bh(&tipc_nametbl_lock);
        for (i = 0; i < tipc_nametbl_size; i++) {
                seq_head = &table.types[i];
                hlist_for_each_entry_safe(seq, seq_node, tmp, seq_head, ns_list) {
@@ -1075,5 +1075,5 @@ void nametbl_stop(void)
        }
        kfree(table.types);
        table.types = NULL;
-       write_unlock_bh(&nametbl_lock);
+       write_unlock_bh(&tipc_nametbl_lock);
 }
index f82693384f60e44b30cb9e3c3278fcf77ac807c7..e8a3d71763ce637e5bdfabe753be917c2792ae92 100644 (file)
@@ -85,24 +85,24 @@ struct publication {
 };
 
 
-extern rwlock_t nametbl_lock;
+extern rwlock_t tipc_nametbl_lock;
 
-struct sk_buff *nametbl_get(const void *req_tlv_area, int req_tlv_space);
-u32 nametbl_translate(u32 type, u32 instance, u32 *node);
-int nametbl_mc_translate(u32 type, u32 lower, u32 upper, u32 limit, 
+struct sk_buff *tipc_nametbl_get(const void *req_tlv_area, int req_tlv_space);
+u32 tipc_nametbl_translate(u32 type, u32 instance, u32 *node);
+int tipc_nametbl_mc_translate(u32 type, u32 lower, u32 upper, u32 limit, 
                         struct port_list *dports);
-int nametbl_publish_rsv(u32 ref, unsigned int scope, 
+int tipc_nametbl_publish_rsv(u32 ref, unsigned int scope, 
                        struct tipc_name_seq const *seq);
-struct publication *nametbl_publish(u32 type, u32 lower, u32 upper,
+struct publication *tipc_nametbl_publish(u32 type, u32 lower, u32 upper,
                                    u32 scope, u32 port_ref, u32 key);
-int nametbl_withdraw(u32 type, u32 lower, u32 ref, u32 key);
-struct publication *nametbl_insert_publ(u32 type, u32 lower, u32 upper,
+int tipc_nametbl_withdraw(u32 type, u32 lower, u32 ref, u32 key);
+struct publication *tipc_nametbl_insert_publ(u32 type, u32 lower, u32 upper,
                                        u32 scope, u32 node, u32 ref, u32 key);
-struct publication *nametbl_remove_publ(u32 type, u32 lower, 
+struct publication *tipc_nametbl_remove_publ(u32 type, u32 lower, 
                                        u32 node, u32 ref, u32 key);
-void nametbl_subscribe(struct subscription *s);
-void nametbl_unsubscribe(struct subscription *s);
-int nametbl_init(void);
-void nametbl_stop(void);
+void tipc_nametbl_subscribe(struct subscription *s);
+void tipc_nametbl_unsubscribe(struct subscription *s);
+int tipc_nametbl_init(void);
+void tipc_nametbl_stop(void);
 
 #endif
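The table declared above is a hash array of sequence lists guarded by tipc_nametbl_lock, with a per-sequence spinlock taken only while a lookup walks the publications. The pthreads sketch below mirrors that discipline in userspace; all names and sizes are illustrative assumptions, not the kernel API.

#include <pthread.h>
#include <stdint.h>

#define BUCKETS 1024                     /* assumed table size (power of two) */

struct seq_example {
	uint32_t type;
	pthread_mutex_t lock;            /* per-sequence lock */
	struct seq_example *next;        /* hash-chain link */
};

static struct seq_example *table_example[BUCKETS];
static pthread_rwlock_t table_lock_example = PTHREAD_RWLOCK_INITIALIZER;

/* Translate-style lookup: table lock for read, bucket scan by hashed
 * type, then the sequence's own lock while its entries are examined. */
static int lookup_example(uint32_t type)
{
	struct seq_example *seq;
	int found = 0;

	pthread_rwlock_rdlock(&table_lock_example);
	for (seq = table_example[type & (BUCKETS - 1)]; seq; seq = seq->next) {
		if (seq->type != type)
			continue;
		pthread_mutex_lock(&seq->lock);
		/* ... examine publications under the sequence lock ... */
		pthread_mutex_unlock(&seq->lock);
		found = 1;
		break;
	}
	pthread_rwlock_unlock(&table_lock_example);
	return found;
}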
index 6826b493c1d6c28dc13b3496edbb52c191934fcf..074891ad4f09a338b5aab2f9ef567f2fb908fc44 100644 (file)
  * 1: The routing hierarchy.
  *    Comprises the structures 'zone', 'cluster', 'node', 'link' 
  *    and 'bearer'. The whole hierarchy is protected by a big 
- *    read/write lock, net_lock, to enssure that nothing is added 
+ *    read/write lock, tipc_net_lock, to ensure that nothing is added 
  *    or removed while code is accessing any of these structures. 
  *    This layer must not be called from the two others while they 
  *    hold any of their own locks.
  *    Neither must it itself do any upcalls to the other two before
- *    it has released net_lock and other protective locks.
+ *    it has released tipc_net_lock and other protective locks.
  *
- *   Within the net_lock domain there are two sub-domains;'node' and 
+ *   Within the tipc_net_lock domain there are two sub-domains: 'node' and 
  *   'bearer', where local write operations are permitted,
  *   provided that those are protected by individual spin_locks
- *   per instance. Code holding net_lock(read) and a node spin_lock 
+ *   per instance. Code holding tipc_net_lock(read) and a node spin_lock 
  *   is permitted to poke around in both the node itself and its
 *   subordinate links. I.e., it can update link counters and queues, 
  *   change link state, send protocol messages, and alter the 
  *   "active_links" array in the node; but it can _not_ remove a link 
  *   or a node from the overall structure.
  *   Correspondingly, individual bearers may change status within a 
- *   net_lock(read), protected by an individual spin_lock ber bearer 
- *   instance, but it needs net_lock(write) to remove/add any bearers.
+ *   tipc_net_lock(read), protected by an individual spin_lock per bearer 
+ *   instance, but it needs tipc_net_lock(write) to remove/add any bearers.
  *     
  *
  *  2: The transport level of the protocol. 
  *       (Nobody is using read-only access to this, so it can just as 
  *       well be changed to a spin_lock)
  *     - A spin lock to protect the registry of kernel/driver users (reg.c)
- *     - A global spin_lock (port_lock), which only task is to ensure 
+ *     - A global spin_lock (tipc_port_lock), whose only task is to ensure 
  *       consistency where more than one port is involved in an operation,
 *       i.e., when a port is part of a linked list of ports.
  *       There are two such lists; 'port_list', which is used for management,
  *       and 'wait_list', which is used to queue ports during congestion.
  *     
  *  3: The name table (name_table.c, name_distr.c, subscription.c)
- *     - There is one big read/write-lock (nametbl_lock) protecting the 
+ *     - There is one big read/write-lock (tipc_nametbl_lock) protecting the 
  *       overall name table structure. Nothing must be added/removed to 
  *       this structure without holding write access to it.
  *     - There is one local spin_lock per sub_sequence, which can be seen
- *       as a sub-domain to the nametbl_lock domain. It is used only
+ *       as a sub-domain to the tipc_nametbl_lock domain. It is used only
  *       for translation operations, and is needed because a translation
  *       steps the root of the 'publication' linked list between each lookup.
- *       This is always used within the scope of a nametbl_lock(read).
+ *       This is always used within the scope of a tipc_nametbl_lock(read).
  *     - A local spin_lock protecting the queue of subscriber events.
 */
 
-rwlock_t net_lock = RW_LOCK_UNLOCKED;
-struct network net = { 0 };
+rwlock_t tipc_net_lock = RW_LOCK_UNLOCKED;
+struct network tipc_net = { 0 };
 
-struct node *net_select_remote_node(u32 addr, u32 ref) 
+struct node *tipc_net_select_remote_node(u32 addr, u32 ref) 
 {
-       return zone_select_remote_node(net.zones[tipc_zone(addr)], addr, ref);
+       return tipc_zone_select_remote_node(tipc_net.zones[tipc_zone(addr)], addr, ref);
 }
 
-u32 net_select_router(u32 addr, u32 ref)
+u32 tipc_net_select_router(u32 addr, u32 ref)
 {
-       return zone_select_router(net.zones[tipc_zone(addr)], addr, ref);
+       return tipc_zone_select_router(tipc_net.zones[tipc_zone(addr)], addr, ref);
 }
 
 
-u32 net_next_node(u32 a)
+u32 tipc_net_next_node(u32 a)
 {
-       if (net.zones[tipc_zone(a)])
-               return zone_next_node(a);
+       if (tipc_net.zones[tipc_zone(a)])
+               return tipc_zone_next_node(a);
        return 0;
 }
 
-void net_remove_as_router(u32 router)
+void tipc_net_remove_as_router(u32 router)
 {
        u32 z_num;
 
        for (z_num = 1; z_num <= tipc_max_zones; z_num++) {
-               if (!net.zones[z_num])
+               if (!tipc_net.zones[z_num])
                        continue;
-               zone_remove_as_router(net.zones[z_num], router);
+               tipc_zone_remove_as_router(tipc_net.zones[z_num], router);
        }
 }
 
-void net_send_external_routes(u32 dest)
+void tipc_net_send_external_routes(u32 dest)
 {
        u32 z_num;
 
        for (z_num = 1; z_num <= tipc_max_zones; z_num++) {
-               if (net.zones[z_num])
-                       zone_send_external_routes(net.zones[z_num], dest);
+               if (tipc_net.zones[z_num])
+                       tipc_zone_send_external_routes(tipc_net.zones[z_num], dest);
        }
 }
 
-int net_init(void)
+static int net_init(void)
 {
        u32 sz = sizeof(struct _zone *) * (tipc_max_zones + 1);
 
-       memset(&net, 0, sizeof(net));
-       net.zones = (struct _zone **)kmalloc(sz, GFP_ATOMIC);
-       if (!net.zones) {
+       memset(&tipc_net, 0, sizeof(tipc_net));
+       tipc_net.zones = (struct _zone **)kmalloc(sz, GFP_ATOMIC);
+       if (!tipc_net.zones) {
                return -ENOMEM;
        }
-       memset(net.zones, 0, sz);
+       memset(tipc_net.zones, 0, sz);
        return TIPC_OK;
 }
 
-void net_stop(void)
+static void net_stop(void)
 {
        u32 z_num;
 
-       if (!net.zones)
+       if (!tipc_net.zones)
                return;
 
        for (z_num = 1; z_num <= tipc_max_zones; z_num++) {
-               zone_delete(net.zones[z_num]);
+               tipc_zone_delete(tipc_net.zones[z_num]);
        }
-       kfree(net.zones);
-       net.zones = 0;
+       kfree(tipc_net.zones);
+       tipc_net.zones = 0;
 }
 
 static void net_route_named_msg(struct sk_buff *buf)
@@ -191,26 +191,26 @@ static void net_route_named_msg(struct sk_buff *buf)
        u32 dport;
 
        if (!msg_named(msg)) {
-               msg_dbg(msg, "net->drop_nam:");
+               msg_dbg(msg, "tipc_net->drop_nam:");
                buf_discard(buf);
                return;
        }
 
        dnode = addr_domain(msg_lookup_scope(msg));
-       dport = nametbl_translate(msg_nametype(msg), msg_nameinst(msg), &dnode);
-       dbg("net->lookup<%u,%u>-><%u,%x>\n",
+       dport = tipc_nametbl_translate(msg_nametype(msg), msg_nameinst(msg), &dnode);
+       dbg("tipc_net->lookup<%u,%u>-><%u,%x>\n",
            msg_nametype(msg), msg_nameinst(msg), dport, dnode);
        if (dport) {
                msg_set_destnode(msg, dnode);
                msg_set_destport(msg, dport);
-               net_route_msg(buf);
+               tipc_net_route_msg(buf);
                return;
        }
-       msg_dbg(msg, "net->rej:NO NAME: ");
+       msg_dbg(msg, "tipc_net->rej:NO NAME: ");
        tipc_reject_msg(buf, TIPC_ERR_NO_NAME);
 }
 
-void net_route_msg(struct sk_buff *buf)
+void tipc_net_route_msg(struct sk_buff *buf)
 {
        struct tipc_msg *msg;
        u32 dnode;
@@ -232,29 +232,29 @@ void net_route_msg(struct sk_buff *buf)
                return;
        }
 
-       msg_dbg(msg, "net->rout: ");
+       msg_dbg(msg, "tipc_net->rout: ");
 
        /* Handle message for this node */
        dnode = msg_short(msg) ? tipc_own_addr : msg_destnode(msg);
        if (in_scope(dnode, tipc_own_addr)) {
                if (msg_isdata(msg)) {
                        if (msg_mcast(msg)) 
-                               port_recv_mcast(buf, NULL);
+                               tipc_port_recv_mcast(buf, NULL);
                        else if (msg_destport(msg))
-                               port_recv_msg(buf);
+                               tipc_port_recv_msg(buf);
                        else
                                net_route_named_msg(buf);
                        return;
                }
                switch (msg_user(msg)) {
                case ROUTE_DISTRIBUTOR:
-                       cluster_recv_routing_table(buf);
+                       tipc_cltr_recv_routing_table(buf);
                        break;
                case NAME_DISTRIBUTOR:
-                       named_recv(buf);
+                       tipc_named_recv(buf);
                        break;
                case CONN_MANAGER:
-                       port_recv_proto_msg(buf);
+                       tipc_port_recv_proto_msg(buf);
                        break;
                default:
                        msg_dbg(msg,"DROP/NET/<REC<");
@@ -265,10 +265,10 @@ void net_route_msg(struct sk_buff *buf)
 
        /* Handle message for another node */
        msg_dbg(msg, "NET>SEND>: ");
-       link_send(buf, dnode, msg_link_selector(msg));
+       tipc_link_send(buf, dnode, msg_link_selector(msg));
 }
 
-int tipc_start_net(void)
+int tipc_net_start(void)
 {
        char addr_string[16];
        int res;
@@ -277,35 +277,35 @@ int tipc_start_net(void)
                return -ENOPROTOOPT;
 
        tipc_mode = TIPC_NET_MODE;
-       named_reinit();
-       port_reinit();
+       tipc_named_reinit();
+       tipc_port_reinit();
 
-       if ((res = bearer_init()) ||
+       if ((res = tipc_bearer_init()) ||
            (res = net_init()) ||
-           (res = cluster_init()) ||
-           (res = bclink_init())) {
+           (res = tipc_cltr_init()) ||
+           (res = tipc_bclink_init())) {
                return res;
        }
-        subscr_stop();
-       cfg_stop();
-       k_signal((Handler)subscr_start, 0);
-       k_signal((Handler)cfg_init, 0);
+        tipc_subscr_stop();
+       tipc_cfg_stop();
+       tipc_k_signal((Handler)tipc_subscr_start, 0);
+       tipc_k_signal((Handler)tipc_cfg_init, 0);
        info("Started in network mode\n");
        info("Own node address %s, network identity %u\n",
             addr_string_fill(addr_string, tipc_own_addr), tipc_net_id);
        return TIPC_OK;
 }
 
-void tipc_stop_net(void)
+void tipc_net_stop(void)
 {
        if (tipc_mode != TIPC_NET_MODE)
                return;
-        write_lock_bh(&net_lock);
-       bearer_stop();
+        write_lock_bh(&tipc_net_lock);
+       tipc_bearer_stop();
        tipc_mode = TIPC_NODE_MODE;
-       bclink_stop();
+       tipc_bclink_stop();
        net_stop();
-        write_unlock_bh(&net_lock);
+        write_unlock_bh(&tipc_net_lock);
        info("Left network mode \n");
 }
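The locking comment at the top of this file describes a two-level scheme: tipc_net_lock taken for read plus a per-node spinlock is enough to update an existing node or its links, while structural additions and removals need the write side. A hedged pthreads analogue, with purely illustrative names, is sketched below.

#include <pthread.h>
#include <stddef.h>

struct node_example {
	pthread_mutex_t lock;            /* per-node lock, like the node spin_lock */
	int active_link_cnt;
};

static pthread_rwlock_t net_lock_example = PTHREAD_RWLOCK_INITIALIZER;

/* Local update inside an existing node: read lock plus the node lock. */
static void bump_links(struct node_example *n)
{
	pthread_rwlock_rdlock(&net_lock_example);
	pthread_mutex_lock(&n->lock);
	n->active_link_cnt++;
	pthread_mutex_unlock(&n->lock);
	pthread_rwlock_unlock(&net_lock_example);
}

/* Structural change (adding or removing a node/bearer): write lock. */
static void drop_node(struct node_example **slot)
{
	pthread_rwlock_wrlock(&net_lock_example);
	*slot = NULL;
	pthread_rwlock_unlock(&net_lock_example);
}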
 
index 948c6d42102cd6bcd81e9a26d77e3b6b2f1db04d..f3e0b85e64753d1e7bedc728d45dd93aa5d2c26e 100644 (file)
@@ -49,18 +49,16 @@ struct network {
 };
 
 
-extern struct network net;
-extern rwlock_t net_lock;
+extern struct network tipc_net;
+extern rwlock_t tipc_net_lock;
 
-int net_init(void);
-void net_stop(void);
-void net_remove_as_router(u32 router);
-void net_send_external_routes(u32 dest);
-void net_route_msg(struct sk_buff *buf);
-struct node *net_select_remote_node(u32 addr, u32 ref);
-u32 net_select_router(u32 addr, u32 ref);
+void tipc_net_remove_as_router(u32 router);
+void tipc_net_send_external_routes(u32 dest);
+void tipc_net_route_msg(struct sk_buff *buf);
+struct node *tipc_net_select_remote_node(u32 addr, u32 ref);
+u32 tipc_net_select_router(u32 addr, u32 ref);
 
-int tipc_start_net(void);
-void tipc_stop_net(void);
+int tipc_net_start(void);
+void tipc_net_stop(void);
 
 #endif
index 19b3f4022532ac58a77389c52720c0c3282795e1..eb1bb4dce7af950f43bbf6c5899f649ea170e986 100644 (file)
@@ -47,13 +47,13 @@ static int handle_cmd(struct sk_buff *skb, struct genl_info *info)
        int hdr_space = NLMSG_SPACE(GENL_HDRLEN + TIPC_GENL_HDRLEN);
 
        if ((req_userhdr->cmd & 0xC000) && (!capable(CAP_NET_ADMIN)))
-               rep_buf = cfg_reply_error_string(TIPC_CFG_NOT_NET_ADMIN);
+               rep_buf = tipc_cfg_reply_error_string(TIPC_CFG_NOT_NET_ADMIN);
        else
-               rep_buf = cfg_do_cmd(req_userhdr->dest,
-                                    req_userhdr->cmd,
-                                    NLMSG_DATA(req_nlh) + GENL_HDRLEN + TIPC_GENL_HDRLEN,
-                                    NLMSG_PAYLOAD(req_nlh, GENL_HDRLEN + TIPC_GENL_HDRLEN),
-                                    hdr_space);
+               rep_buf = tipc_cfg_do_cmd(req_userhdr->dest,
+                                         req_userhdr->cmd,
+                                         NLMSG_DATA(req_nlh) + GENL_HDRLEN + TIPC_GENL_HDRLEN,
+                                         NLMSG_PAYLOAD(req_nlh, GENL_HDRLEN + TIPC_GENL_HDRLEN),
+                                         hdr_space);
 
        if (rep_buf) {
                skb_push(rep_buf, hdr_space);
@@ -81,7 +81,7 @@ static struct genl_ops ops = {
 
 static int family_registered = 0;
 
-int netlink_start(void)
+int tipc_netlink_start(void)
 {
 
 
@@ -103,7 +103,7 @@ int netlink_start(void)
        return -EFAULT;
 }
 
-void netlink_stop(void)
+void tipc_netlink_stop(void)
 {
        if (family_registered) {
                genl_unregister_family(&family);
index 05688d01138b3061a6f1cd7cd1b20138cf8a4008..6d65010e5fa14f25bb436a2298f55724ddd5bd24 100644 (file)
 #include "port.h"
 #include "bearer.h"
 #include "name_distr.h"
-#include "net.h"
 
 void node_print(struct print_buf *buf, struct node *n_ptr, char *str);
 static void node_lost_contact(struct node *n_ptr);
 static void node_established_contact(struct node *n_ptr);
 
-struct node *nodes = NULL;     /* sorted list of nodes within cluster */
+struct node *tipc_nodes = NULL;        /* sorted list of nodes within cluster */
 
 u32 tipc_own_tag = 0;
 
-struct node *node_create(u32 addr)
+struct node *tipc_node_create(u32 addr)
 {
        struct cluster *c_ptr;
        struct node *n_ptr;
@@ -68,16 +67,16 @@ struct node *node_create(u32 addr)
                 n_ptr->lock =  SPIN_LOCK_UNLOCKED;     
                 INIT_LIST_HEAD(&n_ptr->nsub);
        
-               c_ptr = cluster_find(addr);
+               c_ptr = tipc_cltr_find(addr);
                 if (c_ptr == NULL)
-                        c_ptr = cluster_create(addr);
+                        c_ptr = tipc_cltr_create(addr);
                 if (c_ptr != NULL) {
                         n_ptr->owner = c_ptr;
-                        cluster_attach_node(c_ptr, n_ptr);
+                        tipc_cltr_attach_node(c_ptr, n_ptr);
                         n_ptr->last_router = -1;
 
                         /* Insert node into ordered list */
-                        for (curr_node = &nodes; *curr_node; 
+                        for (curr_node = &tipc_nodes; *curr_node; 
                             curr_node = &(*curr_node)->next) {
                                 if (addr < (*curr_node)->addr) {
                                         n_ptr->next = *curr_node;
@@ -93,13 +92,13 @@ struct node *node_create(u32 addr)
        return n_ptr;
 }
 
-void node_delete(struct node *n_ptr)
+void tipc_node_delete(struct node *n_ptr)
 {
        if (!n_ptr)
                return;
 
 #if 0
-       /* Not needed because links are already deleted via bearer_stop() */
+       /* Not needed because links are already deleted via tipc_bearer_stop() */
 
        u32 l_num;
 
@@ -114,12 +113,12 @@ void node_delete(struct node *n_ptr)
 
 
 /**
- * node_link_up - handle addition of link
+ * tipc_node_link_up - handle addition of link
  * 
  * Link becomes active (alone or shared) or standby, depending on its priority.
  */
 
-void node_link_up(struct node *n_ptr, struct link *l_ptr)
+void tipc_node_link_up(struct node *n_ptr, struct link *l_ptr)
 {
        struct link **active = &n_ptr->active_links[0];
 
@@ -136,7 +135,7 @@ void node_link_up(struct node *n_ptr, struct link *l_ptr)
                info("Link is standby\n");
                return;
        }
-       link_send_duplicate(active[0], l_ptr);
+       tipc_link_send_duplicate(active[0], l_ptr);
        if (l_ptr->priority == active[0]->priority) { 
                active[0] = l_ptr;
                return;
@@ -161,7 +160,7 @@ static void node_select_active_links(struct node *n_ptr)
        for (i = 0; i < MAX_BEARERS; i++) {
                 struct link *l_ptr = n_ptr->links[i];
 
-               if (!l_ptr || !link_is_up(l_ptr) ||
+               if (!l_ptr || !tipc_link_is_up(l_ptr) ||
                    (l_ptr->priority < highest_prio))
                        continue;
 
@@ -175,14 +174,14 @@ static void node_select_active_links(struct node *n_ptr)
 }
 
 /**
- * node_link_down - handle loss of link
+ * tipc_node_link_down - handle loss of link
  */
 
-void node_link_down(struct node *n_ptr, struct link *l_ptr)
+void tipc_node_link_down(struct node *n_ptr, struct link *l_ptr)
 {
        struct link **active;
 
-       if (!link_is_active(l_ptr)) {
+       if (!tipc_link_is_active(l_ptr)) {
                info("Lost standby link <%s> on network plane %c\n",
                     l_ptr->name, l_ptr->b_ptr->net_plane);
                return;
@@ -197,40 +196,40 @@ void node_link_down(struct node *n_ptr, struct link *l_ptr)
                active[1] = active[0];
        if (active[0] == l_ptr)
                node_select_active_links(n_ptr);
-       if (node_is_up(n_ptr)) 
-               link_changeover(l_ptr);
+       if (tipc_node_is_up(n_ptr)) 
+               tipc_link_changeover(l_ptr);
        else 
                node_lost_contact(n_ptr);
 }
 
-int node_has_active_links(struct node *n_ptr)
+int tipc_node_has_active_links(struct node *n_ptr)
 {
        return (n_ptr && 
                ((n_ptr->active_links[0]) || (n_ptr->active_links[1])));
 }
 
-int node_has_redundant_links(struct node *n_ptr)
+int tipc_node_has_redundant_links(struct node *n_ptr)
 {
-       return (node_has_active_links(n_ptr) &&
+       return (tipc_node_has_active_links(n_ptr) &&
                (n_ptr->active_links[0] != n_ptr->active_links[1]));
 }
 
-int node_has_active_routes(struct node *n_ptr)
+int tipc_node_has_active_routes(struct node *n_ptr)
 {
        return (n_ptr && (n_ptr->last_router >= 0));
 }
 
-int node_is_up(struct node *n_ptr)
+int tipc_node_is_up(struct node *n_ptr)
 {
-       return (node_has_active_links(n_ptr) || node_has_active_routes(n_ptr));
+       return (tipc_node_has_active_links(n_ptr) || tipc_node_has_active_routes(n_ptr));
 }
 
-struct node *node_attach_link(struct link *l_ptr)
+struct node *tipc_node_attach_link(struct link *l_ptr)
 {
-       struct node *n_ptr = node_find(l_ptr->addr);
+       struct node *n_ptr = tipc_node_find(l_ptr->addr);
 
        if (!n_ptr)
-               n_ptr = node_create(l_ptr->addr);
+               n_ptr = tipc_node_create(l_ptr->addr);
         if (n_ptr) {
                u32 bearer_id = l_ptr->b_ptr->identity;
                char addr_string[16];
@@ -246,7 +245,7 @@ struct node *node_attach_link(struct link *l_ptr)
 
                 if (!n_ptr->links[bearer_id]) {
                         n_ptr->links[bearer_id] = l_ptr;
-                        net.zones[tipc_zone(l_ptr->addr)]->links++;
+                        tipc_net.zones[tipc_zone(l_ptr->addr)]->links++;
                         n_ptr->link_cnt++;
                         return n_ptr;
                 }
@@ -257,10 +256,10 @@ struct node *node_attach_link(struct link *l_ptr)
        return 0;
 }
 
-void node_detach_link(struct node *n_ptr, struct link *l_ptr)
+void tipc_node_detach_link(struct node *n_ptr, struct link *l_ptr)
 {
        n_ptr->links[l_ptr->b_ptr->identity] = 0;
-       net.zones[tipc_zone(l_ptr->addr)]->links--;
+       tipc_net.zones[tipc_zone(l_ptr->addr)]->links--;
        n_ptr->link_cnt--;
 }
 
@@ -315,45 +314,45 @@ static void node_established_contact(struct node *n_ptr)
        struct cluster *c_ptr;
 
        dbg("node_established_contact:-> %x\n", n_ptr->addr);
-       if (!node_has_active_routes(n_ptr)) { 
-               k_signal((Handler)named_node_up, n_ptr->addr);
+       if (!tipc_node_has_active_routes(n_ptr)) { 
+               tipc_k_signal((Handler)tipc_named_node_up, n_ptr->addr);
        }
 
         /* Synchronize broadcast acks */
-        n_ptr->bclink.acked = bclink_get_last_sent();
+        n_ptr->bclink.acked = tipc_bclink_get_last_sent();
 
        if (is_slave(tipc_own_addr))
                return;
        if (!in_own_cluster(n_ptr->addr)) {
                /* Usage case 1 (see above) */
-               c_ptr = cluster_find(tipc_own_addr);
+               c_ptr = tipc_cltr_find(tipc_own_addr);
                if (!c_ptr)
-                       c_ptr = cluster_create(tipc_own_addr);
+                       c_ptr = tipc_cltr_create(tipc_own_addr);
                 if (c_ptr)
-                        cluster_bcast_new_route(c_ptr, n_ptr->addr, 1, 
-                                               tipc_max_nodes);
+                        tipc_cltr_bcast_new_route(c_ptr, n_ptr->addr, 1, 
+                                                 tipc_max_nodes);
                return;
        } 
 
        c_ptr = n_ptr->owner;
        if (is_slave(n_ptr->addr)) {
                /* Usage case 2 (see above) */
-               cluster_bcast_new_route(c_ptr, n_ptr->addr, 1, tipc_max_nodes);
-               cluster_send_local_routes(c_ptr, n_ptr->addr);
+               tipc_cltr_bcast_new_route(c_ptr, n_ptr->addr, 1, tipc_max_nodes);
+               tipc_cltr_send_local_routes(c_ptr, n_ptr->addr);
                return;
        }
 
        if (n_ptr->bclink.supported) {
-               nmap_add(&cluster_bcast_nodes, n_ptr->addr);
+               tipc_nmap_add(&tipc_cltr_bcast_nodes, n_ptr->addr);
                if (n_ptr->addr < tipc_own_addr)
                        tipc_own_tag++;
        }
 
        /* Case 3 (see above) */
-       net_send_external_routes(n_ptr->addr);
-       cluster_send_slave_routes(c_ptr, n_ptr->addr);
-       cluster_bcast_new_route(c_ptr, n_ptr->addr, LOWEST_SLAVE,
-                               highest_allowed_slave);
+       tipc_net_send_external_routes(n_ptr->addr);
+       tipc_cltr_send_slave_routes(c_ptr, n_ptr->addr);
+       tipc_cltr_bcast_new_route(c_ptr, n_ptr->addr, LOWEST_SLAVE,
+                                 tipc_highest_allowed_slave);
 }
 
 static void node_lost_contact(struct node *n_ptr)
@@ -375,39 +374,39 @@ static void node_lost_contact(struct node *n_ptr)
                 n_ptr->bclink.defragm = NULL;
         }            
         if (in_own_cluster(n_ptr->addr) && n_ptr->bclink.supported) { 
-                bclink_acknowledge(n_ptr, mod(n_ptr->bclink.acked + 10000));
+                tipc_bclink_acknowledge(n_ptr, mod(n_ptr->bclink.acked + 10000));
         }
 
         /* Update routing tables */
        if (is_slave(tipc_own_addr)) {
-               net_remove_as_router(n_ptr->addr);
+               tipc_net_remove_as_router(n_ptr->addr);
        } else {
                if (!in_own_cluster(n_ptr->addr)) { 
                        /* Case 4 (see above) */
-                       c_ptr = cluster_find(tipc_own_addr);
-                       cluster_bcast_lost_route(c_ptr, n_ptr->addr, 1,
-                                                tipc_max_nodes);
+                       c_ptr = tipc_cltr_find(tipc_own_addr);
+                       tipc_cltr_bcast_lost_route(c_ptr, n_ptr->addr, 1,
+                                                  tipc_max_nodes);
                } else {
                        /* Case 5 (see above) */
-                       c_ptr = cluster_find(n_ptr->addr);
+                       c_ptr = tipc_cltr_find(n_ptr->addr);
                        if (is_slave(n_ptr->addr)) {
-                               cluster_bcast_lost_route(c_ptr, n_ptr->addr, 1,
-                                                        tipc_max_nodes);
+                               tipc_cltr_bcast_lost_route(c_ptr, n_ptr->addr, 1,
+                                                          tipc_max_nodes);
                        } else {
                                if (n_ptr->bclink.supported) {
-                                       nmap_remove(&cluster_bcast_nodes, 
-                                                   n_ptr->addr);
+                                       tipc_nmap_remove(&tipc_cltr_bcast_nodes, 
+                                                        n_ptr->addr);
                                        if (n_ptr->addr < tipc_own_addr)
                                                tipc_own_tag--;
                                }
-                               net_remove_as_router(n_ptr->addr);
-                               cluster_bcast_lost_route(c_ptr, n_ptr->addr,
-                                                        LOWEST_SLAVE,
-                                                        highest_allowed_slave);
+                               tipc_net_remove_as_router(n_ptr->addr);
+                               tipc_cltr_bcast_lost_route(c_ptr, n_ptr->addr,
+                                                          LOWEST_SLAVE,
+                                                          tipc_highest_allowed_slave);
                        }
                }
        }
-       if (node_has_active_routes(n_ptr))
+       if (tipc_node_has_active_routes(n_ptr))
                return;
 
        info("Lost contact with %s\n", 
@@ -420,35 +419,35 @@ static void node_lost_contact(struct node *n_ptr)
                        continue;
                l_ptr->reset_checkpoint = l_ptr->next_in_no;
                l_ptr->exp_msg_count = 0;
-               link_reset_fragments(l_ptr);
+               tipc_link_reset_fragments(l_ptr);
        }
 
        /* Notify subscribers */
        list_for_each_entry_safe(ns, tns, &n_ptr->nsub, nodesub_list) {
                 ns->node = 0;
                list_del_init(&ns->nodesub_list);
-               k_signal((Handler)ns->handle_node_down,
-                        (unsigned long)ns->usr_handle);
+               tipc_k_signal((Handler)ns->handle_node_down,
+                             (unsigned long)ns->usr_handle);
        }
 }
 
 /**
- * node_select_next_hop - find the next-hop node for a message
+ * tipc_node_select_next_hop - find the next-hop node for a message
  * 
  * Called when cluster local lookup has failed.
  */
 
-struct node *node_select_next_hop(u32 addr, u32 selector)
+struct node *tipc_node_select_next_hop(u32 addr, u32 selector)
 {
        struct node *n_ptr;
        u32 router_addr;
 
-        if (!addr_domain_valid(addr))
+        if (!tipc_addr_domain_valid(addr))
                 return 0;
 
        /* Look for direct link to destination processor */
-       n_ptr = node_find(addr);
-       if (n_ptr && node_has_active_links(n_ptr))
+       n_ptr = tipc_node_find(addr);
+       if (n_ptr && tipc_node_has_active_links(n_ptr))
                 return n_ptr;
 
        /* Cluster local system nodes *must* have direct links */
@@ -456,9 +455,9 @@ struct node *node_select_next_hop(u32 addr, u32 selector)
                return 0;
 
        /* Look for cluster local router with direct link to node */
-       router_addr = node_select_router(n_ptr, selector);
+       router_addr = tipc_node_select_router(n_ptr, selector);
        if (router_addr) 
-                return node_select(router_addr, selector);
+                return tipc_node_select(router_addr, selector);
 
        /* Slave nodes can only be accessed within own cluster via a 
            known router with direct link -- if no router was found, give up */
@@ -467,25 +466,25 @@ struct node *node_select_next_hop(u32 addr, u32 selector)
 
        /* Inter zone/cluster -- find any direct link to remote cluster */
        addr = tipc_addr(tipc_zone(addr), tipc_cluster(addr), 0);
-       n_ptr = net_select_remote_node(addr, selector);
-       if (n_ptr && node_has_active_links(n_ptr))
+       n_ptr = tipc_net_select_remote_node(addr, selector);
+       if (n_ptr && tipc_node_has_active_links(n_ptr))
                 return n_ptr;
 
        /* Last resort -- look for any router to anywhere in remote zone */
-       router_addr =  net_select_router(addr, selector);
+       router_addr =  tipc_net_select_router(addr, selector);
        if (router_addr) 
-                return node_select(router_addr, selector);
+                return tipc_node_select(router_addr, selector);
 
         return 0;
 }
 
 /**
- * node_select_router - select router to reach specified node
+ * tipc_node_select_router - select router to reach specified node
  * 
  * Uses a deterministic and fair algorithm for selecting router node. 
  */
 
-u32 node_select_router(struct node *n_ptr, u32 ref)
+u32 tipc_node_select_router(struct node *n_ptr, u32 ref)
 {
        u32 ulim;
        u32 mask;
@@ -523,7 +522,7 @@ u32 node_select_router(struct node *n_ptr, u32 ref)
        return tipc_addr(own_zone(), own_cluster(), r);
 }
 
-void node_add_router(struct node *n_ptr, u32 router)
+void tipc_node_add_router(struct node *n_ptr, u32 router)
 {
        u32 r_num = tipc_node(router);
 
@@ -534,7 +533,7 @@ void node_add_router(struct node *n_ptr, u32 router)
               !n_ptr->routers[n_ptr->last_router]);
 }
 
-void node_remove_router(struct node *n_ptr, u32 router)
+void tipc_node_remove_router(struct node *n_ptr, u32 router)
 {
        u32 r_num = tipc_node(router);
 
@@ -547,7 +546,7 @@ void node_remove_router(struct node *n_ptr, u32 router)
        while ((--n_ptr->last_router >= 0) && 
               !n_ptr->routers[n_ptr->last_router]);
 
-       if (!node_is_up(n_ptr))
+       if (!tipc_node_is_up(n_ptr))
                node_lost_contact(n_ptr);
 }
 
@@ -572,16 +571,16 @@ u32 tipc_available_nodes(const u32 domain)
        struct node *n_ptr;
        u32 cnt = 0;
 
-       for (n_ptr = nodes; n_ptr; n_ptr = n_ptr->next) {
+       for (n_ptr = tipc_nodes; n_ptr; n_ptr = n_ptr->next) {
                if (!in_scope(domain, n_ptr->addr))
                        continue;
-               if (node_is_up(n_ptr))
+               if (tipc_node_is_up(n_ptr))
                        cnt++;
        }
        return cnt;
 }
 
-struct sk_buff *node_get_nodes(const void *req_tlv_area, int req_tlv_space)
+struct sk_buff *tipc_node_get_nodes(const void *req_tlv_area, int req_tlv_space)
 {
        u32 domain;
        struct sk_buff *buf;
@@ -589,40 +588,40 @@ struct sk_buff *node_get_nodes(const void *req_tlv_area, int req_tlv_space)
         struct tipc_node_info node_info;
 
        if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_NET_ADDR))
-               return cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
+               return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
 
        domain = *(u32 *)TLV_DATA(req_tlv_area);
        domain = ntohl(domain);
-       if (!addr_domain_valid(domain))
-               return cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
-                                             " (network address)");
+       if (!tipc_addr_domain_valid(domain))
+               return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
+                                                  " (network address)");
 
-        if (!nodes)
-                return cfg_reply_none();
+        if (!tipc_nodes)
+                return tipc_cfg_reply_none();
 
        /* For now, get space for all other nodes 
           (will need to modify this when slave nodes are supported */
 
-       buf = cfg_reply_alloc(TLV_SPACE(sizeof(node_info)) *
-                           (tipc_max_nodes - 1));
+       buf = tipc_cfg_reply_alloc(TLV_SPACE(sizeof(node_info)) *
+                                  (tipc_max_nodes - 1));
        if (!buf)
                return NULL;
 
        /* Add TLVs for all nodes in scope */
 
-       for (n_ptr = nodes; n_ptr; n_ptr = n_ptr->next) {
+       for (n_ptr = tipc_nodes; n_ptr; n_ptr = n_ptr->next) {
                if (!in_scope(domain, n_ptr->addr))
                        continue;
                 node_info.addr = htonl(n_ptr->addr);
-                node_info.up = htonl(node_is_up(n_ptr));
-               cfg_append_tlv(buf, TIPC_TLV_NODE_INFO, 
-                              &node_info, sizeof(node_info));
+                node_info.up = htonl(tipc_node_is_up(n_ptr));
+               tipc_cfg_append_tlv(buf, TIPC_TLV_NODE_INFO, 
+                                   &node_info, sizeof(node_info));
        }
 
        return buf;
 }
 
-struct sk_buff *node_get_links(const void *req_tlv_area, int req_tlv_space)
+struct sk_buff *tipc_node_get_links(const void *req_tlv_area, int req_tlv_space)
 {
        u32 domain;
        struct sk_buff *buf;
@@ -630,22 +629,22 @@ struct sk_buff *node_get_links(const void *req_tlv_area, int req_tlv_space)
         struct tipc_link_info link_info;
 
        if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_NET_ADDR))
-               return cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
+               return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
 
        domain = *(u32 *)TLV_DATA(req_tlv_area);
        domain = ntohl(domain);
-       if (!addr_domain_valid(domain))
-               return cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
-                                             " (network address)");
+       if (!tipc_addr_domain_valid(domain))
+               return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
+                                                  " (network address)");
 
-        if (!nodes)
-                return cfg_reply_none();
+        if (!tipc_nodes)
+                return tipc_cfg_reply_none();
 
        /* For now, get space for 2 links to all other nodes + bcast link
           (will need to modify this when slave nodes are supported) */
 
-       buf = cfg_reply_alloc(TLV_SPACE(sizeof(link_info)) *
-                           (2 * (tipc_max_nodes - 1) + 1));
+       buf = tipc_cfg_reply_alloc(TLV_SPACE(sizeof(link_info)) *
+                                  (2 * (tipc_max_nodes - 1) + 1));
        if (!buf)
                return NULL;
 
@@ -654,12 +653,12 @@ struct sk_buff *node_get_links(const void *req_tlv_area, int req_tlv_space)
         link_info.dest = tipc_own_addr & 0xfffff00;
        link_info.dest = htonl(link_info.dest);
         link_info.up = htonl(1);
-        sprintf(link_info.str, bc_link_name);
-       cfg_append_tlv(buf, TIPC_TLV_LINK_INFO, &link_info, sizeof(link_info));
+        sprintf(link_info.str, tipc_bclink_name);
+       tipc_cfg_append_tlv(buf, TIPC_TLV_LINK_INFO, &link_info, sizeof(link_info));
 
        /* Add TLVs for any other links in scope */
 
-       for (n_ptr = nodes; n_ptr; n_ptr = n_ptr->next) {
+       for (n_ptr = tipc_nodes; n_ptr; n_ptr = n_ptr->next) {
                 u32 i;
 
                if (!in_scope(domain, n_ptr->addr))
@@ -668,10 +667,10 @@ struct sk_buff *node_get_links(const void *req_tlv_area, int req_tlv_space)
                         if (!n_ptr->links[i]) 
                                 continue;
                         link_info.dest = htonl(n_ptr->addr);
-                        link_info.up = htonl(link_is_up(n_ptr->links[i]));
+                        link_info.up = htonl(tipc_link_is_up(n_ptr->links[i]));
                         strcpy(link_info.str, n_ptr->links[i]->name);
-                       cfg_append_tlv(buf, TIPC_TLV_LINK_INFO, 
-                                      &link_info, sizeof(link_info));
+                       tipc_cfg_append_tlv(buf, TIPC_TLV_LINK_INFO, 
+                                           &link_info, sizeof(link_info));
                 }
        }
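
Both renamed query handlers above follow the same reply-building pattern: validate the request TLV, allocate a worst-case reply buffer, then append one TLV per in-scope item. A condensed sketch of that pattern (illustrative only; example_get_nodes is not part of the patch):

struct sk_buff *example_get_nodes(u32 domain)
{
	struct tipc_node_info node_info;
	struct node *n_ptr;
	struct sk_buff *buf;

	/* worst case: one TLV for every possible peer node */
	buf = tipc_cfg_reply_alloc(TLV_SPACE(sizeof(node_info)) *
				   (tipc_max_nodes - 1));
	if (!buf)
		return NULL;

	for (n_ptr = tipc_nodes; n_ptr; n_ptr = n_ptr->next) {
		if (!in_scope(domain, n_ptr->addr))
			continue;
		node_info.addr = htonl(n_ptr->addr);
		node_info.up = htonl(tipc_node_is_up(n_ptr));
		tipc_cfg_append_tlv(buf, TIPC_TLV_NODE_INFO,
				    &node_info, sizeof(node_info));
	}
	return buf;
}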
 
index b39442badccf7e082c258ed4061e45acabe9e1eb..29f7ae6992d4db861c5676aab59589f1a92223b8 100644 (file)
@@ -92,31 +92,31 @@ struct node {
        } bclink;
 };
 
-extern struct node *nodes;
+extern struct node *tipc_nodes;
 extern u32 tipc_own_tag;
 
-struct node *node_create(u32 addr);
-void node_delete(struct node *n_ptr);
-struct node *node_attach_link(struct link *l_ptr);
-void node_detach_link(struct node *n_ptr, struct link *l_ptr);
-void node_link_down(struct node *n_ptr, struct link *l_ptr);
-void node_link_up(struct node *n_ptr, struct link *l_ptr);
-int node_has_active_links(struct node *n_ptr);
-int node_has_redundant_links(struct node *n_ptr);
-u32 node_select_router(struct node *n_ptr, u32 ref);
-struct node *node_select_next_hop(u32 addr, u32 selector);
-int node_is_up(struct node *n_ptr);
-void node_add_router(struct node *n_ptr, u32 router);
-void node_remove_router(struct node *n_ptr, u32 router);
-struct sk_buff *node_get_links(const void *req_tlv_area, int req_tlv_space);
-struct sk_buff *node_get_nodes(const void *req_tlv_area, int req_tlv_space);
+struct node *tipc_node_create(u32 addr);
+void tipc_node_delete(struct node *n_ptr);
+struct node *tipc_node_attach_link(struct link *l_ptr);
+void tipc_node_detach_link(struct node *n_ptr, struct link *l_ptr);
+void tipc_node_link_down(struct node *n_ptr, struct link *l_ptr);
+void tipc_node_link_up(struct node *n_ptr, struct link *l_ptr);
+int tipc_node_has_active_links(struct node *n_ptr);
+int tipc_node_has_redundant_links(struct node *n_ptr);
+u32 tipc_node_select_router(struct node *n_ptr, u32 ref);
+struct node *tipc_node_select_next_hop(u32 addr, u32 selector);
+int tipc_node_is_up(struct node *n_ptr);
+void tipc_node_add_router(struct node *n_ptr, u32 router);
+void tipc_node_remove_router(struct node *n_ptr, u32 router);
+struct sk_buff *tipc_node_get_links(const void *req_tlv_area, int req_tlv_space);
+struct sk_buff *tipc_node_get_nodes(const void *req_tlv_area, int req_tlv_space);
 
-static inline struct node *node_find(u32 addr)
+static inline struct node *tipc_node_find(u32 addr)
 {
        if (likely(in_own_cluster(addr)))
-               return local_nodes[tipc_node(addr)];
-       else if (addr_domain_valid(addr)) {
-               struct cluster *c_ptr = cluster_find(addr);
+               return tipc_local_nodes[tipc_node(addr)];
+       else if (tipc_addr_domain_valid(addr)) {
+               struct cluster *c_ptr = tipc_cltr_find(addr);
 
                if (c_ptr)
                        return c_ptr->nodes[tipc_node(addr)];
@@ -124,19 +124,19 @@ static inline struct node *node_find(u32 addr)
        return 0;
 }
 
-static inline struct node *node_select(u32 addr, u32 selector)
+static inline struct node *tipc_node_select(u32 addr, u32 selector)
 {
        if (likely(in_own_cluster(addr)))
-               return local_nodes[tipc_node(addr)];
-       return node_select_next_hop(addr, selector);
+               return tipc_local_nodes[tipc_node(addr)];
+       return tipc_node_select_next_hop(addr, selector);
 }
 
-static inline void node_lock(struct node *n_ptr)
+static inline void tipc_node_lock(struct node *n_ptr)
 {
        spin_lock_bh(&n_ptr->lock);
 }
 
-static inline void node_unlock(struct node *n_ptr)
+static inline void tipc_node_unlock(struct node *n_ptr)
 {
        spin_unlock_bh(&n_ptr->lock);
 }
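
The header now exposes the tipc_-prefixed lookup and locking helpers. A minimal usage sketch of the typical find/lock/query/unlock sequence (illustrative only; example_peer_is_up is not from the patch):

static int example_peer_is_up(u32 addr)
{
	struct node *n_ptr = tipc_node_find(addr);
	int up;

	if (!n_ptr)
		return 0;			/* unknown or invalid address */

	tipc_node_lock(n_ptr);
	up = tipc_node_is_up(n_ptr);
	tipc_node_unlock(n_ptr);
	return up;
}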
index 79375927916f7d10e58d1ac2f5763dfa22c336ef..afeea121d8be0c12796754f004d6a8028aec732a 100644 (file)
 #include "addr.h"
 
 /**
- * nodesub_subscribe - create "node down" subscription for specified node
+ * tipc_nodesub_subscribe - create "node down" subscription for specified node
  */
 
-void nodesub_subscribe(struct node_subscr *node_sub, u32 addr, 
+void tipc_nodesub_subscribe(struct node_subscr *node_sub, u32 addr, 
                       void *usr_handle, net_ev_handler handle_down)
 {
        node_sub->node = 0;
        if (addr == tipc_own_addr)
                return;
-       if (!addr_node_valid(addr)) {
+       if (!tipc_addr_node_valid(addr)) {
                warn("node_subscr with illegal %x\n", addr);
                return;
        }
 
        node_sub->handle_node_down = handle_down;
        node_sub->usr_handle = usr_handle;
-       node_sub->node = node_find(addr);
+       node_sub->node = tipc_node_find(addr);
        assert(node_sub->node);
-       node_lock(node_sub->node);
+       tipc_node_lock(node_sub->node);
        list_add_tail(&node_sub->nodesub_list, &node_sub->node->nsub);
-       node_unlock(node_sub->node);
+       tipc_node_unlock(node_sub->node);
 }
 
 /**
- * nodesub_unsubscribe - cancel "node down" subscription (if any)
+ * tipc_nodesub_unsubscribe - cancel "node down" subscription (if any)
  */
 
-void nodesub_unsubscribe(struct node_subscr *node_sub)
+void tipc_nodesub_unsubscribe(struct node_subscr *node_sub)
 {
        if (!node_sub->node)
                return;
 
-       node_lock(node_sub->node);
+       tipc_node_lock(node_sub->node);
        list_del_init(&node_sub->nodesub_list);
-       node_unlock(node_sub->node);
+       tipc_node_unlock(node_sub->node);
 }
index a3b87ac4859b7d66490765385f8f48af9f45bf42..01751c4fbb43ec10d40f8be516d18e2380d17df9 100644 (file)
@@ -56,8 +56,8 @@ struct node_subscr {
        struct list_head nodesub_list;
 };
 
-void nodesub_subscribe(struct node_subscr *node_sub, u32 addr,
-                      void *usr_handle, net_ev_handler handle_down);
-void nodesub_unsubscribe(struct node_subscr *node_sub);
+void tipc_nodesub_subscribe(struct node_subscr *node_sub, u32 addr,
+                           void *usr_handle, net_ev_handler handle_down);
+void tipc_nodesub_unsubscribe(struct node_subscr *node_sub);
 
 #endif
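
Usage of the renamed subscription API mirrors what port.c does when a connection is set up: register a "node down" handler for the peer's address, and cancel it when the association ends. A hedged sketch (the example_* names are illustrative; the handler cast follows the style of the callers shown elsewhere in this patch):

static void example_peer_down(void *usr_handle)
{
	/* peer node became unreachable; tear down state bound to usr_handle */
}

static void example_track_peer(struct node_subscr *sub, u32 peer_addr,
			       void *usr_handle)
{
	tipc_nodesub_subscribe(sub, peer_addr, usr_handle,
			       (net_ev_handler)example_peer_down);
	/* ... later, when the association is dropped ... */
	tipc_nodesub_unsubscribe(sub);
}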
index 66caca7abe92215cddba264ecd8d1408150e4f04..72aae52bfec1849902d4293c3ddc447f38b96190 100644 (file)
 static struct sk_buff *msg_queue_head = 0;
 static struct sk_buff *msg_queue_tail = 0;
 
-spinlock_t port_list_lock = SPIN_LOCK_UNLOCKED;
+spinlock_t tipc_port_list_lock = SPIN_LOCK_UNLOCKED;
 static spinlock_t queue_lock = SPIN_LOCK_UNLOCKED;
 
-LIST_HEAD(ports);
+static LIST_HEAD(ports);
 static void port_handle_node_down(unsigned long ref);
 static struct sk_buff* port_build_self_abort_msg(struct port *,u32 err);
 static struct sk_buff* port_build_peer_abort_msg(struct port *,u32 err);
@@ -107,7 +107,7 @@ int tipc_multicast(u32 ref, struct tipc_name_seq const *seq, u32 domain,
        struct sk_buff *buf;
        struct sk_buff *ibuf = NULL;
        struct port_list dports = {0, NULL, };
-       struct port *oport = port_deref(ref);
+       struct port *oport = tipc_port_deref(ref);
        int ext_targets;
        int res;
 
@@ -129,8 +129,8 @@ int tipc_multicast(u32 ref, struct tipc_name_seq const *seq, u32 domain,
 
        /* Figure out where to send multicast message */
 
-       ext_targets = nametbl_mc_translate(seq->type, seq->lower, seq->upper,
-                                          TIPC_NODE_SCOPE, &dports);
+       ext_targets = tipc_nametbl_mc_translate(seq->type, seq->lower, seq->upper,
+                                               TIPC_NODE_SCOPE, &dports);
        
        /* Send message to destinations (duplicate it only if necessary) */ 
 
@@ -138,12 +138,12 @@ int tipc_multicast(u32 ref, struct tipc_name_seq const *seq, u32 domain,
                if (dports.count != 0) {
                        ibuf = skb_copy(buf, GFP_ATOMIC);
                        if (ibuf == NULL) {
-                               port_list_free(&dports);
+                               tipc_port_list_free(&dports);
                                buf_discard(buf);
                                return -ENOMEM;
                        }
                }
-               res = bclink_send_msg(buf);
+               res = tipc_bclink_send_msg(buf);
                if ((res < 0) && (dports.count != 0)) {
                        buf_discard(ibuf);
                }
@@ -153,20 +153,20 @@ int tipc_multicast(u32 ref, struct tipc_name_seq const *seq, u32 domain,
 
        if (res >= 0) {
                if (ibuf)
-                       port_recv_mcast(ibuf, &dports);
+                       tipc_port_recv_mcast(ibuf, &dports);
        } else {
-               port_list_free(&dports);
+               tipc_port_list_free(&dports);
        }
        return res;
 }
 
 /**
- * port_recv_mcast - deliver multicast message to all destination ports
+ * tipc_port_recv_mcast - deliver multicast message to all destination ports
  * 
  * If there is no port list, perform a lookup to create one
  */
 
-void port_recv_mcast(struct sk_buff *buf, struct port_list *dp)
+void tipc_port_recv_mcast(struct sk_buff *buf, struct port_list *dp)
 {
        struct tipc_msg* msg;
        struct port_list dports = {0, NULL, };
@@ -179,7 +179,7 @@ void port_recv_mcast(struct sk_buff *buf, struct port_list *dp)
        /* Create destination port list, if one wasn't supplied */
 
        if (dp == NULL) {
-               nametbl_mc_translate(msg_nametype(msg),
+               tipc_nametbl_mc_translate(msg_nametype(msg),
                                     msg_namelower(msg),
                                     msg_nameupper(msg),
                                     TIPC_CLUSTER_SCOPE,
@@ -192,8 +192,8 @@ void port_recv_mcast(struct sk_buff *buf, struct port_list *dp)
        if (dp->count != 0) {
                if (dp->count == 1) {
                        msg_set_destport(msg, dp->ports[0]);
-                       port_recv_msg(buf);
-                       port_list_free(dp);
+                       tipc_port_recv_msg(buf);
+                       tipc_port_list_free(dp);
                        return;
                }
                for (; cnt < dp->count; cnt++) {
@@ -209,12 +209,12 @@ void port_recv_mcast(struct sk_buff *buf, struct port_list *dp)
                                item = item->next;
                        }
                        msg_set_destport(buf_msg(b),item->ports[index]);
-                       port_recv_msg(b);
+                       tipc_port_recv_msg(b);
                }
        }
 exit:
        buf_discard(buf);
-       port_list_free(dp);
+       tipc_port_list_free(dp);
 }
 
 /**
@@ -238,14 +238,14 @@ u32 tipc_createport_raw(void *usr_handle,
                return 0;
        }
        memset(p_ptr, 0, sizeof(*p_ptr));
-       ref = ref_acquire(p_ptr, &p_ptr->publ.lock);
+       ref = tipc_ref_acquire(p_ptr, &p_ptr->publ.lock);
        if (!ref) {
                warn("Reference Table Exhausted\n");
                kfree(p_ptr);
                return 0;
        }
 
-       port_lock(ref);
+       tipc_port_lock(ref);
        p_ptr->publ.ref = ref;
        msg = &p_ptr->publ.phdr;
        msg_init(msg, DATA_LOW, TIPC_NAMED_MSG, TIPC_OK, LONG_H_SIZE, 0);
@@ -264,12 +264,12 @@ u32 tipc_createport_raw(void *usr_handle,
        p_ptr->wakeup = wakeup;
        p_ptr->user_port = 0;
        k_init_timer(&p_ptr->timer, (Handler)port_timeout, ref);
-       spin_lock_bh(&port_list_lock);
+       spin_lock_bh(&tipc_port_list_lock);
        INIT_LIST_HEAD(&p_ptr->publications);
        INIT_LIST_HEAD(&p_ptr->port_list);
        list_add_tail(&p_ptr->port_list, &ports);
-       spin_unlock_bh(&port_list_lock);
-       port_unlock(p_ptr);
+       spin_unlock_bh(&tipc_port_list_lock);
+       tipc_port_unlock(p_ptr);
        return ref;
 }
 
@@ -279,31 +279,31 @@ int tipc_deleteport(u32 ref)
        struct sk_buff *buf = 0;
 
        tipc_withdraw(ref, 0, 0);
-       p_ptr = port_lock(ref);
+       p_ptr = tipc_port_lock(ref);
        if (!p_ptr) 
                return -EINVAL;
 
-       ref_discard(ref);
-       port_unlock(p_ptr);
+       tipc_ref_discard(ref);
+       tipc_port_unlock(p_ptr);
 
        k_cancel_timer(&p_ptr->timer);
        if (p_ptr->publ.connected) {
                buf = port_build_peer_abort_msg(p_ptr, TIPC_ERR_NO_PORT);
-               nodesub_unsubscribe(&p_ptr->subscription);
+               tipc_nodesub_unsubscribe(&p_ptr->subscription);
        }
        if (p_ptr->user_port) {
-               reg_remove_port(p_ptr->user_port);
+               tipc_reg_remove_port(p_ptr->user_port);
                kfree(p_ptr->user_port);
        }
 
-       spin_lock_bh(&port_list_lock);
+       spin_lock_bh(&tipc_port_list_lock);
        list_del(&p_ptr->port_list);
        list_del(&p_ptr->wait_list);
-       spin_unlock_bh(&port_list_lock);
+       spin_unlock_bh(&tipc_port_list_lock);
        k_term_timer(&p_ptr->timer);
        kfree(p_ptr);
        dbg("Deleted port %u\n", ref);
-       net_route_msg(buf);
+       tipc_net_route_msg(buf);
        return TIPC_OK;
 }
 
@@ -315,7 +315,7 @@ int tipc_deleteport(u32 ref)
 
 struct tipc_port *tipc_get_port(const u32 ref)
 {
-       return (struct tipc_port *)ref_deref(ref);
+       return (struct tipc_port *)tipc_ref_deref(ref);
 }
 
 /**
@@ -327,11 +327,11 @@ void *tipc_get_handle(const u32 ref)
        struct port *p_ptr;
        void * handle;
 
-       p_ptr = port_lock(ref);
+       p_ptr = tipc_port_lock(ref);
        if (!p_ptr)
                return 0;
        handle = p_ptr->publ.usr_handle;
-       port_unlock(p_ptr);
+       tipc_port_unlock(p_ptr);
        return handle;
 }
 
@@ -344,7 +344,7 @@ int tipc_portunreliable(u32 ref, unsigned int *isunreliable)
 {
        struct port *p_ptr;
        
-       p_ptr = port_lock(ref);
+       p_ptr = tipc_port_lock(ref);
        if (!p_ptr)
                return -EINVAL;
        *isunreliable = port_unreliable(p_ptr);
@@ -356,11 +356,11 @@ int tipc_set_portunreliable(u32 ref, unsigned int isunreliable)
 {
        struct port *p_ptr;
        
-       p_ptr = port_lock(ref);
+       p_ptr = tipc_port_lock(ref);
        if (!p_ptr)
                return -EINVAL;
        msg_set_src_droppable(&p_ptr->publ.phdr, (isunreliable != 0));
-       port_unlock(p_ptr);
+       tipc_port_unlock(p_ptr);
        return TIPC_OK;
 }
 
@@ -373,7 +373,7 @@ int tipc_portunreturnable(u32 ref, unsigned int *isunrejectable)
 {
        struct port *p_ptr;
        
-       p_ptr = port_lock(ref);
+       p_ptr = tipc_port_lock(ref);
        if (!p_ptr)
                return -EINVAL;
        *isunrejectable = port_unreturnable(p_ptr);
@@ -385,11 +385,11 @@ int tipc_set_portunreturnable(u32 ref, unsigned int isunrejectable)
 {
        struct port *p_ptr;
        
-       p_ptr = port_lock(ref);
+       p_ptr = tipc_port_lock(ref);
        if (!p_ptr)
                return -EINVAL;
        msg_set_dest_droppable(&p_ptr->publ.phdr, (isunrejectable != 0));
-       port_unlock(p_ptr);
+       tipc_port_unlock(p_ptr);
        return TIPC_OK;
 }
 
@@ -476,25 +476,25 @@ int tipc_reject_msg(struct sk_buff *buf, u32 err)
        /* send self-abort message when rejecting on a connected port */
        if (msg_connected(msg)) {
                struct sk_buff *abuf = 0;
-               struct port *p_ptr = port_lock(msg_destport(msg));
+               struct port *p_ptr = tipc_port_lock(msg_destport(msg));
 
                if (p_ptr) {
                        if (p_ptr->publ.connected)
                                abuf = port_build_self_abort_msg(p_ptr, err);
-                       port_unlock(p_ptr);
+                       tipc_port_unlock(p_ptr);
                }
-               net_route_msg(abuf);
+               tipc_net_route_msg(abuf);
        }
 
        /* send rejected message */
        buf_discard(buf);
-       net_route_msg(rbuf);
+       tipc_net_route_msg(rbuf);
        return data_sz;
 }
 
-int port_reject_sections(struct port *p_ptr, struct tipc_msg *hdr,
-                        struct iovec const *msg_sect, u32 num_sect,
-                        int err)
+int tipc_port_reject_sections(struct port *p_ptr, struct tipc_msg *hdr,
+                             struct iovec const *msg_sect, u32 num_sect,
+                             int err)
 {
        struct sk_buff *buf;
        int res;
@@ -509,7 +509,7 @@ int port_reject_sections(struct port *p_ptr, struct tipc_msg *hdr,
 
 static void port_timeout(unsigned long ref)
 {
-       struct port *p_ptr = port_lock(ref);
+       struct port *p_ptr = tipc_port_lock(ref);
        struct sk_buff *buf = 0;
 
        if (!p_ptr || !p_ptr->publ.connected)
@@ -532,21 +532,21 @@ static void port_timeout(unsigned long ref)
                p_ptr->probing_state = PROBING;
                k_start_timer(&p_ptr->timer, p_ptr->probing_interval);
        }
-       port_unlock(p_ptr);
-       net_route_msg(buf);
+       tipc_port_unlock(p_ptr);
+       tipc_net_route_msg(buf);
 }
 
 
 static void port_handle_node_down(unsigned long ref)
 {
-       struct port *p_ptr = port_lock(ref);
+       struct port *p_ptr = tipc_port_lock(ref);
        struct sk_buff* buf = 0;
 
        if (!p_ptr)
                return;
        buf = port_build_self_abort_msg(p_ptr, TIPC_ERR_NO_NODE);
-       port_unlock(p_ptr);
-       net_route_msg(buf);
+       tipc_port_unlock(p_ptr);
+       tipc_net_route_msg(buf);
 }
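
The two handlers above show the locking convention used throughout this file: look the port up and lock it through its reference, build any outgoing buffer while locked, then unlock before handing the buffer to tipc_net_route_msg(). A condensed sketch of that convention (example_abort_port is illustrative, not part of the patch):

static void example_abort_port(u32 ref)
{
	struct port *p_ptr = tipc_port_lock(ref);
	struct sk_buff *buf = 0;

	if (!p_ptr)
		return;
	if (p_ptr->publ.connected)
		buf = port_build_self_abort_msg(p_ptr, TIPC_ERR_NO_NODE);
	tipc_port_unlock(p_ptr);
	tipc_net_route_msg(buf);	/* buf may still be NULL here, as above */
}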
 
 
@@ -589,10 +589,10 @@ static struct sk_buff *port_build_peer_abort_msg(struct port *p_ptr, u32 err)
                                    0);
 }
 
-void port_recv_proto_msg(struct sk_buff *buf)
+void tipc_port_recv_proto_msg(struct sk_buff *buf)
 {
        struct tipc_msg *msg = buf_msg(buf);
-       struct port *p_ptr = port_lock(msg_destport(msg));
+       struct port *p_ptr = tipc_port_lock(msg_destport(msg));
        u32 err = TIPC_OK;
        struct sk_buff *r_buf = 0;
        struct sk_buff *abort_buf = 0;
@@ -615,11 +615,11 @@ void port_recv_proto_msg(struct sk_buff *buf)
                        }
                }
                if (msg_type(msg) == CONN_ACK) {
-                       int wakeup = port_congested(p_ptr) && 
+                       int wakeup = tipc_port_congested(p_ptr) && 
                                     p_ptr->publ.congested &&
                                     p_ptr->wakeup;
                        p_ptr->acked += msg_msgcnt(msg);
-                       if (port_congested(p_ptr))
+                       if (tipc_port_congested(p_ptr))
                                goto exit;
                        p_ptr->publ.congested = 0;
                        if (!wakeup)
@@ -659,9 +659,9 @@ void port_recv_proto_msg(struct sk_buff *buf)
        port_incr_out_seqno(p_ptr);
 exit:
        if (p_ptr)
-               port_unlock(p_ptr);
-       net_route_msg(r_buf);
-       net_route_msg(abort_buf);
+               tipc_port_unlock(p_ptr);
+       tipc_net_route_msg(r_buf);
+       tipc_net_route_msg(abort_buf);
        buf_discard(buf);
 }
 
@@ -704,7 +704,7 @@ static void port_print(struct port *p_ptr, struct print_buf *buf, int full_id)
 
 #define MAX_PORT_QUERY 32768
 
-struct sk_buff *port_get_ports(void)
+struct sk_buff *tipc_port_get_ports(void)
 {
        struct sk_buff *buf;
        struct tlv_desc *rep_tlv;
@@ -712,20 +712,20 @@ struct sk_buff *port_get_ports(void)
        struct port *p_ptr;
        int str_len;
 
-       buf = cfg_reply_alloc(TLV_SPACE(MAX_PORT_QUERY));
+       buf = tipc_cfg_reply_alloc(TLV_SPACE(MAX_PORT_QUERY));
        if (!buf)
                return NULL;
        rep_tlv = (struct tlv_desc *)buf->data;
 
-       printbuf_init(&pb, TLV_DATA(rep_tlv), MAX_PORT_QUERY);
-       spin_lock_bh(&port_list_lock);
+       tipc_printbuf_init(&pb, TLV_DATA(rep_tlv), MAX_PORT_QUERY);
+       spin_lock_bh(&tipc_port_list_lock);
        list_for_each_entry(p_ptr, &ports, port_list) {
                spin_lock_bh(p_ptr->publ.lock);
                port_print(p_ptr, &pb, 0);
                spin_unlock_bh(p_ptr->publ.lock);
        }
-       spin_unlock_bh(&port_list_lock);
-       str_len = printbuf_validate(&pb);
+       spin_unlock_bh(&tipc_port_list_lock);
+       str_len = tipc_printbuf_validate(&pb);
 
        skb_put(buf, TLV_SPACE(str_len));
        TLV_SET(rep_tlv, TIPC_TLV_ULTRA_STRING, NULL, str_len);
@@ -752,22 +752,22 @@ struct sk_buff *port_show_stats(const void *req_tlv_area, int req_tlv_space)
        ref = *(u32 *)TLV_DATA(req_tlv_area);
        ref = ntohl(ref);
 
-       p_ptr = port_lock(ref);
+       p_ptr = tipc_port_lock(ref);
        if (!p_ptr)
                return cfg_reply_error_string("port not found");
 
-       buf = cfg_reply_alloc(TLV_SPACE(MAX_PORT_STATS));
+       buf = tipc_cfg_reply_alloc(TLV_SPACE(MAX_PORT_STATS));
        if (!buf) {
-               port_unlock(p_ptr);
+               tipc_port_unlock(p_ptr);
                return NULL;
        }
        rep_tlv = (struct tlv_desc *)buf->data;
 
-       printbuf_init(&pb, TLV_DATA(rep_tlv), MAX_PORT_STATS);
+       tipc_printbuf_init(&pb, TLV_DATA(rep_tlv), MAX_PORT_STATS);
        port_print(p_ptr, &pb, 1);
        /* NEED TO FILL IN ADDITIONAL PORT STATISTICS HERE */
-       port_unlock(p_ptr);
-       str_len = printbuf_validate(&pb);
+       tipc_port_unlock(p_ptr);
+       str_len = tipc_printbuf_validate(&pb);
 
        skb_put(buf, TLV_SPACE(str_len));
        TLV_SET(rep_tlv, TIPC_TLV_ULTRA_STRING, NULL, str_len);
@@ -777,19 +777,19 @@ struct sk_buff *port_show_stats(const void *req_tlv_area, int req_tlv_space)
 
 #endif
 
-void port_reinit(void)
+void tipc_port_reinit(void)
 {
        struct port *p_ptr;
        struct tipc_msg *msg;
 
-       spin_lock_bh(&port_list_lock);
+       spin_lock_bh(&tipc_port_list_lock);
        list_for_each_entry(p_ptr, &ports, port_list) {
                msg = &p_ptr->publ.phdr;
                if (msg_orignode(msg) == tipc_own_addr)
                        break;
                msg_set_orignode(msg, tipc_own_addr);
        }
-       spin_unlock_bh(&port_list_lock);
+       spin_unlock_bh(&tipc_port_list_lock);
 }
 
 
@@ -820,7 +820,7 @@ static void port_dispatcher_sigh(void *dummy)
                struct tipc_msg *msg = buf_msg(buf);
                u32 dref = msg_destport(msg);
                
-               p_ptr = port_lock(dref);
+               p_ptr = tipc_port_lock(dref);
                if (!p_ptr) {
                        /* Port deleted while msg in queue */
                        tipc_reject_msg(buf, TIPC_ERR_NO_PORT);
@@ -976,7 +976,7 @@ static u32 port_dispatcher(struct tipc_port *dummy, struct sk_buff *buf)
                msg_queue_tail = buf;
        } else {
                msg_queue_tail = msg_queue_head = buf;
-               k_signal((Handler)port_dispatcher_sigh, 0);
+               tipc_k_signal((Handler)port_dispatcher_sigh, 0);
        }
        spin_unlock_bh(&queue_lock);
        return TIPC_OK;
@@ -994,14 +994,14 @@ static void port_wakeup_sh(unsigned long ref)
        tipc_continue_event cb = 0;
        void *uh = 0;
 
-       p_ptr = port_lock(ref);
+       p_ptr = tipc_port_lock(ref);
        if (p_ptr) {
                up_ptr = p_ptr->user_port;
                if (up_ptr) {
                        cb = up_ptr->continue_event_cb;
                        uh = up_ptr->usr_handle;
                }
-               port_unlock(p_ptr);
+               tipc_port_unlock(p_ptr);
        }
        if (cb)
                cb(uh, ref);
@@ -1010,7 +1010,7 @@ static void port_wakeup_sh(unsigned long ref)
 
 static void port_wakeup(struct tipc_port *p_ptr)
 {
-       k_signal((Handler)port_wakeup_sh, p_ptr->ref);
+       tipc_k_signal((Handler)port_wakeup_sh, p_ptr->ref);
 }
 
 void tipc_acknowledge(u32 ref, u32 ack)
@@ -1018,7 +1018,7 @@ void tipc_acknowledge(u32 ref, u32 ack)
        struct port *p_ptr;
        struct sk_buff *buf = 0;
 
-       p_ptr = port_lock(ref);
+       p_ptr = tipc_port_lock(ref);
        if (!p_ptr)
                return;
        if (p_ptr->publ.connected) {
@@ -1033,8 +1033,8 @@ void tipc_acknowledge(u32 ref, u32 ack)
                                           port_out_seqno(p_ptr),
                                           ack);
        }
-       port_unlock(p_ptr);
-       net_route_msg(buf);
+       tipc_port_unlock(p_ptr);
+       tipc_net_route_msg(buf);
 }
 
 /*
@@ -1063,7 +1063,7 @@ int tipc_createport(u32 user_ref,
                return -ENOMEM;
        }
        ref = tipc_createport_raw(0, port_dispatcher, port_wakeup, importance);
-       p_ptr = port_lock(ref);
+       p_ptr = tipc_port_lock(ref);
        if (!p_ptr) {
                kfree(up_ptr);
                return -ENOMEM;
@@ -1081,10 +1081,10 @@ int tipc_createport(u32 user_ref,
        up_ptr->conn_msg_cb = conn_msg_cb;
        up_ptr->continue_event_cb = continue_event_cb;
        INIT_LIST_HEAD(&up_ptr->uport_list);
-       reg_add_port(up_ptr);
+       tipc_reg_add_port(up_ptr);
        *portref = p_ptr->publ.ref;
        dbg(" tipc_createport: %x with ref %u\n", p_ptr, p_ptr->publ.ref);        
-       port_unlock(p_ptr);
+       tipc_port_unlock(p_ptr);
        return TIPC_OK;
 }
 
@@ -1099,7 +1099,7 @@ int tipc_portimportance(u32 ref, unsigned int *importance)
 {
        struct port *p_ptr;
        
-       p_ptr = port_lock(ref);
+       p_ptr = tipc_port_lock(ref);
        if (!p_ptr)
                return -EINVAL;
        *importance = (unsigned int)msg_importance(&p_ptr->publ.phdr);
@@ -1114,7 +1114,7 @@ int tipc_set_portimportance(u32 ref, unsigned int imp)
        if (imp > TIPC_CRITICAL_IMPORTANCE)
                return -EINVAL;
 
-       p_ptr = port_lock(ref);
+       p_ptr = tipc_port_lock(ref);
        if (!p_ptr)
                return -EINVAL;
        msg_set_importance(&p_ptr->publ.phdr, (u32)imp);
@@ -1130,7 +1130,7 @@ int tipc_publish(u32 ref, unsigned int scope, struct tipc_name_seq const *seq)
        u32 key;
        int res = -EINVAL;
 
-       p_ptr = port_lock(ref);
+       p_ptr = tipc_port_lock(ref);
        dbg("tipc_publ %u, p_ptr = %x, conn = %x, scope = %x, "
            "lower = %u, upper = %u\n",
            ref, p_ptr, p_ptr->publ.connected, scope, seq->lower, seq->upper);
@@ -1147,8 +1147,8 @@ int tipc_publish(u32 ref, unsigned int scope, struct tipc_name_seq const *seq)
                res = -EADDRINUSE;
                goto exit;
        }
-       publ = nametbl_publish(seq->type, seq->lower, seq->upper,
-                              scope, p_ptr->publ.ref, key);
+       publ = tipc_nametbl_publish(seq->type, seq->lower, seq->upper,
+                                   scope, p_ptr->publ.ref, key);
        if (publ) {
                list_add(&publ->pport_list, &p_ptr->publications);
                p_ptr->pub_count++;
@@ -1156,7 +1156,7 @@ int tipc_publish(u32 ref, unsigned int scope, struct tipc_name_seq const *seq)
                res = TIPC_OK;
        }
 exit:
-       port_unlock(p_ptr);
+       tipc_port_unlock(p_ptr);
        return res;
 }
 
@@ -1167,7 +1167,7 @@ int tipc_withdraw(u32 ref, unsigned int scope, struct tipc_name_seq const *seq)
        struct publication *tpubl;
        int res = -EINVAL;
        
-       p_ptr = port_lock(ref);
+       p_ptr = tipc_port_lock(ref);
        if (!p_ptr)
                return -EINVAL;
        if (!p_ptr->publ.published)
@@ -1175,8 +1175,8 @@ int tipc_withdraw(u32 ref, unsigned int scope, struct tipc_name_seq const *seq)
        if (!seq) {
                list_for_each_entry_safe(publ, tpubl, 
                                         &p_ptr->publications, pport_list) {
-                       nametbl_withdraw(publ->type, publ->lower, 
-                                        publ->ref, publ->key);
+                       tipc_nametbl_withdraw(publ->type, publ->lower, 
+                                             publ->ref, publ->key);
                }
                res = TIPC_OK;
        } else {
@@ -1190,8 +1190,8 @@ int tipc_withdraw(u32 ref, unsigned int scope, struct tipc_name_seq const *seq)
                                continue;
                        if (publ->upper != seq->upper)
                                break;
-                       nametbl_withdraw(publ->type, publ->lower, 
-                                        publ->ref, publ->key);
+                       tipc_nametbl_withdraw(publ->type, publ->lower, 
+                                             publ->ref, publ->key);
                        res = TIPC_OK;
                        break;
                }
@@ -1199,7 +1199,7 @@ int tipc_withdraw(u32 ref, unsigned int scope, struct tipc_name_seq const *seq)
        if (list_empty(&p_ptr->publications))
                p_ptr->publ.published = 0;
 exit:
-       port_unlock(p_ptr);
+       tipc_port_unlock(p_ptr);
        return res;
 }
 
@@ -1209,7 +1209,7 @@ int tipc_connect2port(u32 ref, struct tipc_portid const *peer)
        struct tipc_msg *msg;
        int res = -EINVAL;
 
-       p_ptr = port_lock(ref);
+       p_ptr = tipc_port_lock(ref);
        if (!p_ptr)
                return -EINVAL;
        if (p_ptr->publ.published || p_ptr->publ.connected)
@@ -1234,13 +1234,13 @@ int tipc_connect2port(u32 ref, struct tipc_portid const *peer)
        p_ptr->publ.connected = 1;
        k_start_timer(&p_ptr->timer, p_ptr->probing_interval);
 
-       nodesub_subscribe(&p_ptr->subscription,peer->node,
+       tipc_nodesub_subscribe(&p_ptr->subscription,peer->node,
                          (void *)(unsigned long)ref,
                          (net_ev_handler)port_handle_node_down);
        res = TIPC_OK;
 exit:
-       port_unlock(p_ptr);
-       p_ptr->max_pkt = link_get_max_pkt(peer->node, ref);
+       tipc_port_unlock(p_ptr);
+       p_ptr->max_pkt = tipc_link_get_max_pkt(peer->node, ref);
        return res;
 }
 
@@ -1254,16 +1254,16 @@ int tipc_disconnect(u32 ref)
        struct port *p_ptr;
        int res = -ENOTCONN;
 
-       p_ptr = port_lock(ref);
+       p_ptr = tipc_port_lock(ref);
        if (!p_ptr)
                return -EINVAL;
        if (p_ptr->publ.connected) {
                p_ptr->publ.connected = 0;
                /* let timer expire on its own to avoid deadlock! */
-               nodesub_unsubscribe(&p_ptr->subscription);
+               tipc_nodesub_unsubscribe(&p_ptr->subscription);
                res = TIPC_OK;
        }
-       port_unlock(p_ptr);
+       tipc_port_unlock(p_ptr);
        return res;
 }
 
@@ -1275,7 +1275,7 @@ int tipc_shutdown(u32 ref)
        struct port *p_ptr;
        struct sk_buff *buf = 0;
 
-       p_ptr = port_lock(ref);
+       p_ptr = tipc_port_lock(ref);
        if (!p_ptr)
                return -EINVAL;
 
@@ -1293,8 +1293,8 @@ int tipc_shutdown(u32 ref)
                                           port_out_seqno(p_ptr),
                                           0);
        }
-       port_unlock(p_ptr);
-       net_route_msg(buf);
+       tipc_port_unlock(p_ptr);
+       tipc_net_route_msg(buf);
        return tipc_disconnect(ref);
 }
 
@@ -1302,11 +1302,11 @@ int tipc_isconnected(u32 ref, int *isconnected)
 {
        struct port *p_ptr;
        
-       p_ptr = port_lock(ref);
+       p_ptr = tipc_port_lock(ref);
        if (!p_ptr)
                return -EINVAL;
        *isconnected = p_ptr->publ.connected;
-       port_unlock(p_ptr);
+       tipc_port_unlock(p_ptr);
        return TIPC_OK;
 }
 
@@ -1315,7 +1315,7 @@ int tipc_peer(u32 ref, struct tipc_portid *peer)
        struct port *p_ptr;
        int res;
         
-       p_ptr = port_lock(ref);
+       p_ptr = tipc_port_lock(ref);
        if (!p_ptr)
                return -EINVAL;
        if (p_ptr->publ.connected) {
@@ -1324,23 +1324,23 @@ int tipc_peer(u32 ref, struct tipc_portid *peer)
                res = TIPC_OK;
        } else
                res = -ENOTCONN;
-       port_unlock(p_ptr);
+       tipc_port_unlock(p_ptr);
        return res;
 }
 
 int tipc_ref_valid(u32 ref)
 {
        /* Works irrespective of type */
-       return !!ref_deref(ref);
+       return !!tipc_ref_deref(ref);
 }
 
 
 /*
- *  port_recv_sections(): Concatenate and deliver sectioned
+ *  tipc_port_recv_sections(): Concatenate and deliver sectioned
  *                        message for this node.
  */
 
-int port_recv_sections(struct port *sender, unsigned int num_sect,
+int tipc_port_recv_sections(struct port *sender, unsigned int num_sect,
                       struct iovec const *msg_sect)
 {
        struct sk_buff *buf;
@@ -1349,7 +1349,7 @@ int port_recv_sections(struct port *sender, unsigned int num_sect,
        res = msg_build(&sender->publ.phdr, msg_sect, num_sect,
                        MAX_MSG_SIZE, !sender->user_port, &buf);
        if (likely(buf))
-               port_recv_msg(buf);
+               tipc_port_recv_msg(buf);
        return res;
 }
 
@@ -1363,18 +1363,18 @@ int tipc_send(u32 ref, unsigned int num_sect, struct iovec const *msg_sect)
        u32 destnode;
        int res;
 
-       p_ptr = port_deref(ref);
+       p_ptr = tipc_port_deref(ref);
        if (!p_ptr || !p_ptr->publ.connected)
                return -EINVAL;
 
        p_ptr->publ.congested = 1;
-       if (!port_congested(p_ptr)) {
+       if (!tipc_port_congested(p_ptr)) {
                destnode = port_peernode(p_ptr);
                if (likely(destnode != tipc_own_addr))
-                       res = link_send_sections_fast(p_ptr, msg_sect, num_sect,
-                                                     destnode);
+                       res = tipc_link_send_sections_fast(p_ptr, msg_sect, num_sect,
+                                                          destnode);
                else
-                       res = port_recv_sections(p_ptr, num_sect, msg_sect);
+                       res = tipc_port_recv_sections(p_ptr, num_sect, msg_sect);
 
                if (likely(res != -ELINKCONG)) {
                        port_incr_out_seqno(p_ptr);
@@ -1404,7 +1404,7 @@ int tipc_send_buf(u32 ref, struct sk_buff *buf, unsigned int dsz)
        u32 sz;
        u32 res;
         
-       p_ptr = port_deref(ref);
+       p_ptr = tipc_port_deref(ref);
        if (!p_ptr || !p_ptr->publ.connected)
                return -EINVAL;
 
@@ -1419,11 +1419,11 @@ int tipc_send_buf(u32 ref, struct sk_buff *buf, unsigned int dsz)
        memcpy(buf->data, (unchar *)msg, hsz);
        destnode = msg_destnode(msg);
        p_ptr->publ.congested = 1;
-       if (!port_congested(p_ptr)) {
+       if (!tipc_port_congested(p_ptr)) {
                if (likely(destnode != tipc_own_addr))
                        res = tipc_send_buf_fast(buf, destnode);
                else {
-                       port_recv_msg(buf);
+                       tipc_port_recv_msg(buf);
                        res = sz;
                }
                if (likely(res != -ELINKCONG)) {
@@ -1458,7 +1458,7 @@ int tipc_forward2name(u32 ref,
        u32 destport = 0;
        int res;
 
-       p_ptr = port_deref(ref);
+       p_ptr = tipc_port_deref(ref);
        if (!p_ptr || p_ptr->publ.connected)
                return -EINVAL;
 
@@ -1472,16 +1472,16 @@ int tipc_forward2name(u32 ref,
        msg_set_lookup_scope(msg, addr_scope(domain));
        if (importance <= TIPC_CRITICAL_IMPORTANCE)
                msg_set_importance(msg,importance);
-       destport = nametbl_translate(name->type, name->instance, &destnode);
+       destport = tipc_nametbl_translate(name->type, name->instance, &destnode);
        msg_set_destnode(msg, destnode);
        msg_set_destport(msg, destport);
 
        if (likely(destport || destnode)) {
                p_ptr->sent++;
                if (likely(destnode == tipc_own_addr))
-                       return port_recv_sections(p_ptr, num_sect, msg_sect);
-               res = link_send_sections_fast(p_ptr, msg_sect, num_sect, 
-                                             destnode);
+                       return tipc_port_recv_sections(p_ptr, num_sect, msg_sect);
+               res = tipc_link_send_sections_fast(p_ptr, msg_sect, num_sect, 
+                                                  destnode);
                if (likely(res != -ELINKCONG))
                        return res;
                if (port_unreliable(p_ptr)) {
@@ -1490,8 +1490,8 @@ int tipc_forward2name(u32 ref,
                }
                return -ELINKCONG;
        }
-       return port_reject_sections(p_ptr, msg, msg_sect, num_sect, 
-                                   TIPC_ERR_NO_NAME);
+       return tipc_port_reject_sections(p_ptr, msg, msg_sect, num_sect, 
+                                        TIPC_ERR_NO_NAME);
 }
 
 /**
@@ -1530,7 +1530,7 @@ int tipc_forward_buf2name(u32 ref,
        u32 destport = 0;
        int res;
 
-       p_ptr = (struct port *)ref_deref(ref);
+       p_ptr = (struct port *)tipc_ref_deref(ref);
        if (!p_ptr || p_ptr->publ.connected)
                return -EINVAL;
 
@@ -1545,7 +1545,7 @@ int tipc_forward_buf2name(u32 ref,
        msg_set_lookup_scope(msg, addr_scope(domain));
        msg_set_hdr_sz(msg, LONG_H_SIZE);
        msg_set_size(msg, LONG_H_SIZE + dsz);
-       destport = nametbl_translate(name->type, name->instance, &destnode);
+       destport = tipc_nametbl_translate(name->type, name->instance, &destnode);
        msg_set_destnode(msg, destnode);
        msg_set_destport(msg, destport);
        msg_dbg(msg, "forw2name ==> ");
@@ -1557,7 +1557,7 @@ int tipc_forward_buf2name(u32 ref,
        if (likely(destport || destnode)) {
                p_ptr->sent++;
                if (destnode == tipc_own_addr)
-                       return port_recv_msg(buf);
+                       return tipc_port_recv_msg(buf);
                res = tipc_send_buf_fast(buf, destnode);
                if (likely(res != -ELINKCONG))
                        return res;
@@ -1601,7 +1601,7 @@ int tipc_forward2port(u32 ref,
        struct tipc_msg *msg;
        int res;
 
-       p_ptr = port_deref(ref);
+       p_ptr = tipc_port_deref(ref);
        if (!p_ptr || p_ptr->publ.connected)
                return -EINVAL;
 
@@ -1616,8 +1616,8 @@ int tipc_forward2port(u32 ref,
                msg_set_importance(msg, importance);
        p_ptr->sent++;
        if (dest->node == tipc_own_addr)
-               return port_recv_sections(p_ptr, num_sect, msg_sect);
-       res = link_send_sections_fast(p_ptr, msg_sect, num_sect, dest->node);
+               return tipc_port_recv_sections(p_ptr, num_sect, msg_sect);
+       res = tipc_link_send_sections_fast(p_ptr, msg_sect, num_sect, dest->node);
        if (likely(res != -ELINKCONG))
                return res;
        if (port_unreliable(p_ptr)) {
@@ -1658,7 +1658,7 @@ int tipc_forward_buf2port(u32 ref,
        struct tipc_msg *msg;
        int res;
 
-       p_ptr = (struct port *)ref_deref(ref);
+       p_ptr = (struct port *)tipc_ref_deref(ref);
        if (!p_ptr || p_ptr->publ.connected)
                return -EINVAL;
 
@@ -1680,7 +1680,7 @@ int tipc_forward_buf2port(u32 ref,
        msg_dbg(msg, "buf2port: ");
        p_ptr->sent++;
        if (dest->node == tipc_own_addr)
-               return port_recv_msg(buf);
+               return tipc_port_recv_msg(buf);
        res = tipc_send_buf_fast(buf, dest->node);
        if (likely(res != -ELINKCONG))
                return res;
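
For the data path, the renamed helpers split sends into a local branch and a link-layer branch. A simplified sketch of the connected-send decision (illustrative only; it omits the congestion bookkeeping that tipc_send() performs around this choice):

static int example_send(struct port *p_ptr, struct iovec const *msg_sect,
			u32 num_sect)
{
	u32 destnode;

	if (tipc_port_congested(p_ptr))
		return -ELINKCONG;	/* too far ahead of the peer's CONN_ACKs */

	destnode = port_peernode(p_ptr);
	if (destnode != tipc_own_addr)
		return tipc_link_send_sections_fast(p_ptr, msg_sect, num_sect,
						    destnode);
	return tipc_port_recv_sections(p_ptr, num_sect, msg_sect);
}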
index e829a99d3b7fd904af1703aa808055044040acff..839f100da646e8d8fc7c4a3bc6b35635bdd78963 100644 (file)
@@ -37,7 +37,7 @@
 #ifndef _TIPC_PORT_H
 #define _TIPC_PORT_H
 
-#include <net/tipc/tipc_port.h>
+#include "core.h"
 #include "ref.h"
 #include "net.h"
 #include "msg.h"
@@ -110,65 +110,65 @@ struct port {
        struct node_subscr subscription;
 };
 
-extern spinlock_t port_list_lock;
+extern spinlock_t tipc_port_list_lock;
 struct port_list;
 
-int port_recv_sections(struct port *p_ptr, u32 num_sect, 
-                      struct iovec const *msg_sect);
-int port_reject_sections(struct port *p_ptr, struct tipc_msg *hdr,
-                        struct iovec const *msg_sect, u32 num_sect,
-                        int err);
-struct sk_buff *port_get_ports(void);
+int tipc_port_recv_sections(struct port *p_ptr, u32 num_sect, 
+                           struct iovec const *msg_sect);
+int tipc_port_reject_sections(struct port *p_ptr, struct tipc_msg *hdr,
+                             struct iovec const *msg_sect, u32 num_sect,
+                             int err);
+struct sk_buff *tipc_port_get_ports(void);
 struct sk_buff *port_show_stats(const void *req_tlv_area, int req_tlv_space);
-void port_recv_proto_msg(struct sk_buff *buf);
-void port_recv_mcast(struct sk_buff *buf, struct port_list *dp);
-void port_reinit(void);
+void tipc_port_recv_proto_msg(struct sk_buff *buf);
+void tipc_port_recv_mcast(struct sk_buff *buf, struct port_list *dp);
+void tipc_port_reinit(void);
 
 /**
- * port_lock - lock port instance referred to and return its pointer
+ * tipc_port_lock - lock port instance referred to and return its pointer
  */
 
-static inline struct port *port_lock(u32 ref)
+static inline struct port *tipc_port_lock(u32 ref)
 {
-       return (struct port *)ref_lock(ref);
+       return (struct port *)tipc_ref_lock(ref);
 }
 
 /** 
- * port_unlock - unlock a port instance
+ * tipc_port_unlock - unlock a port instance
  * 
- * Can use pointer instead of ref_unlock() since port is already locked.
+ * Can use pointer instead of tipc_ref_unlock() since port is already locked.
  */
 
-static inline void port_unlock(struct port *p_ptr)
+static inline void tipc_port_unlock(struct port *p_ptr)
 {
        spin_unlock_bh(p_ptr->publ.lock);
 }
 
-static inline struct port* port_deref(u32 ref)
+static inline struct port* tipc_port_deref(u32 ref)
 {
-       return (struct port *)ref_deref(ref);
+       return (struct port *)tipc_ref_deref(ref);
 }
 
-static inline u32 peer_port(struct port *p_ptr)
+static inline u32 tipc_peer_port(struct port *p_ptr)
 {
        return msg_destport(&p_ptr->publ.phdr);
 }
 
-static inline u32 peer_node(struct port *p_ptr)
+static inline u32 tipc_peer_node(struct port *p_ptr)
 {
        return msg_destnode(&p_ptr->publ.phdr);
 }
 
-static inline int port_congested(struct port *p_ptr)
+static inline int tipc_port_congested(struct port *p_ptr)
 {
        return((p_ptr->sent - p_ptr->acked) >= (TIPC_FLOW_CONTROL_WIN * 2));
 }
 
 /** 
- * port_recv_msg - receive message from lower layer and deliver to port user
+ * tipc_port_recv_msg - receive message from lower layer and deliver to port user
  */
 
-static inline int port_recv_msg(struct sk_buff *buf)
+static inline int tipc_port_recv_msg(struct sk_buff *buf)
 {
        struct port *p_ptr;
        struct tipc_msg *msg = buf_msg(buf);
@@ -178,24 +178,24 @@ static inline int port_recv_msg(struct sk_buff *buf)
        
        /* forward unresolved named message */
        if (unlikely(!destport)) {
-               net_route_msg(buf);
+               tipc_net_route_msg(buf);
                return dsz;
        }
 
        /* validate destination & pass to port, otherwise reject message */
-       p_ptr = port_lock(destport);
+       p_ptr = tipc_port_lock(destport);
        if (likely(p_ptr)) {
                if (likely(p_ptr->publ.connected)) {
-                       if ((unlikely(msg_origport(msg) != peer_port(p_ptr))) ||
-                           (unlikely(msg_orignode(msg) != peer_node(p_ptr))) ||
+                       if ((unlikely(msg_origport(msg) != tipc_peer_port(p_ptr))) ||
+                           (unlikely(msg_orignode(msg) != tipc_peer_node(p_ptr))) ||
                            (unlikely(!msg_connected(msg)))) {
                                err = TIPC_ERR_NO_PORT;
-                               port_unlock(p_ptr);
+                               tipc_port_unlock(p_ptr);
                                goto reject;
                        }
                }
                err = p_ptr->dispatcher(&p_ptr->publ, buf);
-               port_unlock(p_ptr);
+               tipc_port_unlock(p_ptr);
                if (likely(!err))
                        return dsz;
        } else {
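
The inline tipc_port_congested() above encodes the connection-level flow control: a sender stalls once it is more than two flow-control windows ahead of the acknowledgements returned in CONN_ACK messages. Spelled out as a standalone predicate (illustrative only; example_can_send is not part of the patch):

static int example_can_send(struct port *p_ptr)
{
	u32 unacked = p_ptr->sent - p_ptr->acked;

	return unacked < (TIPC_FLOW_CONTROL_WIN * 2);
}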
index 944093fe246f1dc4025c17dbb8ae10b0ecf08b21..5a13c2defe4a2039897a2a1c9278dccb073becea 100644 (file)
  * because entry 0's reference field has the form XXXX|1--1.
  */
 
-struct ref_table ref_table = { 0 };
+struct ref_table tipc_ref_table = { 0 };
 
-rwlock_t reftbl_lock = RW_LOCK_UNLOCKED;
+static rwlock_t ref_table_lock = RW_LOCK_UNLOCKED;
 
 /**
- * ref_table_init - create reference table for objects
+ * tipc_ref_table_init - create reference table for objects
  */
 
-int ref_table_init(u32 requested_size, u32 start)
+int tipc_ref_table_init(u32 requested_size, u32 start)
 {
        struct reference *table;
        u32 sz = 1 << 4;
@@ -83,43 +83,43 @@ int ref_table_init(u32 requested_size, u32 start)
        if (table == NULL)
                return -ENOMEM;
 
-       write_lock_bh(&reftbl_lock);
+       write_lock_bh(&ref_table_lock);
        index_mask = sz - 1;
        for (i = sz - 1; i >= 0; i--) {
                table[i].object = 0;
                table[i].lock = SPIN_LOCK_UNLOCKED;
                table[i].data.next_plus_upper = (start & ~index_mask) + i - 1;
        }
-       ref_table.entries = table;
-       ref_table.index_mask = index_mask;
-       ref_table.first_free = sz - 1;
-       ref_table.last_free = 1;
-       write_unlock_bh(&reftbl_lock);
+       tipc_ref_table.entries = table;
+       tipc_ref_table.index_mask = index_mask;
+       tipc_ref_table.first_free = sz - 1;
+       tipc_ref_table.last_free = 1;
+       write_unlock_bh(&ref_table_lock);
        return TIPC_OK;
 }
 
 /**
- * ref_table_stop - destroy reference table for objects
+ * tipc_ref_table_stop - destroy reference table for objects
  */
 
-void ref_table_stop(void)
+void tipc_ref_table_stop(void)
 {
-       if (!ref_table.entries)
+       if (!tipc_ref_table.entries)
                return;
 
-       vfree(ref_table.entries);
-       ref_table.entries = 0;
+       vfree(tipc_ref_table.entries);
+       tipc_ref_table.entries = 0;
 }
 
 /**
- * ref_acquire - create reference to an object
+ * tipc_ref_acquire - create reference to an object
  * 
  * Return a unique reference value which can be translated back to the pointer
  * 'object' at a later time.  Also, pass back a pointer to the lock protecting 
  * the object, but without locking it.
  */
 
-u32 ref_acquire(void *object, spinlock_t **lock)
+u32 tipc_ref_acquire(void *object, spinlock_t **lock)
 {
        struct reference *entry;
        u32 index;
@@ -127,17 +127,17 @@ u32 ref_acquire(void *object, spinlock_t **lock)
        u32 next_plus_upper;
        u32 reference = 0;
 
-       assert(ref_table.entries && object);
+       assert(tipc_ref_table.entries && object);
 
-       write_lock_bh(&reftbl_lock);
-       if (ref_table.first_free) {
-               index = ref_table.first_free;
-               entry = &(ref_table.entries[index]);
-               index_mask = ref_table.index_mask;
+       write_lock_bh(&ref_table_lock);
+       if (tipc_ref_table.first_free) {
+               index = tipc_ref_table.first_free;
+               entry = &(tipc_ref_table.entries[index]);
+               index_mask = tipc_ref_table.index_mask;
                /* take lock in case a previous user of entry still holds it */ 
                spin_lock_bh(&entry->lock);  
                next_plus_upper = entry->data.next_plus_upper;
-               ref_table.first_free = next_plus_upper & index_mask;
+               tipc_ref_table.first_free = next_plus_upper & index_mask;
                reference = (next_plus_upper & ~index_mask) + index;
                entry->data.reference = reference;
                entry->object = object;
@@ -145,45 +145,45 @@ u32 ref_acquire(void *object, spinlock_t **lock)
                         *lock = &entry->lock;
                spin_unlock_bh(&entry->lock);
        }
-       write_unlock_bh(&reftbl_lock);
+       write_unlock_bh(&ref_table_lock);
        return reference;
 }
 
 /**
- * ref_discard - invalidate references to an object
+ * tipc_ref_discard - invalidate references to an object
  * 
  * Disallow future references to an object and free up the entry for re-use.
  * Note: The entry's spin_lock may still be busy after discard
  */
 
-void ref_discard(u32 ref)
+void tipc_ref_discard(u32 ref)
 {
        struct reference *entry;
        u32 index; 
        u32 index_mask;
 
-       assert(ref_table.entries);
+       assert(tipc_ref_table.entries);
        assert(ref != 0);
 
-       write_lock_bh(&reftbl_lock);
-       index_mask = ref_table.index_mask;
+       write_lock_bh(&ref_table_lock);
+       index_mask = tipc_ref_table.index_mask;
        index = ref & index_mask;
-       entry = &(ref_table.entries[index]);
+       entry = &(tipc_ref_table.entries[index]);
        assert(entry->object != 0);
        assert(entry->data.reference == ref);
 
        /* mark entry as unused */
        entry->object = 0;
-       if (ref_table.first_free == 0)
-               ref_table.first_free = index;
+       if (tipc_ref_table.first_free == 0)
+               tipc_ref_table.first_free = index;
        else
                /* next_plus_upper is always XXXX|0--0 for last free entry */
-               ref_table.entries[ref_table.last_free].data.next_plus_upper 
+               tipc_ref_table.entries[tipc_ref_table.last_free].data.next_plus_upper 
                        |= index;
-       ref_table.last_free = index;
+       tipc_ref_table.last_free = index;
 
        /* increment upper bits of entry to invalidate subsequent references */
        entry->data.next_plus_upper = (ref & ~index_mask) + (index_mask + 1);
-       write_unlock_bh(&reftbl_lock);
+       write_unlock_bh(&ref_table_lock);
 }
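
A reference value therefore has two parts: the low bits (masked by index_mask) select the table slot, while the upper bits act as an instance counter that tipc_ref_discard() bumps so stale references stop matching. A sketch of how a lookup validates a reference against the stored value (illustrative; the real lookups live in ref.h):

static void *example_resolve(u32 ref)
{
	u32 index = ref & tipc_ref_table.index_mask;
	struct reference *entry = &tipc_ref_table.entries[index];

	if (entry->data.reference != ref)
		return 0;		/* slot reused or reference discarded */
	return entry->object;
}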
 
index 429cde57228aa04b40f89be6ebe378cd175972ad..4f8f9f40dcacdcdd0645373a4e157fbf8b76cec9 100644 (file)
@@ -54,7 +54,7 @@ struct reference {
 };
 
 /**
- * struct ref_table - table of TIPC object reference entries
+ * struct tipc_ref_table - table of TIPC object reference entries
  * @entries: pointer to array of reference entries
  * @index_mask: bitmask for array index portion of reference values
  * @first_free: array index of first unused object reference entry
@@ -68,24 +68,24 @@ struct ref_table {
        u32 last_free;
 };
 
-extern struct ref_table ref_table;
+extern struct ref_table tipc_ref_table;
 
-int ref_table_init(u32 requested_size, u32 start);
-void ref_table_stop(void);
+int tipc_ref_table_init(u32 requested_size, u32 start);
+void tipc_ref_table_stop(void);
 
-u32 ref_acquire(void *object, spinlock_t **lock);
-void ref_discard(u32 ref);
+u32 tipc_ref_acquire(void *object, spinlock_t **lock);
+void tipc_ref_discard(u32 ref);
 
 
 /**
- * ref_lock - lock referenced object and return pointer to it
+ * tipc_ref_lock - lock referenced object and return pointer to it
  */
 
-static inline void *ref_lock(u32 ref)
+static inline void *tipc_ref_lock(u32 ref)
 {
-       if (likely(ref_table.entries)) {
+       if (likely(tipc_ref_table.entries)) {
                struct reference *r =
-                       &ref_table.entries[ref & ref_table.index_mask];
+                       &tipc_ref_table.entries[ref & tipc_ref_table.index_mask];
 
                spin_lock_bh(&r->lock);
                if (likely(r->data.reference == ref))
@@ -96,31 +96,31 @@ static inline void *ref_lock(u32 ref)
 }
 
 /**
- * ref_unlock - unlock referenced object 
+ * tipc_ref_unlock - unlock referenced object 
  */
 
-static inline void ref_unlock(u32 ref)
+static inline void tipc_ref_unlock(u32 ref)
 {
-       if (likely(ref_table.entries)) {
+       if (likely(tipc_ref_table.entries)) {
                struct reference *r =
-                       &ref_table.entries[ref & ref_table.index_mask];
+                       &tipc_ref_table.entries[ref & tipc_ref_table.index_mask];
 
                if (likely(r->data.reference == ref))
                        spin_unlock_bh(&r->lock);
                else
-                       err("ref_unlock() invoked using obsolete reference\n");
+                       err("tipc_ref_unlock() invoked using obsolete reference\n");
        }
 }
 
 /**
- * ref_deref - return pointer referenced object (without locking it)
+ * tipc_ref_deref - return pointer to referenced object (without locking it)
  */
 
-static inline void *ref_deref(u32 ref)
+static inline void *tipc_ref_deref(u32 ref)
 {
-       if (likely(ref_table.entries)) {
+       if (likely(tipc_ref_table.entries)) {
                struct reference *r = 
-                       &ref_table.entries[ref & ref_table.index_mask];
+                       &tipc_ref_table.entries[ref & tipc_ref_table.index_mask];
 
                if (likely(r->data.reference == ref))
                        return r->object;
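
The three inline helpers differ only in locking: tipc_ref_lock() returns the object with its spinlock held (release it with tipc_ref_unlock() or a typed wrapper such as tipc_port_unlock()), while tipc_ref_deref() returns it unlocked. A minimal usage sketch (example_touch is illustrative, not part of the patch):

static void example_touch(u32 ref)
{
	void *obj = tipc_ref_lock(ref);

	if (!obj)
		return;			/* stale or never-issued reference */
	/* ... use the object while its lock is held ... */
	tipc_ref_unlock(ref);
}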
index d21f8c0cd25a2e7ec9c0d688db0fb067db471978..67253bfcd70269246d0675b112cdc2fd16e9f9d6 100644 (file)
@@ -42,9 +42,7 @@
 #include <linux/mm.h>
 #include <linux/slab.h>
 #include <linux/poll.h>
-#include <linux/version.h>
 #include <linux/fcntl.h>
-#include <linux/version.h>
 #include <asm/semaphore.h>
 #include <asm/string.h>
 #include <asm/atomic.h>
@@ -1185,7 +1183,7 @@ static u32 dispatch(struct tipc_port *tport, struct sk_buff *buf)
        if (unlikely(msg_errcode(msg) && (sock->state == SS_CONNECTED))) {
                sock->state = SS_DISCONNECTING;
                /* Note: Use signal since port lock is already taken! */
-               k_signal((Handler)async_disconnect, tport->ref);
+               tipc_k_signal((Handler)async_disconnect, tport->ref);
        }
 
        /* Enqueue message (finally!) */
@@ -1685,11 +1683,11 @@ static struct proto tipc_proto = {
 };
 
 /**
- * socket_init - initialize TIPC socket interface
+ * tipc_socket_init - initialize TIPC socket interface
  * 
  * Returns 0 on success, errno otherwise
  */
-int socket_init(void)
+int tipc_socket_init(void)
 {
        int res;
 
@@ -1712,9 +1710,9 @@ int socket_init(void)
 }
 
 /**
- * sock_stop - stop TIPC socket interface
+ * tipc_socket_stop - stop TIPC socket interface
  */
-void socket_stop(void)
+void tipc_socket_stop(void)
 {
        if (!sockets_enabled)
                return;
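
The dispatch change above relies on the renamed deferral helper: work that must not run with the port lock held is handed to tipc_k_signal() and executed later. A hedged sketch of that pattern (the example_* names are not from the patch; the Handler cast follows the calls shown above):

static void example_deferred(unsigned long ref)
{
	/* runs later, outside the locked context that scheduled it */
}

static void example_schedule(u32 ref)
{
	/* safe to call while a port lock is held */
	tipc_k_signal((Handler)example_deferred, ref);
}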
index 80e219ba527d92504103ef0325b4b789c2ce5b59..5ff38b9f31945c02a49bfcc676191e7baeefa006 100644 (file)
@@ -118,14 +118,14 @@ static void subscr_send_event(struct subscription *sub,
 }
 
 /**
- * subscr_overlap - test for subscription overlap with the given values
+ * tipc_subscr_overlap - test for subscription overlap with the given values
  *
  * Returns 1 if there is overlap, otherwise 0.
  */
 
-int subscr_overlap(struct subscription *sub, 
-                  u32 found_lower, 
-                  u32 found_upper)
+int tipc_subscr_overlap(struct subscription *sub, 
+                       u32 found_lower, 
+                       u32 found_upper)
 
 {
        if (found_lower < sub->seq.lower)
@@ -138,22 +138,22 @@ int subscr_overlap(struct subscription *sub,
 }
 
 /**
- * subscr_report_overlap - issue event if there is subscription overlap
+ * tipc_subscr_report_overlap - issue event if there is subscription overlap
  * 
  * Protected by nameseq.lock in name_table.c
  */
 
-void subscr_report_overlap(struct subscription *sub, 
-                          u32 found_lower, 
-                          u32 found_upper,
-                          u32 event, 
-                          u32 port_ref, 
-                          u32 node,
-                          int must)
+void tipc_subscr_report_overlap(struct subscription *sub, 
+                               u32 found_lower, 
+                               u32 found_upper,
+                               u32 event, 
+                               u32 port_ref, 
+                               u32 node,
+                               int must)
 {
        dbg("Rep overlap %u:%u,%u<->%u,%u\n", sub->seq.type, sub->seq.lower,
            sub->seq.upper, found_lower, found_upper);
-       if (!subscr_overlap(sub, found_lower, found_upper))
+       if (!tipc_subscr_overlap(sub, found_lower, found_upper))
                return;
        if (!must && (sub->filter != TIPC_SUB_PORTS))
                return;
@@ -172,13 +172,13 @@ static void subscr_timeout(struct subscription *sub)
        /* Validate subscriber reference (in case subscriber is terminating) */
 
        subscriber_ref = sub->owner->ref;
-       subscriber = (struct subscriber *)ref_lock(subscriber_ref);
+       subscriber = (struct subscriber *)tipc_ref_lock(subscriber_ref);
        if (subscriber == NULL)
                return;
 
        /* Unlink subscription from name table */
 
-       nametbl_unsubscribe(sub);
+       tipc_nametbl_unsubscribe(sub);
 
        /* Notify subscriber of timeout, then unlink subscription */
 
@@ -192,7 +192,7 @@ static void subscr_timeout(struct subscription *sub)
 
        /* Now destroy subscription */
 
-       ref_unlock(subscriber_ref);
+       tipc_ref_unlock(subscriber_ref);
        k_term_timer(&sub->timer);
        kfree(sub);
        atomic_dec(&topsrv.subscription_count);
@@ -216,7 +216,7 @@ static void subscr_terminate(struct subscriber *subscriber)
 
        /* Invalidate subscriber reference */
 
-       ref_discard(subscriber->ref);
+       tipc_ref_discard(subscriber->ref);
        spin_unlock_bh(subscriber->lock);
 
        /* Destroy any existing subscriptions for subscriber */
@@ -227,7 +227,7 @@ static void subscr_terminate(struct subscriber *subscriber)
                        k_cancel_timer(&sub->timer);
                        k_term_timer(&sub->timer);
                }
-               nametbl_unsubscribe(sub);
+               tipc_nametbl_unsubscribe(sub);
                list_del(&sub->subscription_list);
                dbg("Term: Removed sub %u,%u,%u from subscriber %x list\n",
                    sub->seq.type, sub->seq.lower, sub->seq.upper, subscriber);
@@ -315,7 +315,7 @@ static void subscr_subscribe(struct tipc_subscr *s,
                k_start_timer(&sub->timer, sub->timeout);
        }
        sub->owner = subscriber;
-       nametbl_subscribe(sub);
+       tipc_nametbl_subscribe(sub);
 }
 
 /**
@@ -332,7 +332,7 @@ static void subscr_conn_shutdown_event(void *usr_handle,
        struct subscriber *subscriber;
        spinlock_t *subscriber_lock;
 
-       subscriber = ref_lock((u32)(unsigned long)usr_handle);
+       subscriber = tipc_ref_lock((u32)(unsigned long)usr_handle);
        if (subscriber == NULL)
                return;
 
@@ -354,7 +354,7 @@ static void subscr_conn_msg_event(void *usr_handle,
        struct subscriber *subscriber;
        spinlock_t *subscriber_lock;
 
-       subscriber = ref_lock((u32)(unsigned long)usr_handle);
+       subscriber = tipc_ref_lock((u32)(unsigned long)usr_handle);
        if (subscriber == NULL)
                return;
 
@@ -401,7 +401,7 @@ static void subscr_named_msg_event(void *usr_handle,
        memset(subscriber, 0, sizeof(struct subscriber));
        INIT_LIST_HEAD(&subscriber->subscription_list);
        INIT_LIST_HEAD(&subscriber->subscriber_list);
-       subscriber->ref = ref_acquire(subscriber, &subscriber->lock);
+       subscriber->ref = tipc_ref_acquire(subscriber, &subscriber->lock);
        if (subscriber->ref == 0) {
                warn("Failed to acquire subscriber reference\n");
                kfree(subscriber);
@@ -423,7 +423,7 @@ static void subscr_named_msg_event(void *usr_handle,
                        &subscriber->port_ref);
        if (subscriber->port_ref == 0) {
                warn("Memory squeeze; failed to create subscription port\n");
-               ref_discard(subscriber->ref);
+               tipc_ref_discard(subscriber->ref);
                kfree(subscriber);
                return;
        }
@@ -432,7 +432,7 @@ static void subscr_named_msg_event(void *usr_handle,
 
        /* Add subscriber to topology server's subscriber list */
 
-       ref_lock(subscriber->ref);
+       tipc_ref_lock(subscriber->ref);
        spin_lock_bh(&topsrv.lock);
        list_add(&subscriber->subscriber_list, &topsrv.subscriber_list);
        spin_unlock_bh(&topsrv.lock);
@@ -451,7 +451,7 @@ static void subscr_named_msg_event(void *usr_handle,
        spin_unlock_bh(subscriber_lock);
 }
 
-int subscr_start(void)
+int tipc_subscr_start(void)
 {
        struct tipc_name_seq seq = {TIPC_TOP_SRV, TIPC_TOP_SRV, TIPC_TOP_SRV};
        int res = -1;
@@ -481,7 +481,7 @@ int subscr_start(void)
        if (res)
                goto failed;
 
-       res = nametbl_publish_rsv(topsrv.setup_port, TIPC_NODE_SCOPE, &seq);
+       res = tipc_nametbl_publish_rsv(topsrv.setup_port, TIPC_NODE_SCOPE, &seq);
        if (res)
                goto failed;
 
@@ -496,7 +496,7 @@ failed:
        return res;
 }
 
-void subscr_stop(void)
+void tipc_subscr_stop(void)
 {
        struct subscriber *subscriber;
        struct subscriber *subscriber_temp;
@@ -507,7 +507,7 @@ void subscr_stop(void)
                list_for_each_entry_safe(subscriber, subscriber_temp, 
                                         &topsrv.subscriber_list,
                                         subscriber_list) {
-                       ref_lock(subscriber->ref);
+                       tipc_ref_lock(subscriber->ref);
                        subscriber_lock = subscriber->lock;
                        subscr_terminate(subscriber);
                        spin_unlock_bh(subscriber_lock);
@@ -522,6 +522,6 @@ int tipc_ispublished(struct tipc_name const *name)
 {
        u32 domain = 0;
 
-       return(nametbl_translate(name->type, name->instance,&domain) != 0);
+       return(tipc_nametbl_translate(name->type, name->instance,&domain) != 0);
 }
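
Only the first clamp of tipc_subscr_overlap() is visible in the subscr.c hunks above; the natural reading is that the upper bound is clamped the same way and overlap is reported whenever a non-empty range remains. The standalone sketch below follows that reading, so treat it as an assumption rather than a copy of the kernel source; tipc_subscr_report_overlap() then layers the event filtering on top of this predicate.

#include <stdint.h>
#include <stdio.h>

/* Does the found range [found_lower, found_upper] intersect the subscribed
 * range [sub_lower, sub_upper]?  Returns 1 on overlap, 0 otherwise. */
static int ranges_overlap(uint32_t sub_lower, uint32_t sub_upper,
                          uint32_t found_lower, uint32_t found_upper)
{
        if (found_lower < sub_lower)
                found_lower = sub_lower;        /* clamp the lower bound */
        if (found_upper > sub_upper)
                found_upper = sub_upper;        /* clamp the upper bound */
        return found_lower <= found_upper;      /* anything left over?   */
}

int main(void)
{
        printf("%d\n", ranges_overlap(10, 20, 15, 30));   /* 1: partial overlap */
        printf("%d\n", ranges_overlap(10, 20, 25, 30));   /* 0: disjoint ranges */
        return 0;
}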
 
index ccff4efcb7555beb786e859a2ad80ba81eb04d0b..1e5090465d2e3ba00756e37027967f916c0ee46e 100644 (file)
@@ -60,21 +60,21 @@ struct subscription {
        struct subscriber *owner;
 };
 
-int subscr_overlap(struct subscription * sub, 
-                  u32 found_lower, 
-                  u32 found_upper);
+int tipc_subscr_overlap(struct subscription * sub, 
+                       u32 found_lower, 
+                       u32 found_upper);
 
-void subscr_report_overlap(struct subscription * sub, 
-                          u32 found_lower, 
-                          u32 found_upper,
-                          u32 event, 
-                          u32 port_ref, 
-                          u32 node,
-                          int must_report);
+void tipc_subscr_report_overlap(struct subscription * sub, 
+                               u32 found_lower, 
+                               u32 found_upper,
+                               u32 event, 
+                               u32 port_ref, 
+                               u32 node,
+                               int must_report);
 
-int subscr_start(void);
+int tipc_subscr_start(void);
 
-void subscr_stop(void);
+void tipc_subscr_stop(void);
 
 
 #endif
index 35ec7dc8211da8dc234922790c752f65f5e635be..106200d765873d074bf3afd515a25bab7104e6da 100644 (file)
@@ -114,10 +114,10 @@ static void reg_callback(struct tipc_user *user_ptr)
 }
 
 /**
- * reg_start - activate TIPC user registry
+ * tipc_reg_start - activate TIPC user registry
  */
 
-int reg_start(void)
+int tipc_reg_start(void)
 {
        u32 u;
        int res;
@@ -127,17 +127,17 @@ int reg_start(void)
 
        for (u = 1; u <= MAX_USERID; u++) {
                if (users[u].callback)
-                       k_signal((Handler)reg_callback,
-                                (unsigned long)&users[u]);
+                       tipc_k_signal((Handler)reg_callback,
+                                     (unsigned long)&users[u]);
        }
        return TIPC_OK;
 }
 
 /**
- * reg_stop - shut down & delete TIPC user registry
+ * tipc_reg_stop - shut down & delete TIPC user registry
  */
 
-void reg_stop(void)
+void tipc_reg_stop(void)
 {               
        int id;
 
@@ -184,7 +184,7 @@ int tipc_attach(u32 *userid, tipc_mode_event cb, void *usr_handle)
        atomic_inc(&tipc_user_count);
        
        if (cb && (tipc_mode != TIPC_NOT_RUNNING))
-               k_signal((Handler)reg_callback, (unsigned long)user_ptr);
+               tipc_k_signal((Handler)reg_callback, (unsigned long)user_ptr);
        return TIPC_OK;
 }
 
@@ -223,10 +223,10 @@ void tipc_detach(u32 userid)
 }
 
 /**
- * reg_add_port - register a user's driver port
+ * tipc_reg_add_port - register a user's driver port
  */
 
-int reg_add_port(struct user_port *up_ptr)
+int tipc_reg_add_port(struct user_port *up_ptr)
 {
        struct tipc_user *user_ptr;
 
@@ -245,10 +245,10 @@ int reg_add_port(struct user_port *up_ptr)
 }
 
 /**
- * reg_remove_port - deregister a user's driver port
+ * tipc_reg_remove_port - deregister a user's driver port
  */
 
-int reg_remove_port(struct user_port *up_ptr)
+int tipc_reg_remove_port(struct user_port *up_ptr)
 {
        if (up_ptr->user_ref == 0)
                return TIPC_OK;
index 122ca9be36711775c2b0f67a1d3548017cf6117b..d0e88794ed1b48b938d5390027ba3fea923f3e13 100644 (file)
 
 #include "port.h"
 
-int reg_start(void);
-void reg_stop(void);
+int tipc_reg_start(void);
+void tipc_reg_stop(void);
 
-int reg_add_port(struct user_port *up_ptr);
-int reg_remove_port(struct user_port *up_ptr);
+int tipc_reg_add_port(struct user_port *up_ptr);
+int tipc_reg_remove_port(struct user_port *up_ptr);
 
 #endif
index 4eaef662d568bbeced6c12dae7fe469a80fd82b7..7c11f7f83a2188541fff3378b6681523d41e38c3 100644 (file)
 #include "cluster.h"
 #include "node.h"
 
-struct _zone *zone_create(u32 addr)
+struct _zone *tipc_zone_create(u32 addr)
 {
        struct _zone *z_ptr = 0;
        u32 z_num;
 
-       if (!addr_domain_valid(addr))
+       if (!tipc_addr_domain_valid(addr))
                return 0;
 
        z_ptr = (struct _zone *)kmalloc(sizeof(*z_ptr), GFP_ATOMIC);
@@ -55,24 +55,24 @@ struct _zone *zone_create(u32 addr)
                memset(z_ptr, 0, sizeof(*z_ptr));
                z_num = tipc_zone(addr);
                z_ptr->addr = tipc_addr(z_num, 0, 0);
-               net.zones[z_num] = z_ptr;
+               tipc_net.zones[z_num] = z_ptr;
        }
        return z_ptr;
 }
 
-void zone_delete(struct _zone *z_ptr)
+void tipc_zone_delete(struct _zone *z_ptr)
 {
        u32 c_num;
 
        if (!z_ptr)
                return;
        for (c_num = 1; c_num <= tipc_max_clusters; c_num++) {
-               cluster_delete(z_ptr->clusters[c_num]);
+               tipc_cltr_delete(z_ptr->clusters[c_num]);
        }
        kfree(z_ptr);
 }
 
-void zone_attach_cluster(struct _zone *z_ptr, struct cluster *c_ptr)
+void tipc_zone_attach_cluster(struct _zone *z_ptr, struct cluster *c_ptr)
 {
        u32 c_num = tipc_cluster(c_ptr->addr);
 
@@ -82,19 +82,19 @@ void zone_attach_cluster(struct _zone *z_ptr, struct cluster *c_ptr)
        z_ptr->clusters[c_num] = c_ptr;
 }
 
-void zone_remove_as_router(struct _zone *z_ptr, u32 router)
+void tipc_zone_remove_as_router(struct _zone *z_ptr, u32 router)
 {
        u32 c_num;
 
        for (c_num = 1; c_num <= tipc_max_clusters; c_num++) {
                if (z_ptr->clusters[c_num]) {
-                       cluster_remove_as_router(z_ptr->clusters[c_num], 
-                                                router);
+                       tipc_cltr_remove_as_router(z_ptr->clusters[c_num], 
+                                                  router);
                }
        }
 }
 
-void zone_send_external_routes(struct _zone *z_ptr, u32 dest)
+void tipc_zone_send_external_routes(struct _zone *z_ptr, u32 dest)
 {
        u32 c_num;
 
@@ -102,12 +102,12 @@ void zone_send_external_routes(struct _zone *z_ptr, u32 dest)
                if (z_ptr->clusters[c_num]) {
                        if (in_own_cluster(z_ptr->addr))
                                continue;
-                       cluster_send_ext_routes(z_ptr->clusters[c_num], dest);
+                       tipc_cltr_send_ext_routes(z_ptr->clusters[c_num], dest);
                }
        }
 }
 
-struct node *zone_select_remote_node(struct _zone *z_ptr, u32 addr, u32 ref)
+struct node *tipc_zone_select_remote_node(struct _zone *z_ptr, u32 addr, u32 ref)
 {
        struct cluster *c_ptr;
        struct node *n_ptr;
@@ -118,7 +118,7 @@ struct node *zone_select_remote_node(struct _zone *z_ptr, u32 addr, u32 ref)
        c_ptr = z_ptr->clusters[tipc_cluster(addr)];
        if (!c_ptr)
                return 0;
-       n_ptr = cluster_select_node(c_ptr, ref);
+       n_ptr = tipc_cltr_select_node(c_ptr, ref);
        if (n_ptr)
                return n_ptr;
 
@@ -127,14 +127,14 @@ struct node *zone_select_remote_node(struct _zone *z_ptr, u32 addr, u32 ref)
                c_ptr = z_ptr->clusters[c_num];
                if (!c_ptr)
                        return 0;
-               n_ptr = cluster_select_node(c_ptr, ref);
+               n_ptr = tipc_cltr_select_node(c_ptr, ref);
                if (n_ptr)
                        return n_ptr;
        }
        return 0;
 }
 
-u32 zone_select_router(struct _zone *z_ptr, u32 addr, u32 ref)
+u32 tipc_zone_select_router(struct _zone *z_ptr, u32 addr, u32 ref)
 {
        struct cluster *c_ptr;
        u32 c_num;
@@ -143,14 +143,14 @@ u32 zone_select_router(struct _zone *z_ptr, u32 addr, u32 ref)
        if (!z_ptr)
                return 0;
        c_ptr = z_ptr->clusters[tipc_cluster(addr)];
-       router = c_ptr ? cluster_select_router(c_ptr, ref) : 0;
+       router = c_ptr ? tipc_cltr_select_router(c_ptr, ref) : 0;
        if (router)
                return router;
 
        /* Links to any other clusters within the zone? */
        for (c_num = 1; c_num <= tipc_max_clusters; c_num++) {
                c_ptr = z_ptr->clusters[c_num];
-               router = c_ptr ? cluster_select_router(c_ptr, ref) : 0;
+               router = c_ptr ? tipc_cltr_select_router(c_ptr, ref) : 0;
                if (router)
                        return router;
        }
@@ -158,12 +158,12 @@ u32 zone_select_router(struct _zone *z_ptr, u32 addr, u32 ref)
 }
 
 
-u32 zone_next_node(u32 addr)
+u32 tipc_zone_next_node(u32 addr)
 {
-       struct cluster *c_ptr = cluster_find(addr);
+       struct cluster *c_ptr = tipc_cltr_find(addr);
 
        if (c_ptr)
-               return cluster_next_node(c_ptr, addr);
+               return tipc_cltr_next_node(c_ptr, addr);
        return 0;
 }
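
The zone selection helpers renamed above follow one ordering rule: consult the cluster that the destination address maps to first, and only then scan the remaining clusters in the zone for a usable node or router. A simplified, self-contained sketch of that ordering follows; the array size and names are placeholders, not the kernel's types.

#include <stdio.h>

#define MAX_CLUSTERS 4

/* Pick a router: prefer the addressed cluster, then fall back to any other
 * cluster in the zone that can supply one.  0 means "none found". */
static int pick_router(const int routers[MAX_CLUSTERS + 1], int preferred)
{
        int c;

        if (routers[preferred])
                return routers[preferred];
        for (c = 1; c <= MAX_CLUSTERS; c++)
                if (routers[c])
                        return routers[c];
        return 0;
}

int main(void)
{
        /* routers[c] is the router advertised by cluster c (index 0 unused) */
        int routers[MAX_CLUSTERS + 1] = { 0, 0, 7, 0, 9 };

        printf("%d\n", pick_router(routers, 2));   /* 7: preferred cluster hit  */
        printf("%d\n", pick_router(routers, 1));   /* 7: fall back to cluster 2 */
        return 0;
}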
 
index 4326f78d82926f5c730e87e302dbfee96048366b..267999c5a240fb570fa9e52e4694b482b2e7f700 100644 (file)
@@ -54,18 +54,18 @@ struct _zone {
        u32 links;
 };
 
-struct node *zone_select_remote_node(struct _zone *z_ptr, u32 addr, u32 ref);
-u32 zone_select_router(struct _zone *z_ptr, u32 addr, u32 ref);
-void zone_remove_as_router(struct _zone *z_ptr, u32 router);
-void zone_send_external_routes(struct _zone *z_ptr, u32 dest);
-struct _zone *zone_create(u32 addr);
-void zone_delete(struct _zone *z_ptr);
-void zone_attach_cluster(struct _zone *z_ptr, struct cluster *c_ptr);
-u32 zone_next_node(u32 addr);
+struct node *tipc_zone_select_remote_node(struct _zone *z_ptr, u32 addr, u32 ref);
+u32 tipc_zone_select_router(struct _zone *z_ptr, u32 addr, u32 ref);
+void tipc_zone_remove_as_router(struct _zone *z_ptr, u32 router);
+void tipc_zone_send_external_routes(struct _zone *z_ptr, u32 dest);
+struct _zone *tipc_zone_create(u32 addr);
+void tipc_zone_delete(struct _zone *z_ptr);
+void tipc_zone_attach_cluster(struct _zone *z_ptr, struct cluster *c_ptr);
+u32 tipc_zone_next_node(u32 addr);
 
-static inline struct _zone *zone_find(u32 addr)
+static inline struct _zone *tipc_zone_find(u32 addr)
 {
-       return net.zones[tipc_zone(addr)];
+       return tipc_net.zones[tipc_zone(addr)];
 }
 
 #endif
index 1caac0164643c202a55c1e8394b9669706723591..8529ea6f7aa83437174727478988702d4e9db89b 100644 (file)
@@ -368,8 +368,8 @@ static int seclvl_capable(struct task_struct *tsk, int cap)
  */
 static int seclvl_settime(struct timespec *tv, struct timezone *tz)
 {
-       struct timespec now;
-       if (seclvl > 1) {
+       if (tv && seclvl > 1) {
+               struct timespec now;
                now = current_kernel_time();
                if (tv->tv_sec < now.tv_sec ||
                    (tv->tv_sec == now.tv_sec && tv->tv_nsec < now.tv_nsec)) {
index 5f0ad6bb43b9f5adc0fbc09bc75b76c9d8a68580..a21c663e7e12564a334b2de16957a99cb95fd21a 100644 (file)
@@ -278,16 +278,14 @@ static char *card_names[] = {
 };
 
 static struct pci_device_id trident_pci_tbl[] = {
-       {PCI_VENDOR_ID_TRIDENT, PCI_DEVICE_ID_TRIDENT_4DWAVE_DX,
-        PCI_ANY_ID, PCI_ANY_ID, 0, 0, TRIDENT_4D_DX},
-       {PCI_VENDOR_ID_TRIDENT, PCI_DEVICE_ID_TRIDENT_4DWAVE_NX,
-        PCI_ANY_ID, PCI_ANY_ID, 0, 0, TRIDENT_4D_NX},
-       {PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_7018,
-        PCI_ANY_ID, PCI_ANY_ID, 0, 0, SIS_7018},
-       {PCI_VENDOR_ID_ALI, PCI_DEVICE_ID_ALI_5451,
-        PCI_ANY_ID, PCI_ANY_ID, 0, 0, ALI_5451},
-       {PCI_VENDOR_ID_INTERG, PCI_DEVICE_ID_INTERG_5050,
-        PCI_ANY_ID, PCI_ANY_ID, 0, 0, CYBER5050},
+       {PCI_DEVICE(PCI_VENDOR_ID_TRIDENT, PCI_DEVICE_ID_TRIDENT_4DWAVE_DX),
+               PCI_CLASS_MULTIMEDIA_AUDIO << 8, 0xffff00, TRIDENT_4D_DX},
+       {PCI_DEVICE(PCI_VENDOR_ID_TRIDENT, PCI_DEVICE_ID_TRIDENT_4DWAVE_NX),
+               0, 0, TRIDENT_4D_NX},
+       {PCI_DEVICE(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_7018), 0, 0, SIS_7018},
+       {PCI_DEVICE(PCI_VENDOR_ID_ALI, PCI_DEVICE_ID_ALI_5451), 0, 0, ALI_5451},
+       {PCI_DEVICE(PCI_VENDOR_ID_INTERG, PCI_DEVICE_ID_INTERG_5050),
+               0, 0, CYBER5050},
        {0,}
 };
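
The trident table conversion above relies on PCI_DEVICE() filling in the vendor and device fields and wildcarding the subsystem IDs, so the remaining positional initializers line up with class, class_mask and driver_data; note that only the 4DWave DX entry keeps a class match. The stand-alone model below mimics that expansion; the macro, structure and numeric IDs here are illustrative stand-ins, not the kernel definitions.

#include <stdio.h>

/* Illustrative stand-ins for the kernel's pci.h definitions. */
#define ANY_ID (~0u)
#define DEVICE_ENTRY(vend, dev) \
        .vendor = (vend), .device = (dev), \
        .subvendor = ANY_ID, .subdevice = ANY_ID

struct id_model {
        unsigned int vendor, device, subvendor, subdevice;
        unsigned int class, class_mask;
        unsigned long driver_data;
};

/* After the designated initializers from the macro, the positional values
 * continue with class, class_mask and driver_data, as in the table above. */
static const struct id_model example = {
        DEVICE_ENTRY(0x1234, 0x5678), 0x0401 << 8, 0xffff00, 1
};

int main(void)
{
        printf("vendor=%04x device=%04x subvendor=%08x class=%06x\n",
               example.vendor, example.device, example.subvendor, example.class);
        return 0;
}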
 
index e9086e95a31f13686a3db0cde9946f9a72091332..fd654399878854f22715cf1539a9fea4ec721178 100644 (file)
@@ -69,13 +69,14 @@ struct sbus_dma_info {
 };
 #endif
 
+struct snd_cs4231;
 struct cs4231_dma_control {
         void           (*prepare)(struct cs4231_dma_control *dma_cont, int dir);
         void           (*enable)(struct cs4231_dma_control *dma_cont, int on);
         int            (*request)(struct cs4231_dma_control *dma_cont, dma_addr_t bus_addr, size_t len);
         unsigned int   (*address)(struct cs4231_dma_control *dma_cont);
         void           (*reset)(struct snd_cs4231 *chip); 
-        void           (*preallocate)(struct snd_cs4231 *chip, struct snd_snd_pcm *pcm); 
+        void           (*preallocate)(struct snd_cs4231 *chip, struct snd_pcm *pcm); 
 #ifdef EBUS_SUPPORT
        struct          ebus_dma_info   ebus_info;
 #endif
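
The cs4231 hunk adds a forward declaration of struct snd_cs4231 because the dma_control ops only refer to it through pointers, and it also corrects the misspelled struct snd_snd_pcm parameter type. As a generic illustration (the names below are made up), a forward declaration is all a pointer-only use needs:

#include <stdio.h>

struct widget;                             /* forward declaration is enough... */

struct widget_ops {
        void (*reset)(struct widget *w);   /* ...for pointer-typed parameters   */
};

struct widget {                            /* full definition can come later    */
        int state;
};

static void widget_reset(struct widget *w)
{
        w->state = 0;
}

int main(void)
{
        struct widget w = { 5 };
        struct widget_ops ops = { widget_reset };

        ops.reset(&w);
        printf("%d\n", w.state);           /* prints 0 */
        return 0;
}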