S: Maintained
i386 SETUP CODE / CPU ERRATA WORKAROUNDS
-P: Dave Jones
-M: davej@codemonkey.org.uk
P: H. Peter Anvin
M: hpa@zytor.com
S: Maintained
P: Marcel Selhorst
M: tpm@selhorst.net
W: http://www.prosec.rub.de/tpm/
+L: tpmdd-devel@lists.sourceforge.net
S: Maintained
TELECOM CLOCK DRIVER FOR MCPL0010
obj-$(CONFIG_MICROCODE) += microcode.o
obj-$(CONFIG_APM) += apm.o
obj-$(CONFIG_X86_SMP) += smp.o smpboot.o tsc_sync.o
+obj-$(CONFIG_SMP) += smpcommon.o
obj-$(CONFIG_X86_TRAMPOLINE) += trampoline.o
obj-$(CONFIG_X86_MPPARSE) += mpparse.o
obj-$(CONFIG_X86_LOCAL_APIC) += apic.o nmi.o
pc.val = (unsigned long) acpi_processor_perf->states[0].control;
for (i = 0; i < number_scales; i++) {
u8 fid, vid;
- unsigned int speed;
+ struct acpi_processor_px *state =
+ &acpi_processor_perf->states[i];
+ unsigned int speed, speed_mhz;
- pc.val = (unsigned long) acpi_processor_perf->states[i].control;
+ pc.val = (unsigned long) state->control;
dprintk ("acpi: P%d: %d MHz %d mW %d uS control %08x SGTC %d\n",
i,
- (u32) acpi_processor_perf->states[i].core_frequency,
- (u32) acpi_processor_perf->states[i].power,
- (u32) acpi_processor_perf->states[i].transition_latency,
- (u32) acpi_processor_perf->states[i].control,
+ (u32) state->core_frequency,
+ (u32) state->power,
+ (u32) state->transition_latency,
+ (u32) state->control,
pc.bits.sgtc);
vid = pc.bits.vid;
powernow_table[i].index |= (vid << 8); /* upper 8 bits */
speed = powernow_table[i].frequency;
+ speed_mhz = speed / 1000;
+
+ /* processor_perflib will multiply the MHz value by 1000 to
+ * get a KHz value (e.g. 1266000). However, powernow-k7 works
+ * with true KHz values (e.g. 1266768). To ensure that all
+ * powernow frequencies are available, we must ensure that
+ * ACPI doesn't restrict them, so we round up the MHz value
+ * to ensure that perflib's computed KHz value is greater than
+ * or equal to powernow's KHz value.
+ */
+ if (speed % 1000 > 0)
+ speed_mhz++;
if ((fid_codes[fid] % 10)==5) {
if (have_a0 == 1)
dprintk (" FID: 0x%x (%d.%dx [%dMHz]) "
"VID: 0x%x (%d.%03dV)\n", fid, fid_codes[fid] / 10,
- fid_codes[fid] % 10, speed/1000, vid,
+ fid_codes[fid] % 10, speed_mhz, vid,
mobile_vid_table[vid]/1000,
mobile_vid_table[vid]%1000);
+ if (state->core_frequency != speed_mhz) {
+ state->core_frequency = speed_mhz;
+ dprintk(" Corrected ACPI frequency to %d\n",
+ speed_mhz);
+ }
+
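The round-up above is plain integer arithmetic; a minimal worked sketch, reusing the comment's own 1266000/1266768 kHz example:

	unsigned int speed = 1266768;		/* true kHz from the powernow tables */
	unsigned int speed_mhz = speed / 1000;	/* 1266 -- integer division truncates */
	if (speed % 1000 > 0)
		speed_mhz++;			/* 1267, so perflib's 1267 * 1000 >= 1266768 */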
if (latency < pc.bits.sgtc)
latency = pc.bits.sgtc;
result = powernow_acpi_init();
if (result) {
printk (KERN_INFO PFX "ACPI and legacy methods failed\n");
- printk (KERN_INFO PFX "See http://www.codemonkey.org.uk/projects/cpufreq/powernow-k7.shtml\n");
+ printk (KERN_INFO PFX "See http://www.codemonkey.org.uk/projects/cpufreq/powernow-k7.html\n");
}
} else {
/* SGTC use the bus clock as timer */
if ((eax & CPUID_XFAM) == CPUID_XFAM_K8) {
if (((eax & CPUID_USE_XFAM_XMOD) != CPUID_USE_XFAM_XMOD) ||
- ((eax & CPUID_XMOD) > CPUID_XMOD_REV_G)) {
+ ((eax & CPUID_XMOD) > CPUID_XMOD_REV_MASK)) {
printk(KERN_INFO PFX "Processor cpuid %x not supported\n", eax);
goto out;
}
#define CPUID_XFAM 0x0ff00000 /* extended family */
#define CPUID_XFAM_K8 0
#define CPUID_XMOD 0x000f0000 /* extended model */
-#define CPUID_XMOD_REV_G 0x00060000
-#define CPUID_XFAM_10H 0x00100000 /* family 0x10 */
+#define CPUID_XMOD_REV_MASK 0x00080000
+#define CPUID_XFAM_10H 0x00100000 /* family 0x10 */
#define CPUID_USE_XFAM_XMOD 0x00000f00
#define CPUID_GET_MAX_CAPABILITIES 0x80000000
#define CPUID_FREQ_VOLT_CAPABILITIES 0x80000007
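These masks carve up the EAX value returned by CPUID function 1: bits 27:20 are the extended family, bits 19:16 the extended model, and bits 11:8 the base family (the extended fields only apply when the base family reads 0xf). A standalone decode sketch, with a made-up EAX value for illustration:

	#include <stdio.h>

	#define CPUID_XFAM		0x0ff00000	/* extended family, bits 27:20 */
	#define CPUID_XMOD		0x000f0000	/* extended model, bits 19:16 */
	#define CPUID_USE_XFAM_XMOD	0x00000f00	/* base family, bits 11:8 */

	int main(void)
	{
		unsigned int eax = 0x00060fb1;	/* hypothetical CPUID fn 1 EAX */

		printf("xfam=0x%x xmod=0x%x base fam=0x%x\n",
		       (eax & CPUID_XFAM) >> 20,
		       (eax & CPUID_XMOD) >> 16,
		       (eax & CPUID_USE_XFAM_XMOD) >> 8);
		return 0;
	}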
* it goes straight through and wastes no time serializing
* anything. Worst case is that we lose a reschedule ...
*/
-void native_smp_send_reschedule(int cpu)
+static void native_smp_send_reschedule(int cpu)
{
WARN_ON(cpu_is_offline(cpu));
send_IPI_mask(cpumask_of_cpu(cpu), RESCHEDULE_VECTOR);
* You must not call this function with disabled interrupts or from a
* hardware interrupt handler or from a bottom half handler.
*/
-int native_smp_call_function_mask(cpumask_t mask,
- void (*func)(void *), void *info,
- int wait)
+static int
+native_smp_call_function_mask(cpumask_t mask,
+ void (*func)(void *), void *info,
+ int wait)
{
struct call_data_struct data;
cpumask_t allbutself;
return 0;
}
-/**
- * smp_call_function(): Run a function on all other CPUs.
- * @func: The function to run. This must be fast and non-blocking.
- * @info: An arbitrary pointer to pass to the function.
- * @nonatomic: Unused.
- * @wait: If true, wait (atomically) until function has completed on other CPUs.
- *
- * Returns 0 on success, else a negative status code.
- *
- * If @wait is true, then returns once @func has returned; otherwise
- * it returns just before the target cpu calls @func.
- *
- * You must not call this function with disabled interrupts or from a
- * hardware interrupt handler or from a bottom half handler.
- */
-int smp_call_function(void (*func) (void *info), void *info, int nonatomic,
- int wait)
-{
- return smp_call_function_mask(cpu_online_map, func, info, wait);
-}
-EXPORT_SYMBOL(smp_call_function);
-
-/**
- * smp_call_function_single - Run a function on another CPU
- * @cpu: The target CPU. Cannot be the calling CPU.
- * @func: The function to run. This must be fast and non-blocking.
- * @info: An arbitrary pointer to pass to the function.
- * @nonatomic: Unused.
- * @wait: If true, wait until function has completed on other CPUs.
- *
- * Returns 0 on success, else a negative status code.
- *
- * If @wait is true, then returns once @func has returned; otherwise
- * it returns just before the target cpu calls @func.
- */
-int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
- int nonatomic, int wait)
-{
- /* prevent preemption and reschedule on another processor */
- int ret;
- int me = get_cpu();
- if (cpu == me) {
- WARN_ON(1);
- put_cpu();
- return -EBUSY;
- }
-
- ret = smp_call_function_mask(cpumask_of_cpu(cpu), func, info, wait);
-
- put_cpu();
- return ret;
-}
-EXPORT_SYMBOL(smp_call_function_single);
-
static void stop_this_cpu (void * dummy)
{
local_irq_disable();
* this function calls the 'stop' function on all other CPUs in the system.
*/
-void native_smp_send_stop(void)
+static void native_smp_send_stop(void)
{
/* Don't deadlock on the call lock in panic */
int nolock = !spin_trylock(&call_lock);
u8 apicid_2_node[MAX_APICID];
-DEFINE_PER_CPU(unsigned long, this_cpu_off);
-EXPORT_PER_CPU_SYMBOL(this_cpu_off);
-
/*
* Trampoline 80x86 program as an array.
*/
#define alloc_idle_task(cpu) fork_idle(cpu)
#endif
-/* Initialize the CPU's GDT. This is either the boot CPU doing itself
- (still using the master per-cpu area), or a CPU doing it for a
- secondary which will soon come up. */
-static __cpuinit void init_gdt(int cpu)
-{
- struct desc_struct *gdt = get_cpu_gdt_table(cpu);
-
- pack_descriptor((u32 *)&gdt[GDT_ENTRY_PERCPU].a,
- (u32 *)&gdt[GDT_ENTRY_PERCPU].b,
- __per_cpu_offset[cpu], 0xFFFFF,
- 0x80 | DESCTYPE_S | 0x2, 0x8);
-
- per_cpu(this_cpu_off, cpu) = __per_cpu_offset[cpu];
- per_cpu(cpu_number, cpu) = cpu;
-}
-
-/* Defined in head.S */
-extern struct Xgt_desc_struct early_gdt_descr;
-
static int __cpuinit do_boot_cpu(int apicid, int cpu)
/*
* NOTE - on most systems this is a PHYSICAL apic ID, but on multiquad
--- /dev/null
+/*
+ * SMP stuff which is common to all sub-architectures.
+ */
+#include <linux/module.h>
+#include <asm/smp.h>
+
+DEFINE_PER_CPU(unsigned long, this_cpu_off);
+EXPORT_PER_CPU_SYMBOL(this_cpu_off);
+
+/* Initialize the CPU's GDT. This is either the boot CPU doing itself
+ (still using the master per-cpu area), or a CPU doing it for a
+ secondary which will soon come up. */
+__cpuinit void init_gdt(int cpu)
+{
+ struct desc_struct *gdt = get_cpu_gdt_table(cpu);
+
+ pack_descriptor((u32 *)&gdt[GDT_ENTRY_PERCPU].a,
+ (u32 *)&gdt[GDT_ENTRY_PERCPU].b,
+ __per_cpu_offset[cpu], 0xFFFFF,
+ 0x80 | DESCTYPE_S | 0x2, 0x8);
+
+ per_cpu(this_cpu_off, cpu) = __per_cpu_offset[cpu];
+ per_cpu(cpu_number, cpu) = cpu;
+}
+
+
+/**
+ * smp_call_function(): Run a function on all other CPUs.
+ * @func: The function to run. This must be fast and non-blocking.
+ * @info: An arbitrary pointer to pass to the function.
+ * @nonatomic: Unused.
+ * @wait: If true, wait (atomically) until function has completed on other CPUs.
+ *
+ * Returns 0 on success, else a negative status code.
+ *
+ * If @wait is true, then returns once @func has returned; otherwise
+ * it returns just before the target cpu calls @func.
+ *
+ * You must not call this function with disabled interrupts or from a
+ * hardware interrupt handler or from a bottom half handler.
+ */
+int smp_call_function(void (*func) (void *info), void *info, int nonatomic,
+ int wait)
+{
+ return smp_call_function_mask(cpu_online_map, func, info, wait);
+}
+EXPORT_SYMBOL(smp_call_function);
+
+/**
+ * smp_call_function_single - Run a function on another CPU
+ * @cpu: The target CPU. Cannot be the calling CPU.
+ * @func: The function to run. This must be fast and non-blocking.
+ * @info: An arbitrary pointer to pass to the function.
+ * @nonatomic: Unused.
+ * @wait: If true, wait until function has completed on other CPUs.
+ *
+ * Returns 0 on success, else a negative status code.
+ *
+ * If @wait is true, then returns once @func has returned; otherwise
+ * it returns just before the target cpu calls @func.
+ */
+int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
+ int nonatomic, int wait)
+{
+ /* prevent preemption and reschedule on another processor */
+ int ret;
+ int me = get_cpu();
+ if (cpu == me) {
+ WARN_ON(1);
+ put_cpu();
+ return -EBUSY;
+ }
+
+ ret = smp_call_function_mask(cpumask_of_cpu(cpu), func, info, wait);
+
+ put_cpu();
+ return ret;
+}
+EXPORT_SYMBOL(smp_call_function_single);
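For reference, a minimal caller sketch against this four-argument 2.6.22-era signature (do_sync is a hypothetical callback; per the kernel-doc above, never call this with interrupts disabled):

	static void do_sync(void *unused)
	{
		wbinvd();			/* must be fast and non-blocking */
	}

	static void sync_all_cpus(void)
	{
		/* nonatomic is unused; wait=1 blocks until every other CPU ran it */
		smp_call_function(do_sync, NULL, 0, 1);
		do_sync(NULL);			/* "all *other* CPUs" excludes ourselves */
	}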
*/
if (nmi_watchdog_tick(regs, reason))
return;
-#endif
- if (notify_die(DIE_NMI_POST, "nmi_post", regs, reason, 2, 0)
- == NOTIFY_STOP)
- return;
-#ifdef CONFIG_X86_LOCAL_APIC
if (!do_nmi_callback(regs, smp_processor_id()))
#endif
unknown_nmi_error(reason, regs);
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/arch_hooks.h>
-#include <asm/pda.h>
/* TLB state -- visible externally, indexed physically */
DEFINE_PER_CPU(struct tlb_state, cpu_tlbstate) ____cacheline_aligned = { &init_mm, 0 };
VOYAGER_SUS_IN_CONTROL_PORT);
current_thread_info()->cpu = boot_cpu_id;
- write_pda(cpu_number, boot_cpu_id);
+ x86_write_percpu(cpu_number, boot_cpu_id);
}
/*
*c = boot_cpu_data;
- identify_cpu(c);
+ identify_secondary_cpu(c);
}
/* set up the trampoline and return the physical address of the code */
/* external functions not defined in the headers */
extern void calibrate_delay(void);
- secondary_cpu_init();
+ cpu_init();
/* OK, we're in the routine */
ack_CPI(VIC_CPU_BOOT_CPI);
/* init_tasks (in sched.c) is indexed logically */
stack_start.esp = (void *) idle->thread.esp;
- init_gdt(cpu, idle);
+ init_gdt(cpu);
+ per_cpu(current_task, cpu) = idle;
+ early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
irq_ctx_init(cpu);
/* Note: Don't modify initial ss override */
/* This routine is called with a physical cpu mask */
static void
-flush_tlb_others (unsigned long cpumask, struct mm_struct *mm,
- unsigned long va)
+voyager_flush_tlb_others (unsigned long cpumask, struct mm_struct *mm,
+ unsigned long va)
{
int stuck = 50000;
cpu_mask = cpus_addr(mm->cpu_vm_mask)[0] & ~(1 << smp_processor_id());
local_flush_tlb();
if (cpu_mask)
- flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
+ voyager_flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
preempt_enable();
}
leave_mm(smp_processor_id());
}
if (cpu_mask)
- flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
+ voyager_flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
preempt_enable();
}
}
if (cpu_mask)
- flush_tlb_others(cpu_mask, mm, va);
+ voyager_flush_tlb_others(cpu_mask, mm, va);
preempt_enable();
}
}
static int
-__smp_call_function_mask (void (*func) (void *info), void *info, int retry,
- int wait, __u32 mask)
+voyager_smp_call_function_mask (cpumask_t cpumask,
+ void (*func) (void *info), void *info,
+ int wait)
{
struct call_data_struct data;
+ u32 mask = cpus_addr(cpumask)[0];
mask &= ~(1<<smp_processor_id());
return 0;
}
-/* Call this function on all CPUs using the function_interrupt above
- <func> The function to run. This must be fast and non-blocking.
- <info> An arbitrary pointer to pass to the function.
- <retry> If true, keep retrying until ready.
- <wait> If true, wait until function has completed on other CPUs.
- [RETURNS] 0 on success, else a negative status code. Does not return until
- remote CPUs are nearly ready to execute <<func>> or are or have executed.
-*/
-int
-smp_call_function(void (*func) (void *info), void *info, int retry,
- int wait)
-{
- __u32 mask = cpus_addr(cpu_online_map)[0];
-
- return __smp_call_function_mask(func, info, retry, wait, mask);
-}
-EXPORT_SYMBOL(smp_call_function);
-
-/*
- * smp_call_function_single - Run a function on another CPU
- * @func: The function to run. This must be fast and non-blocking.
- * @info: An arbitrary pointer to pass to the function.
- * @nonatomic: Currently unused.
- * @wait: If true, wait until function has completed on other CPUs.
- *
- * Retrurns 0 on success, else a negative status code.
- *
- * Does not return until the remote CPU is nearly ready to execute <func>
- * or is or has executed.
- */
-
-int
-smp_call_function_single(int cpu, void (*func) (void *info), void *info,
- int nonatomic, int wait)
-{
- __u32 mask = 1 << cpu;
-
- return __smp_call_function_mask(func, info, nonatomic, wait, mask);
-}
-EXPORT_SYMBOL(smp_call_function_single);
-
/* Sorry about the name. In an APIC based system, the APICs
* themselves are programmed to send a timer interrupt. This is used
* by linux to reschedule the processor. Voyager doesn't have this,
}
/* send a reschedule CPI to one CPU by physical CPU number*/
-void
-smp_send_reschedule(int cpu)
+static void
+voyager_smp_send_reschedule(int cpu)
{
send_one_CPI(cpu, VIC_RESCHEDULE_CPI);
}
}
/* broadcast a halt to all other CPUs */
-void
-smp_send_stop(void)
+static void
+voyager_smp_send_stop(void)
{
smp_call_function(smp_stop_cpu_function, NULL, 1, 1);
}
smp_stop_cpu_function(NULL);
}
-void __init
-smp_prepare_cpus(unsigned int max_cpus)
+static void __init
+voyager_smp_prepare_cpus(unsigned int max_cpus)
{
/* FIXME: ignore max_cpus for now */
smp_boot_cpus();
}
-void __devinit smp_prepare_boot_cpu(void)
+static void __devinit voyager_smp_prepare_boot_cpu(void)
{
+ init_gdt(smp_processor_id());
+ switch_to_new_gdt();
+
cpu_set(smp_processor_id(), cpu_online_map);
cpu_set(smp_processor_id(), cpu_callout_map);
cpu_set(smp_processor_id(), cpu_possible_map);
cpu_set(smp_processor_id(), cpu_present_map);
}
-int __devinit
-__cpu_up(unsigned int cpu)
+static int __devinit
+voyager_cpu_up(unsigned int cpu)
{
/* This only works at boot for x86. See "rewrite" above. */
if (cpu_isset(cpu, smp_commenced_mask))
return 0;
}
-void __init
-smp_cpus_done(unsigned int max_cpus)
+static void __init
+voyager_smp_cpus_done(unsigned int max_cpus)
{
zap_low_mappings();
}
smp_setup_processor_id(void)
{
current_thread_info()->cpu = hard_smp_processor_id();
- write_pda(cpu_number, hard_smp_processor_id());
+ x86_write_percpu(cpu_number, hard_smp_processor_id());
}
+
+struct smp_ops smp_ops = {
+ .smp_prepare_boot_cpu = voyager_smp_prepare_boot_cpu,
+ .smp_prepare_cpus = voyager_smp_prepare_cpus,
+ .cpu_up = voyager_cpu_up,
+ .smp_cpus_done = voyager_smp_cpus_done,
+
+ .smp_send_stop = voyager_smp_send_stop,
+ .smp_send_reschedule = voyager_smp_send_reschedule,
+ .smp_call_function_mask = voyager_smp_call_function_mask,
+};
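This table is the point of the rename: generic i386 code now reaches the SMP primitives through one level of indirection, so (to a first approximation, assuming the 2.6.22 include/asm-i386/smp.h wrappers) the common entry points look like:

	static inline void smp_send_reschedule(int cpu)
	{
		smp_ops.smp_send_reschedule(cpu);
	}

	static inline int smp_call_function_mask(cpumask_t mask,
						 void (*func)(void *), void *info,
						 int wait)
	{
		return smp_ops.smp_call_function_mask(mask, func, info, wait);
	}

Voyager overrides the default native_* table simply by defining its own struct smp_ops, which is why the formerly global smp_send_reschedule()/smp_send_stop() definitions above become static voyager_* functions.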
#include <linux/module.h>
#include <linux/kexec.h>
#include <linux/pfn.h>
+#include <linux/swap.h>
#include <asm/e820.h>
#include <asm/setup.h>
#endif
extern unsigned long find_max_low_pfn(void);
-extern void find_max_pfn(void);
extern void add_one_highpage_init(struct page *, int, int);
-
-extern struct e820map e820;
extern unsigned long highend_pfn, highstart_pfn;
-extern unsigned long max_low_pfn;
-extern unsigned long totalram_pages;
-extern unsigned long totalhigh_pages;
#define LARGE_PAGE_BYTES (PTRS_PER_PTE * PAGE_SIZE)
max_zone_pfns[ZONE_DMA] =
virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT;
max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
+#ifdef CONFIG_HIGHMEM
max_zone_pfns[ZONE_HIGHMEM] = highend_pfn;
+#endif
/* If SRAT has not registered memory, register it now */
if (find_max_pfn_with_active_regions() == 0) {
* Zero Userspace
*/
-unsigned long clear_user(void __user *to, unsigned long n)
+unsigned long __clear_user(void __user *to, unsigned long n)
{
unsigned long res;
return res;
}
-EXPORT_SYMBOL(clear_user);
+EXPORT_SYMBOL(__clear_user);
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.20-rc1
-# Sun Dec 17 14:20:47 2006
+# Linux kernel version: 2.6.22-rc1
+# Mon May 14 03:25:14 2007
#
CONFIG_MMU=y
CONFIG_HIGHMEM=y
+CONFIG_ZONE_DMA=y
CONFIG_GENERIC_ISA_DMA=y
CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
CONFIG_SWAP=y
CONFIG_SYSVIPC=y
# CONFIG_IPC_NS is not set
+CONFIG_SYSVIPC_SYSCTL=y
CONFIG_POSIX_MQUEUE=y
# CONFIG_BSD_PROCESS_ACCT is not set
# CONFIG_TASKSTATS is not set
# CONFIG_UTS_NS is not set
# CONFIG_AUDIT is not set
# CONFIG_IKCONFIG is not set
+CONFIG_LOG_BUF_SHIFT=14
CONFIG_SYSFS_DEPRECATED=y
# CONFIG_RELAY is not set
+CONFIG_BLK_DEV_INITRD=y
CONFIG_INITRAMFS_SOURCE=""
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
CONFIG_SYSCTL=y
CONFIG_ELF_CORE=y
CONFIG_BASE_FULL=y
CONFIG_FUTEX=y
+CONFIG_ANON_INODES=y
CONFIG_EPOLL=y
+CONFIG_SIGNALFD=y
+CONFIG_TIMERFD=y
+CONFIG_EVENTFD=y
CONFIG_SHMEM=y
-CONFIG_SLAB=y
CONFIG_VM_EVENT_COUNTERS=y
+CONFIG_SLAB=y
+# CONFIG_SLUB is not set
+# CONFIG_SLOB is not set
CONFIG_RT_MUTEXES=y
# CONFIG_TINY_SHMEM is not set
CONFIG_BASE_SMALL=0
-# CONFIG_SLOB is not set
#
# Loadable module support
CONFIG_SUN_PM=y
# CONFIG_SUN4 is not set
CONFIG_PCI=y
-# CONFIG_PCI_MULTITHREAD_PROBE is not set
+# CONFIG_ARCH_SUPPORTS_MSI is not set
# CONFIG_PCI_DEBUG is not set
CONFIG_SUN_OPENPROMFS=m
# CONFIG_SPARC_LED is not set
# CONFIG_SPARSEMEM_STATIC is not set
CONFIG_SPLIT_PTLOCK_CPUS=4
# CONFIG_RESOURCES_64BIT is not set
+CONFIG_ZONE_DMA_FLAG=1
#
# Networking
#
# Networking options
#
-# CONFIG_NETDEBUG is not set
CONFIG_PACKET=y
# CONFIG_PACKET_MMAP is not set
CONFIG_UNIX=y
CONFIG_XFRM=y
CONFIG_XFRM_USER=m
# CONFIG_XFRM_SUB_POLICY is not set
+# CONFIG_XFRM_MIGRATE is not set
CONFIG_NET_KEY=m
+# CONFIG_NET_KEY_MIGRATE is not set
CONFIG_INET=y
# CONFIG_IP_MULTICAST is not set
# CONFIG_IP_ADVANCED_ROUTER is not set
CONFIG_IPV6=m
CONFIG_IPV6_PRIVACY=y
# CONFIG_IPV6_ROUTER_PREF is not set
+# CONFIG_IPV6_OPTIMISTIC_DAD is not set
CONFIG_INET6_AH=m
CONFIG_INET6_ESP=m
CONFIG_INET6_IPCOMP=m
# CONFIG_HAMRADIO is not set
# CONFIG_IRDA is not set
# CONFIG_BT is not set
+CONFIG_AF_RXRPC=m
+# CONFIG_AF_RXRPC_DEBUG is not set
+# CONFIG_RXKAD is not set
+
+#
+# Wireless
+#
+# CONFIG_CFG80211 is not set
+# CONFIG_WIRELESS_EXT is not set
+# CONFIG_MAC80211 is not set
# CONFIG_IEEE80211 is not set
+# CONFIG_RFKILL is not set
#
# Device Drivers
CONFIG_PREVENT_FIRMWARE_BUILD=y
# CONFIG_FW_LOADER is not set
# CONFIG_DEBUG_DRIVER is not set
+# CONFIG_DEBUG_DEVRES is not set
# CONFIG_SYS_HYPERVISOR is not set
#
# Connector - unified userspace <-> kernelspace linker
#
# CONFIG_CONNECTOR is not set
-
-#
-# Memory Technology Devices (MTD)
-#
# CONFIG_MTD is not set
#
#
# Plug and Play support
#
+# CONFIG_PNPACPI is not set
#
# Block devices
CONFIG_BLK_DEV_RAM_COUNT=16
CONFIG_BLK_DEV_RAM_SIZE=4096
CONFIG_BLK_DEV_RAM_BLOCKSIZE=1024
-CONFIG_BLK_DEV_INITRD=y
# CONFIG_CDROM_PKTCDVD is not set
# CONFIG_ATA_OVER_ETH is not set
#
# Misc devices
#
+# CONFIG_PHANTOM is not set
# CONFIG_SGI_IOC4 is not set
# CONFIG_TIFM_CORE is not set
+# CONFIG_BLINK is not set
#
# ATA/ATAPI/MFM/RLL support
# CONFIG_SCSI_CONSTANTS is not set
# CONFIG_SCSI_LOGGING is not set
# CONFIG_SCSI_SCAN_ASYNC is not set
+CONFIG_SCSI_WAIT_SCAN=m
#
# SCSI Transports
#
-CONFIG_SCSI_SPI_ATTRS=m
+CONFIG_SCSI_SPI_ATTRS=y
# CONFIG_SCSI_FC_ATTRS is not set
# CONFIG_SCSI_ISCSI_ATTRS is not set
# CONFIG_SCSI_SAS_ATTRS is not set
# CONFIG_SCSI_DC390T is not set
# CONFIG_SCSI_NSP32 is not set
# CONFIG_SCSI_DEBUG is not set
+CONFIG_SCSI_ESP_CORE=y
CONFIG_SCSI_SUNESP=y
# CONFIG_SCSI_SRP is not set
-
-#
-# Serial ATA (prod) and Parallel ATA (experimental) drivers
-#
# CONFIG_ATA is not set
#
#
# IEEE 1394 (FireWire) support
#
+# CONFIG_FIREWIRE is not set
# CONFIG_IEEE1394 is not set
#
# ARCnet devices
#
# CONFIG_ARCNET is not set
-
-#
-# PHY device support
-#
# CONFIG_PHYLIB is not set
#
# CONFIG_NET_TULIP is not set
# CONFIG_HP100 is not set
# CONFIG_NET_PCI is not set
-
-#
-# Ethernet (1000 Mbit)
-#
+CONFIG_NETDEV_1000=y
# CONFIG_ACENIC is not set
# CONFIG_DL2K is not set
# CONFIG_E1000 is not set
# CONFIG_TIGON3 is not set
# CONFIG_BNX2 is not set
# CONFIG_QLA3XXX is not set
-
-#
-# Ethernet (10000 Mbit)
-#
+# CONFIG_ATL1 is not set
+CONFIG_NETDEV_10000=y
# CONFIG_CHELSIO_T1 is not set
+# CONFIG_CHELSIO_T3 is not set
# CONFIG_IXGB is not set
# CONFIG_S2IO is not set
# CONFIG_MYRI10GE is not set
# CONFIG_NETXEN_NIC is not set
+# CONFIG_MLX4_CORE is not set
+CONFIG_MLX4_DEBUG=y
#
# Token Ring devices
# CONFIG_TR is not set
#
-# Wireless LAN (non-hamradio)
-#
-# CONFIG_NET_RADIO is not set
-
-#
-# Wan interfaces
+# Wireless LAN
#
+# CONFIG_WLAN_PRE80211 is not set
+# CONFIG_WLAN_80211 is not set
# CONFIG_WAN is not set
# CONFIG_FDDI is not set
# CONFIG_HIPPI is not set
# CONFIG_KEYBOARD_STOWAWAY is not set
CONFIG_INPUT_MOUSE=y
CONFIG_MOUSE_PS2=m
+CONFIG_MOUSE_PS2_ALPS=y
+CONFIG_MOUSE_PS2_LOGIPS2PP=y
+CONFIG_MOUSE_PS2_SYNAPTICS=y
+CONFIG_MOUSE_PS2_LIFEBOOK=y
+CONFIG_MOUSE_PS2_TRACKPOINT=y
+# CONFIG_MOUSE_PS2_TOUCHKIT is not set
CONFIG_MOUSE_SERIAL=m
+# CONFIG_MOUSE_APPLETOUCH is not set
# CONFIG_MOUSE_VSXXXAA is not set
# CONFIG_INPUT_JOYSTICK is not set
+# CONFIG_INPUT_TABLET is not set
# CONFIG_INPUT_TOUCHSCREEN is not set
# CONFIG_INPUT_MISC is not set
# IPMI
#
# CONFIG_IPMI_HANDLER is not set
-
-#
-# Watchdog Cards
-#
# CONFIG_WATCHDOG is not set
CONFIG_HW_RANDOM=m
CONFIG_RTC=m
-# CONFIG_DTLK is not set
# CONFIG_R3964 is not set
# CONFIG_APPLICOM is not set
# CONFIG_DRM is not set
# TPM devices
#
# CONFIG_TCG_TPM is not set
-
-#
-# I2C support
-#
+CONFIG_DEVPORT=y
# CONFIG_I2C is not set
#
# Dallas's 1-wire bus
#
# CONFIG_W1 is not set
-
-#
-# Hardware Monitoring support
-#
CONFIG_HWMON=y
# CONFIG_HWMON_VID is not set
# CONFIG_SENSORS_ABITUGURU is not set
# CONFIG_SENSORS_F71805F is not set
# CONFIG_SENSORS_PC87427 is not set
+# CONFIG_SENSORS_SMSC47M1 is not set
+# CONFIG_SENSORS_SMSC47B397 is not set
# CONFIG_SENSORS_VT1211 is not set
+# CONFIG_SENSORS_W83627HF is not set
# CONFIG_HWMON_DEBUG_CHIP is not set
+#
+# Multifunction device drivers
+#
+# CONFIG_MFD_SM501 is not set
+
#
# Multimedia devices
#
# CONFIG_VIDEO_DEV is not set
+# CONFIG_DVB_CORE is not set
+# CONFIG_DAB is not set
#
-# Digital Video Broadcasting Devices
+# Graphics support
#
-# CONFIG_DVB is not set
+# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
#
-# Graphics support
+# Display device support
#
-CONFIG_FIRMWARE_EDID=y
+# CONFIG_DISPLAY_SUPPORT is not set
+# CONFIG_VGASTATE is not set
# CONFIG_FB is not set
#
#
# CONFIG_PROM_CONSOLE is not set
CONFIG_DUMMY_CONSOLE=y
-# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
#
# Sound
# HID Devices
#
CONFIG_HID=y
+# CONFIG_HID_DEBUG is not set
#
# USB support
# USB Gadget Support
#
# CONFIG_USB_GADGET is not set
-
-#
-# MMC/SD Card support
-#
# CONFIG_MMC is not set
#
# DMA Devices
#
-#
-# Virtualization
-#
-
#
# Misc Linux/SPARC drivers
#
#
# CONFIG_ADFS_FS is not set
# CONFIG_AFFS_FS is not set
+# CONFIG_ECRYPT_FS is not set
# CONFIG_HFS_FS is not set
# CONFIG_HFSPLUS_FS is not set
CONFIG_BEFS_FS=m
CONFIG_NFS_COMMON=y
CONFIG_SUNRPC=y
CONFIG_SUNRPC_GSS=m
+# CONFIG_SUNRPC_BIND34 is not set
CONFIG_RPCSEC_GSS_KRB5=m
# CONFIG_RPCSEC_GSS_SPKM3 is not set
# CONFIG_SMB_FS is not set
# CONFIG_NCP_FS is not set
# CONFIG_CODA_FS is not set
CONFIG_AFS_FS=m
-CONFIG_RXRPC=m
+# CONFIG_AFS_DEBUG is not set
# CONFIG_9P_FS is not set
#
# CONFIG_DEBUG_FS is not set
# CONFIG_HEADERS_CHECK is not set
CONFIG_DEBUG_KERNEL=y
-CONFIG_LOG_BUF_SHIFT=14
CONFIG_DETECT_SOFTLOCKUP=y
# CONFIG_SCHEDSTATS is not set
+# CONFIG_TIMER_STATS is not set
# CONFIG_DEBUG_SLAB is not set
# CONFIG_DEBUG_RT_MUTEXES is not set
# CONFIG_RT_MUTEX_TESTER is not set
# CONFIG_DEBUG_SPINLOCK is not set
# CONFIG_DEBUG_MUTEXES is not set
-# CONFIG_DEBUG_RWSEMS is not set
# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
# CONFIG_DEBUG_KOBJECT is not set
# CONFIG_DEBUG_LIST is not set
CONFIG_FORCED_INLINING=y
# CONFIG_RCU_TORTURE_TEST is not set
+# CONFIG_FAULT_INJECTION is not set
# CONFIG_DEBUG_STACK_USAGE is not set
#
# Security options
#
-# CONFIG_KEYS is not set
+CONFIG_KEYS=y
+# CONFIG_KEYS_DEBUG_PROC_KEYS is not set
# CONFIG_SECURITY is not set
#
# CONFIG_CRYPTO_GF128MUL is not set
CONFIG_CRYPTO_ECB=m
CONFIG_CRYPTO_CBC=y
+CONFIG_CRYPTO_PCBC=m
# CONFIG_CRYPTO_LRW is not set
+# CONFIG_CRYPTO_CRYPTD is not set
CONFIG_CRYPTO_DES=y
+# CONFIG_CRYPTO_FCRYPT is not set
CONFIG_CRYPTO_BLOWFISH=m
CONFIG_CRYPTO_TWOFISH=m
CONFIG_CRYPTO_TWOFISH_COMMON=m
CONFIG_CRYPTO_DEFLATE=y
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_CRC32C=m
+# CONFIG_CRYPTO_CAMELLIA is not set
# CONFIG_CRYPTO_TEST is not set
#
CONFIG_BITREVERSE=y
# CONFIG_CRC_CCITT is not set
# CONFIG_CRC16 is not set
+# CONFIG_CRC_ITU_T is not set
CONFIG_CRC32=y
CONFIG_LIBCRC32C=m
CONFIG_ZLIB_INFLATE=y
CONFIG_ZLIB_DEFLATE=y
CONFIG_PLIST=y
-CONFIG_IOMAP_COPY=y
+CONFIG_HAS_IOMEM=y
+CONFIG_HAS_IOPORT=y
+CONFIG_HAS_DMA=y
#include <asm/ptrace.h>
#include <asm/psr.h>
#include <asm/page.h>
-#include <linux/kdebug.h>
+#include <asm/kdebug.h>
#include <asm/winmacro.h>
#include <asm/thread_info.h> /* TI_UWINMASK */
#include <asm/errno.h>
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.21
-# Fri May 11 14:31:45 2007
+# Linux kernel version: 2.6.22-rc1
+# Mon May 14 04:17:48 2007
#
CONFIG_SPARC=y
CONFIG_SPARC64=y
# CONFIG_SCSI_ESP_CORE is not set
# CONFIG_SCSI_SUNESP is not set
# CONFIG_SCSI_SRP is not set
-
-#
-# Serial ATA (prod) and Parallel ATA (experimental) drivers
-#
# CONFIG_ATA is not set
#
# ARCnet devices
#
# CONFIG_ARCNET is not set
-
-#
-# PHY device support
-#
# CONFIG_PHYLIB is not set
#
# CONFIG_SUNDANCE is not set
# CONFIG_VIA_RHINE is not set
# CONFIG_SC92031 is not set
-
-#
-# Ethernet (1000 Mbit)
-#
+CONFIG_NETDEV_1000=y
# CONFIG_ACENIC is not set
# CONFIG_DL2K is not set
CONFIG_E1000=m
CONFIG_BNX2=m
# CONFIG_QLA3XXX is not set
# CONFIG_ATL1 is not set
-
-#
-# Ethernet (10000 Mbit)
-#
+CONFIG_NETDEV_10000=y
# CONFIG_CHELSIO_T1 is not set
# CONFIG_CHELSIO_T3 is not set
# CONFIG_IXGB is not set
# CONFIG_USB_RTL8150 is not set
# CONFIG_USB_USBNET_MII is not set
# CONFIG_USB_USBNET is not set
-
-#
-# Wan interfaces
-#
# CONFIG_WAN is not set
# CONFIG_FDDI is not set
# CONFIG_HIPPI is not set
return 0;
}
+ /* When we miss an I/O space match on PCI, just pass it up
+ * to the next PCI bridge and/or controller.
+ */
+ if (!strcmp(bus->name, "pci") &&
+ (addr[0] & 0x03000000) == 0x01000000)
+ return 0;
+
return 1;
}
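The magic numbers follow the standard Open Firmware PCI binding, where bits 25:24 of phys.hi select the address space (0b01 = I/O, 0b10 = 32-bit memory, 0b11 = 64-bit memory). A small decode sketch (the helper name is illustrative, not from the source):

	static inline unsigned int of_pci_addr_space(u32 phys_hi)
	{
		return (phys_hi >> 24) & 0x3;	/* 0x1 means I/O space */
	}

so (addr[0] & 0x03000000) == 0x01000000 is precisely the "this is an I/O-space address" test.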
unsigned long flags, status;
int cnt, retries, this_cpu, prev_sent, i;
+ if (cpus_empty(mask))
+ return;
+
/* We have to do this whole thing with interrupts fully disabled.
* Otherwise if we send an xcall from interrupt context it will
* corrupt both our mondo block and cpu list state.
*/
if (nmi_watchdog_tick(regs,reason))
return;
- if (notify_die(DIE_NMI_POST, "nmi_post", regs, reason, 2, 0)
- == NOTIFY_STOP)
- return;
if (!do_nmi_callback(regs,cpu))
unknown_nmi_error(reason, regs);
return ret;
}
-EXPORT_SYMBOL(current_io_context);
/*
* If the current task has no IO context then create one and initialise it.
.device_id = PCI_DEVICE_ID_VIA_P4M800CE,
.chipset_name = "VT3314",
},
- /* CX700 */
+ /* VT3324 / CX700 */
{
- .device_id = PCI_DEVICE_ID_VIA_CX700,
+ .device_id = PCI_DEVICE_ID_VIA_VT3324,
.chipset_name = "CX700",
},
/* VT3336 */
ID(PCI_DEVICE_ID_VIA_83_87XX_1),
ID(PCI_DEVICE_ID_VIA_3296_0),
ID(PCI_DEVICE_ID_VIA_P4M800CE),
- ID(PCI_DEVICE_ID_VIA_CX700),
+ ID(PCI_DEVICE_ID_VIA_VT3324),
ID(PCI_DEVICE_ID_VIA_VT3336),
ID(PCI_DEVICE_ID_VIA_P4M890),
{ }
#include <linux/poll.h>
#include <linux/string.h>
#include <linux/ctype.h>
-#include <linux/delay.h>
#include <asm/atomic.h>
-#ifdef CONFIG_X86
-/* This is ugly, but I've determined that x86 is the only architecture
- that can reasonably support the IPMI NMI watchdog timeout at this
- time. If another architecture adds this capability somehow, it
- will have to be a somewhat different mechanism and I have no idea
- how it will work. So in the unlikely event that another
- architecture supports this, we can figure out a good generic
- mechanism for it at that time. */
-#define HAVE_DIE_NMI_POST
+#ifdef CONFIG_X86_LOCAL_APIC
+#include <asm/apic.h>
#endif
#define PFX "IPMI Watchdog: "
/* If a pretimeout occurs, this is used to allow only one panic to happen. */
static atomic_t preop_panic_excl = ATOMIC_INIT(-1);
-#ifdef HAVE_DIE_NMI_POST
-static int testing_nmi;
-static int nmi_handler_registered;
-#endif
-
static int ipmi_heartbeat(void);
static void panic_halt_ipmi_heartbeat(void);
int hbnow = 0;
- /* These can be cleared as we are setting the timeout. */
- ipmi_start_timer_on_heartbeat = 0;
- pretimeout_since_last_heartbeat = 0;
-
data[0] = 0;
WDOG_SET_TIMER_USE(data[0], WDOG_TIMER_USE_SMS_OS);
wait_for_completion(&set_timeout_wait);
- mutex_unlock(&set_timeout_lock);
-
if ((do_heartbeat == IPMI_SET_TIMEOUT_FORCE_HB)
|| ((send_heartbeat_now)
&& (do_heartbeat == IPMI_SET_TIMEOUT_HB_IF_NECESSARY)))
+ {
rv = ipmi_heartbeat();
+ }
+ mutex_unlock(&set_timeout_lock);
out:
return rv;
int rv;
struct ipmi_system_interface_addr addr;
- if (ipmi_ignore_heartbeat)
+ if (ipmi_ignore_heartbeat) {
return 0;
+ }
if (ipmi_start_timer_on_heartbeat) {
+ ipmi_start_timer_on_heartbeat = 0;
ipmi_watchdog_state = action_val;
return ipmi_set_timeout(IPMI_SET_TIMEOUT_FORCE_HB);
} else if (pretimeout_since_last_heartbeat) {
We don't want to set the action, though, we want to
leave that alone (thus it can't be combined with the
above operation). */
+ pretimeout_since_last_heartbeat = 0;
return ipmi_set_timeout(IPMI_SET_TIMEOUT_HB_IF_NECESSARY);
}
printk(KERN_CRIT PFX "Unable to register misc device\n");
}
-#ifdef HAVE_DIE_NMI_POST
- if (nmi_handler_registered) {
- int old_pretimeout = pretimeout;
- int old_timeout = timeout;
- int old_preop_val = preop_val;
-
- /* Set the pretimeout to go off in a second and give
- ourselves plenty of time to stop the timer. */
- ipmi_watchdog_state = WDOG_TIMEOUT_RESET;
- preop_val = WDOG_PREOP_NONE; /* Make sure nothing happens */
- pretimeout = 99;
- timeout = 100;
-
- testing_nmi = 1;
-
- rv = ipmi_set_timeout(IPMI_SET_TIMEOUT_FORCE_HB);
- if (rv) {
- printk(KERN_WARNING PFX "Error starting timer to"
- " test NMI: 0x%x. The NMI pretimeout will"
- " likely not work\n", rv);
- rv = 0;
- goto out_restore;
- }
-
- msleep(1500);
-
- if (testing_nmi != 2) {
- printk(KERN_WARNING PFX "IPMI NMI didn't seem to"
- " occur. The NMI pretimeout will"
- " likely not work\n");
- }
- out_restore:
- testing_nmi = 0;
- preop_val = old_preop_val;
- pretimeout = old_pretimeout;
- timeout = old_timeout;
- }
-#endif
-
out:
up_write(&register_sem);
ipmi_watchdog_state = action_val;
ipmi_set_timeout(IPMI_SET_TIMEOUT_FORCE_HB);
printk(KERN_INFO PFX "Starting now!\n");
- } else {
- /* Stop the timer now. */
- ipmi_watchdog_state = WDOG_TIMEOUT_NONE;
- ipmi_set_timeout(IPMI_SET_TIMEOUT_NO_HB);
}
}
up_write(&register_sem);
}
-#ifdef HAVE_DIE_NMI_POST
+#ifdef HAVE_NMI_HANDLER
static int
-ipmi_nmi(struct notifier_block *self, unsigned long val, void *data)
+ipmi_nmi(void *dev_id, int cpu, int handled)
{
- if (val != DIE_NMI_POST)
- return NOTIFY_OK;
-
- if (testing_nmi) {
- testing_nmi = 2;
- return NOTIFY_STOP;
- }
-
/* If we are not expecting a timeout, ignore it. */
if (ipmi_watchdog_state == WDOG_TIMEOUT_NONE)
- return NOTIFY_OK;
-
- if (preaction_val != WDOG_PRETIMEOUT_NMI)
- return NOTIFY_OK;
+ return NOTIFY_DONE;
/* If no one else handled the NMI, we assume it was the IPMI
watchdog. */
- if (preop_val == WDOG_PREOP_PANIC) {
+ if ((!handled) && (preop_val == WDOG_PREOP_PANIC)) {
/* On some machines, the heartbeat will give
an error and not work unless we re-enable
the timer. So do so. */
panic(PFX "pre-timeout");
}
- return NOTIFY_STOP;
+ return NOTIFY_DONE;
}
-static struct notifier_block ipmi_nmi_handler = {
- .notifier_call = ipmi_nmi
+static struct nmi_handler ipmi_nmi_handler =
+{
+ .link = LIST_HEAD_INIT(ipmi_nmi_handler.link),
+ .dev_name = "ipmi_watchdog",
+ .dev_id = NULL,
+ .handler = ipmi_nmi,
+ .priority = 0, /* Call us last. */
};
+int nmi_handler_registered;
#endif
static int wdog_reboot_handler(struct notifier_block *this,
preaction_val = WDOG_PRETIMEOUT_NONE;
else if (strcmp(inval, "pre_smi") == 0)
preaction_val = WDOG_PRETIMEOUT_SMI;
-#ifdef HAVE_DIE_NMI_POST
+#ifdef HAVE_NMI_HANDLER
else if (strcmp(inval, "pre_nmi") == 0)
preaction_val = WDOG_PRETIMEOUT_NMI;
#endif
static void check_parms(void)
{
-#ifdef HAVE_DIE_NMI_POST
+#ifdef HAVE_NMI_HANDLER
int do_nmi = 0;
int rv;
preop_op("preop_none", NULL);
do_nmi = 0;
}
+#ifdef CONFIG_X86_LOCAL_APIC
+ if (nmi_watchdog == NMI_IO_APIC) {
+ printk(KERN_WARNING PFX "nmi_watchdog is set to IO APIC"
+ " mode (value is %d), that is incompatible"
+ " with using NMI in the IPMI watchdog."
+ " Disabling IPMI nmi pretimeout.\n",
+ nmi_watchdog);
+ preaction_val = WDOG_PRETIMEOUT_NONE;
+ do_nmi = 0;
+ }
+#endif
}
if (do_nmi && !nmi_handler_registered) {
- rv = register_die_notifier(&ipmi_nmi_handler);
+ rv = request_nmi(&ipmi_nmi_handler);
if (rv) {
printk(KERN_WARNING PFX
"Can't register nmi handler\n");
} else
nmi_handler_registered = 1;
} else if (!do_nmi && nmi_handler_registered) {
- unregister_die_notifier(&ipmi_nmi_handler);
+ release_nmi(&ipmi_nmi_handler);
nmi_handler_registered = 0;
}
#endif
rv = ipmi_smi_watcher_register(&smi_watcher);
if (rv) {
-#ifdef HAVE_DIE_NMI_POST
- if (nmi_handler_registered)
- unregister_die_notifier(&ipmi_nmi_handler);
+#ifdef HAVE_NMI_HANDLER
+ if (preaction_val == WDOG_PRETIMEOUT_NMI)
+ release_nmi(&ipmi_nmi_handler);
#endif
atomic_notifier_chain_unregister(&panic_notifier_list,
&wdog_panic_notifier);
ipmi_smi_watcher_unregister(&smi_watcher);
ipmi_unregister_watchdog(watchdog_ifnum);
-#ifdef HAVE_DIE_NMI_POST
+#ifdef HAVE_NMI_HANDLER
if (nmi_handler_registered)
- unregister_die_notifier(&ipmi_nmi_handler);
+ release_nmi(&ipmi_nmi_handler);
#endif
atomic_notifier_chain_unregister(&panic_notifier_list,
# Andre Hedrick <andre@linux-ide.org>
#
-if BLOCK
-
-menu "ATA/ATAPI/MFM/RLL support"
- depends on HAS_IOMEM
-
-config IDE
+menuconfig IDE
tristate "ATA/ATAPI/MFM/RLL support"
+ depends on BLOCK
+ depends on HAS_IOMEM
---help---
If you say Y here, your kernel will be able to manage low cost mass
storage units such as ATA/(E)IDE and ATAPI units. The most common
config BLK_DEV_HD
def_bool BLK_DEV_HD_IDE || BLK_DEV_HD_ONLY
-endif
-
-endmenu
-
-endif
+endif # IDE
return 1; /* let the PIO routines handle this weirdness */
}
-static int cris_config_drive_for_dma (ide_drive_t *drive)
-{
- u8 speed = ide_max_dma_mode(drive);
-
- if (!speed)
- return 0;
-
- speed_cris_ide(drive, speed);
-
- return ide_dma_enable(drive);
-}
-
/*
* cris_dma_intr() is the handler for disk read/write DMA interrupts
*/
static int cris_dma_check(ide_drive_t *drive)
{
- if (ide_use_dma(drive) && cris_config_drive_for_dma(drive))
+ if (ide_tune_dma(drive))
return 0;
return -1;
EXPORT_SYMBOL(__ide_dma_good_drive);
-int ide_use_dma(ide_drive_t *drive)
-{
- struct hd_driveid *id = drive->id;
- ide_hwif_t *hwif = drive->hwif;
-
- if ((id->capability & 1) == 0 || drive->autodma == 0)
- return 0;
-
- /* consult the list of known "bad" drives */
- if (__ide_dma_bad_drive(drive))
- return 0;
-
- /* capable of UltraDMA modes */
- if (id->field_valid & 4) {
- if (hwif->ultra_mask & id->dma_ultra)
- return 1;
- }
-
- /* capable of regular DMA modes */
- if (id->field_valid & 2) {
- if (hwif->mwdma_mask & id->dma_mword)
- return 1;
- if (hwif->swdma_mask & id->dma_1word)
- return 1;
- }
-
- /* consult the list of known "good" drives */
- if (__ide_dma_good_drive(drive) && id->eide_dma_time < 150)
- return 1;
-
- return 0;
-}
-
-EXPORT_SYMBOL_GPL(ide_use_dma);
-
static const u8 xfer_mode_bases[] = {
XFER_UDMA_0,
XFER_MW_DMA_0,
mask &= 0x07;
break;
case XFER_MW_DMA_0:
- mask = id->dma_mword & hwif->mwdma_mask;
+ if (id->field_valid & 2)
+ mask = id->dma_mword & hwif->mwdma_mask;
break;
case XFER_SW_DMA_0:
- mask = id->dma_1word & hwif->swdma_mask;
+ if (id->field_valid & 2)
+ mask = id->dma_1word & hwif->swdma_mask;
break;
default:
BUG();
{
u8 speed;
- /* TODO: use only ide_max_dma_mode() */
- if (!ide_use_dma(drive))
+ if ((drive->id->capability & 1) == 0 || drive->autodma == 0)
+ return 0;
+
+ /* consult the list of known "bad" drives */
+ if (__ide_dma_bad_drive(drive))
return 0;
speed = ide_max_dma_mode(drive);
if (!speed)
return 0;
- drive->hwif->speedproc(drive, speed);
+ if (drive->hwif->speedproc(drive, speed))
+ return 0;
- return ide_dma_enable(drive);
+ return 1;
}
EXPORT_SYMBOL_GPL(ide_tune_dma);
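The conversion pattern used throughout this series follows directly from this helper: each host driver's ->ide_dma_check hook collapses to a single call. A sketch of the idiom (foo_ is a placeholder prefix; the cris, cmd64x, it821x and pdc202xx hunks below all do exactly this):

	static int foo_config_drive_for_dma(ide_drive_t *drive)
	{
		if (ide_tune_dma(drive))	/* capability + blacklist checks,
						   then ->speedproc(best mode) */
			return 0;		/* 0: DMA configured */

		return -1;			/* -1: caller falls back to PIO */
	}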
break;
if (drive->hwif->ide_dma_check == NULL)
break;
+ drive->hwif->dma_off_quietly(drive);
ide_set_dma(drive);
break;
}
EXPORT_SYMBOL(ide_rate_filter);
-int ide_dma_enable (ide_drive_t *drive)
-{
- ide_hwif_t *hwif = HWIF(drive);
- struct hd_driveid *id = drive->id;
-
- return ((int) ((((id->dma_ultra >> 8) & hwif->ultra_mask) ||
- ((id->dma_mword >> 8) & hwif->mwdma_mask) ||
- ((id->dma_1word >> 8) & hwif->swdma_mask)) ? 1 : 0));
-}
-
-EXPORT_SYMBOL(ide_dma_enable);
-
int ide_use_fast_pio(ide_drive_t *drive)
{
struct hd_driveid *id = drive->id;
err = 0;
if (arg) {
+ hwif->dma_off_quietly(drive);
if (ide_set_dma(drive) || hwif->ide_dma_on(drive))
err = -EIO;
} else
return (ide_config_drive_speed(drive, speed));
}
-
-/**
- * config_chipset_for_dma - set up DMA mode
- * @drive: drive to configure for
- *
- * Place a drive into DMA mode and tune the chipset for
- * the selected speed.
- *
- * Returns true if DMA mode can be used
- */
-
-static int config_chipset_for_dma (ide_drive_t *drive)
-{
- u8 speed = ide_max_dma_mode(drive);
-
- if (!(speed))
- return 0;
-
- (void) ali15x3_tune_chipset(drive, speed);
- return ide_dma_enable(drive);
-}
-
/**
* ali15x3_config_drive_for_dma - configure for DMA
* @drive: drive to configure
static int ali15x3_config_drive_for_dma(ide_drive_t *drive)
{
- ide_hwif_t *hwif = HWIF(drive);
- struct hd_driveid *id = drive->id;
-
- if ((m5229_revision<=0x20) && (drive->media!=ide_disk))
- goto ata_pio;
-
drive->init_speed = 0;
- if ((id != NULL) && ((id->capability & 1) != 0) && drive->autodma) {
- /* Consult the list of known "bad" drives */
- if (__ide_dma_bad_drive(drive))
- goto ata_pio;
- if ((id->field_valid & 4) && (m5229_revision >= 0xC2)) {
- if (id->dma_ultra & hwif->ultra_mask) {
- /* Force if Capable UltraDMA */
- int dma = config_chipset_for_dma(drive);
- if ((id->field_valid & 2) && !dma)
- goto try_dma_modes;
- }
- } else if (id->field_valid & 2) {
-try_dma_modes:
- if ((id->dma_mword & hwif->mwdma_mask) ||
- (id->dma_1word & hwif->swdma_mask)) {
- /* Force if Capable regular DMA modes */
- if (!config_chipset_for_dma(drive))
- goto ata_pio;
- }
- } else if (__ide_dma_good_drive(drive) &&
- (id->eide_dma_time < 150)) {
- /* Consult the list of known "good" drives */
- if (!config_chipset_for_dma(drive))
- goto ata_pio;
- } else {
- goto ata_pio;
- }
- } else {
-ata_pio:
- hwif->tuneproc(drive, 255);
- return -1;
- }
+ if (ide_tune_dma(drive))
+ return 0;
- return 0;
+ ali15x3_tune_drive(drive, 255);
+
+ return -1;
}
/**
return;
}
- hwif->atapi_dma = 1;
+ if (m5229_revision > 0x20)
+ hwif->atapi_dma = 1;
if (m5229_revision <= 0x20)
hwif->ultra_mask = 0x00; /* no udma */
return ide_config_drive_speed(drive, speed);
}
-static int config_chipset_for_dma (ide_drive_t *drive)
-{
- u8 speed = ide_max_dma_mode(drive);
-
- if (!speed)
- return 0;
-
- if (cmd64x_tune_chipset(drive, speed))
- return 0;
-
- return ide_dma_enable(drive);
-}
-
static int cmd64x_config_drive_for_dma (ide_drive_t *drive)
{
- if (ide_use_dma(drive) && config_chipset_for_dma(drive))
+ if (ide_tune_dma(drive))
return 0;
if (ide_use_fast_pio(drive))
/*
- * linux/drivers/ide/pci/cs5530.c Version 0.7 Sept 10, 2002
+ * linux/drivers/ide/pci/cs5530.c Version 0.73 Mar 10 2007
*
* Copyright (C) 2000 Andre Hedrick <andre@linux-ide.org>
- * Ditto of GNU General Public License.
- *
* Copyright (C) 2000 Mark Lord <mlord@pobox.com>
+ * Copyright (C) 2007 Bartlomiej Zolnierkiewicz
+ *
* May be copied or modified under the terms of the GNU General Public License
*
* Development of this chipset driver was funded
#define CS5530_BAD_PIO(timings) (((timings)&~0x80000000)==0x0000e132)
#define CS5530_BASEREG(hwif) (((hwif)->dma_base & ~0xf) + ((hwif)->channel ? 0x30 : 0x20))
+static void cs5530_tunepio(ide_drive_t *drive, u8 pio)
+{
+ unsigned long basereg = CS5530_BASEREG(drive->hwif);
+ unsigned int format = (inl(basereg + 4) >> 31) & 1;
+
+ outl(cs5530_pio_timings[format][pio], basereg + ((drive->dn & 1)<<3));
+}
+
/**
* cs5530_tuneproc - select/set PIO modes
*
static void cs5530_tuneproc (ide_drive_t *drive, u8 pio) /* pio=255 means "autotune" */
{
- ide_hwif_t *hwif = HWIF(drive);
- unsigned int format;
- unsigned long basereg = CS5530_BASEREG(hwif);
- static u8 modes[5] = { XFER_PIO_0, XFER_PIO_1, XFER_PIO_2, XFER_PIO_3, XFER_PIO_4};
-
pio = ide_get_best_pio_mode(drive, pio, 4, NULL);
- if (!cs5530_set_xfer_mode(drive, modes[pio])) {
- format = (inl(basereg + 4) >> 31) & 1;
- outl(cs5530_pio_timings[format][pio],
- basereg+(drive->select.b.unit<<3));
+
+ if (cs5530_set_xfer_mode(drive, XFER_PIO_0 + pio) == 0)
+ cs5530_tunepio(drive, pio);
+}
+
+/**
+ * cs5530_udma_filter - UDMA filter
+ * @drive: drive
+ *
+ * cs5530_udma_filter() does UDMA mask filtering for the given drive
+ * taking into consideration the capabilities of the mate device.
+ *
+ * The CS5530 specifies that two drives sharing a cable cannot mix
+ * UDMA/MDMA. It has to be one or the other, for the pair, though
+ * different timings can still be chosen for each drive. We could
+ * set the appropriate timing bits on the fly, but that might be
+ * a bit confusing. So, for now we statically handle this requirement
+ * by looking at our mate drive to see what it is capable of, before
+ * choosing a mode for our own drive.
+ *
+ * Note: This relies on the fact we never fail from UDMA to MWDMA2
+ * but instead drop to PIO.
+ */
+
+static u8 cs5530_udma_filter(ide_drive_t *drive)
+{
+ ide_hwif_t *hwif = drive->hwif;
+ ide_drive_t *mate = &hwif->drives[(drive->dn & 1) ^ 1];
+ struct hd_driveid *mateid = mate->id;
+ u8 mask = hwif->ultra_mask;
+
+ if (mate->present == 0)
+ goto out;
+
+ if ((mateid->capability & 1) && __ide_dma_bad_drive(mate) == 0) {
+ if ((mateid->field_valid & 4) && (mateid->dma_ultra & 7))
+ goto out;
+ if ((mateid->field_valid & 2) && (mateid->dma_mword & 7))
+ mask = 0;
}
+out:
+ return mask;
}
/**
- * cs5530_config_dma - select/set DMA and UDMA modes
+ * cs5530_config_dma - set DMA/UDMA mode
* @drive: drive to tune
*
- * cs5530_config_dma() handles selection/setting of DMA/UDMA modes
- * for both the chipset and drive. The CS5530 has limitations about
- * mixing DMA/UDMA on the same cable.
+ * cs5530_config_dma() handles setting of DMA/UDMA mode
+ * for both the chipset and drive.
*/
-
-static int cs5530_config_dma (ide_drive_t *drive)
+
+static int cs5530_config_dma(ide_drive_t *drive)
{
- int udma_ok = 1, mode = 0;
- ide_hwif_t *hwif = HWIF(drive);
- int unit = drive->select.b.unit;
- ide_drive_t *mate = &hwif->drives[unit^1];
- struct hd_driveid *id = drive->id;
- unsigned int reg, timings = 0;
- unsigned long basereg;
+ if (ide_tune_dma(drive))
+ return 0;
- /*
- * Default to DMA-off in case we run into trouble here.
- */
- hwif->dma_off_quietly(drive);
+ return 1;
+}
- /*
- * The CS5530 specifies that two drives sharing a cable cannot
- * mix UDMA/MDMA. It has to be one or the other, for the pair,
- * though different timings can still be chosen for each drive.
- * We could set the appropriate timing bits on the fly,
- * but that might be a bit confusing. So, for now we statically
- * handle this requirement by looking at our mate drive to see
- * what it is capable of, before choosing a mode for our own drive.
- *
- * Note: This relies on the fact we never fail from UDMA to MWDMA_2
- * but instead drop to PIO
- */
- if (mate->present) {
- struct hd_driveid *mateid = mate->id;
- if (mateid && (mateid->capability & 1) &&
- !__ide_dma_bad_drive(mate)) {
- if ((mateid->field_valid & 4) &&
- (mateid->dma_ultra & 7))
- udma_ok = 1;
- else if ((mateid->field_valid & 2) &&
- (mateid->dma_mword & 7))
- udma_ok = 0;
- else
- udma_ok = 1;
- }
- }
+static int cs5530_tune_chipset(ide_drive_t *drive, u8 mode)
+{
+ unsigned long basereg;
+ unsigned int reg, timings = 0;
- /*
- * Now see what the current drive is capable of,
- * selecting UDMA only if the mate said it was ok.
- */
- if (id && (id->capability & 1) && drive->autodma &&
- !__ide_dma_bad_drive(drive)) {
- if (udma_ok && (id->field_valid & 4) && (id->dma_ultra & 7)) {
- if (id->dma_ultra & 4)
- mode = XFER_UDMA_2;
- else if (id->dma_ultra & 2)
- mode = XFER_UDMA_1;
- else if (id->dma_ultra & 1)
- mode = XFER_UDMA_0;
- }
- if (!mode && (id->field_valid & 2) && (id->dma_mword & 7)) {
- if (id->dma_mword & 4)
- mode = XFER_MW_DMA_2;
- else if (id->dma_mword & 2)
- mode = XFER_MW_DMA_1;
- else if (id->dma_mword & 1)
- mode = XFER_MW_DMA_0;
- }
- }
+ mode = ide_rate_filter(drive, mode);
/*
* Tell the drive to switch to the new mode; abort on failure.
*/
- if (!mode || cs5530_set_xfer_mode(drive, mode))
+ if (cs5530_set_xfer_mode(drive, mode))
return 1; /* failure */
/*
case XFER_MW_DMA_0: timings = 0x00077771; break;
case XFER_MW_DMA_1: timings = 0x00012121; break;
case XFER_MW_DMA_2: timings = 0x00002020; break;
+ case XFER_PIO_4:
+ case XFER_PIO_3:
+ case XFER_PIO_2:
+ case XFER_PIO_1:
+ case XFER_PIO_0:
+ cs5530_tunepio(drive, mode - XFER_PIO_0);
+ return 0;
default:
BUG();
break;
}
- basereg = CS5530_BASEREG(hwif);
+ basereg = CS5530_BASEREG(drive->hwif);
reg = inl(basereg + 4); /* get drive0 config register */
timings |= reg & 0x80000000; /* preserve PIO format bit */
- if (unit == 0) { /* are we configuring drive0? */
+ if ((drive->dn & 1) == 0) { /* are we configuring drive0? */
outl(timings, basereg + 4); /* write drive0 config register */
} else {
if (timings & 0x00100000)
hwif->serialized = hwif->mate->serialized = 1;
hwif->tuneproc = &cs5530_tuneproc;
+ hwif->speedproc = &cs5530_tune_chipset;
+
basereg = CS5530_BASEREG(hwif);
d0_timings = inl(basereg + 0);
if (CS5530_BAD_PIO(d0_timings)) {
hwif->ultra_mask = 0x07;
hwif->mwdma_mask = 0x07;
+ hwif->udma_filter = cs5530_udma_filter;
hwif->ide_dma_check = &cs5530_config_dma;
if (!noautodma)
hwif->autodma = 1;
return ide_config_drive_speed(drive, speed);
}
-/**
- * config_chipset_for_dma - configure for DMA
- * @drive: drive to configure
- *
- * Called by the IDE layer when it wants the timings set up.
- */
-
-static int config_chipset_for_dma (ide_drive_t *drive)
-{
- u8 speed = ide_max_dma_mode(drive);
-
- if (speed == 0)
- return 0;
-
- it821x_tune_chipset(drive, speed);
-
- return ide_dma_enable(drive);
-}
-
/**
* it821x_configure_drive_for_dma - set up for DMA transfers
* @drive: drive we are going to set up
static int it821x_config_drive_for_dma (ide_drive_t *drive)
{
- if (ide_use_dma(drive) && config_chipset_for_dma(drive))
+ if (ide_tune_dma(drive))
return 0;
it821x_tuneproc(drive, 255);
return get_indexed_reg(hwif, 0x0b) & 0x04;
}
-static int config_chipset_for_dma(ide_drive_t *drive)
-{
- struct hd_driveid *id = drive->id;
- ide_hwif_t *hwif = HWIF(drive);
- u8 speed;
-
- if (id->capability & 4) {
- /*
- * Set IORDY_EN & PREFETCH_EN (this seems to have
- * NO real effect since this register is reloaded
- * by hardware when the transfer mode is selected)
- */
- u8 tmp, adj = (drive->dn & 1) ? 0x08 : 0x00;
-
- tmp = get_indexed_reg(hwif, 0x13 + adj);
- set_indexed_reg(hwif, 0x13 + adj, tmp | 0x03);
- }
-
- speed = ide_max_dma_mode(drive);
-
- if (!speed)
- return 0;
-
- (void) hwif->speedproc(drive, speed);
- return ide_dma_enable(drive);
-}
-
static int pdcnew_config_drive_xfer_rate(ide_drive_t *drive)
{
drive->init_speed = 0;
- if (ide_use_dma(drive) && config_chipset_for_dma(drive))
+ if (ide_tune_dma(drive))
return 0;
if (ide_use_fast_pio(drive))
/*
- * linux/drivers/ide/pci/pdc202xx_old.c Version 0.36 Sept 11, 2002
+ * linux/drivers/ide/pci/pdc202xx_old.c Version 0.50 Mar 3, 2007
*
* Copyright (C) 1998-2002 Andre Hedrick <andre@linux-ide.org>
* Copyright (C) 2006-2007 MontaVista Software, Inc.
+ * Copyright (C) 2007 Bartlomiej Zolnierkiewicz
*
* Promise Ultra33 cards with BIOS v1.20 through 1.28 will need this
* compiled into the kernel if you have more than one card installed.
NULL
};
-/* A Register */
-#define SYNC_ERRDY_EN 0xC0
-
-#define SYNC_IN 0x80 /* control bit, different for master vs. slave drives */
-#define ERRDY_EN 0x40 /* control bit, different for master vs. slave drives */
-#define IORDY_EN 0x20 /* PIO: IOREADY */
-#define PREFETCH_EN 0x10 /* PIO: PREFETCH */
-
-#define PA3 0x08 /* PIO"A" timing */
-#define PA2 0x04 /* PIO"A" timing */
-#define PA1 0x02 /* PIO"A" timing */
-#define PA0 0x01 /* PIO"A" timing */
-
-/* B Register */
-
-#define MB2 0x80 /* DMA"B" timing */
-#define MB1 0x40 /* DMA"B" timing */
-#define MB0 0x20 /* DMA"B" timing */
-
-#define PB4 0x10 /* PIO_FORCE 1:0 */
-
-#define PB3 0x08 /* PIO"B" timing */ /* PIO flow Control mode */
-#define PB2 0x04 /* PIO"B" timing */ /* PIO 4 */
-#define PB1 0x02 /* PIO"B" timing */ /* PIO 3 half */
-#define PB0 0x01 /* PIO"B" timing */ /* PIO 3 other half */
-
-/* C Register */
-#define IORDYp_NO_SPEED 0x4F
-#define SPEED_DIS 0x0F
-
-#define DMARQp 0x80
-#define IORDYp 0x40
-#define DMAR_EN 0x20
-#define DMAW_EN 0x10
-
-#define MC3 0x08 /* DMA"C" timing */
-#define MC2 0x04 /* DMA"C" timing */
-#define MC1 0x02 /* DMA"C" timing */
-#define MC0 0x01 /* DMA"C" timing */
+static void pdc_old_disable_66MHz_clock(ide_hwif_t *);
static int pdc202xx_tune_chipset (ide_drive_t *drive, u8 xferspeed)
{
u8 drive_pci = 0x60 + (drive->dn << 2);
u8 speed = ide_rate_filter(drive, xferspeed);
- u32 drive_conf;
- u8 AP, BP, CP, DP;
+ u8 AP = 0, BP = 0, CP = 0;
u8 TA = 0, TB = 0, TC = 0;
- if (drive->media != ide_disk &&
- drive->media != ide_cdrom && speed < XFER_SW_DMA_0)
- return -1;
-
+#if PDC202XX_DEBUG_DRIVE_INFO
+ u32 drive_conf = 0;
pci_read_config_dword(dev, drive_pci, &drive_conf);
- pci_read_config_byte(dev, (drive_pci), &AP);
- pci_read_config_byte(dev, (drive_pci)|0x01, &BP);
- pci_read_config_byte(dev, (drive_pci)|0x02, &CP);
- pci_read_config_byte(dev, (drive_pci)|0x03, &DP);
+#endif
- if (speed < XFER_SW_DMA_0) {
- if ((AP & 0x0F) || (BP & 0x07)) {
- /* clear PIO modes of lower 8421 bits of A Register */
- pci_write_config_byte(dev, (drive_pci), AP &~0x0F);
- pci_read_config_byte(dev, (drive_pci), &AP);
-
- /* clear PIO modes of lower 421 bits of B Register */
- pci_write_config_byte(dev, (drive_pci)|0x01, BP &~0x07);
- pci_read_config_byte(dev, (drive_pci)|0x01, &BP);
-
- pci_read_config_byte(dev, (drive_pci), &AP);
- pci_read_config_byte(dev, (drive_pci)|0x01, &BP);
- }
- } else {
- if ((BP & 0xF0) && (CP & 0x0F)) {
- /* clear DMA modes of upper 842 bits of B Register */
- /* clear PIO forced mode upper 1 bit of B Register */
- pci_write_config_byte(dev, (drive_pci)|0x01, BP &~0xF0);
- pci_read_config_byte(dev, (drive_pci)|0x01, &BP);
-
- /* clear DMA modes of lower 8421 bits of C Register */
- pci_write_config_byte(dev, (drive_pci)|0x02, CP &~0x0F);
- pci_read_config_byte(dev, (drive_pci)|0x02, &CP);
- }
- }
+ /*
+ * TODO: do this once per channel
+ */
+ if (dev->device != PCI_DEVICE_ID_PROMISE_20246)
+ pdc_old_disable_66MHz_clock(hwif);
- pci_read_config_byte(dev, (drive_pci), &AP);
- pci_read_config_byte(dev, (drive_pci)|0x01, &BP);
- pci_read_config_byte(dev, (drive_pci)|0x02, &CP);
+ pci_read_config_byte(dev, drive_pci, &AP);
+ pci_read_config_byte(dev, drive_pci + 1, &BP);
+ pci_read_config_byte(dev, drive_pci + 2, &CP);
switch(speed) {
- case XFER_UDMA_6: speed = XFER_UDMA_5;
case XFER_UDMA_5:
case XFER_UDMA_4: TB = 0x20; TC = 0x01; break;
case XFER_UDMA_2: TB = 0x20; TC = 0x01; break;
case XFER_UDMA_0:
case XFER_MW_DMA_2: TB = 0x60; TC = 0x03; break;
case XFER_MW_DMA_1: TB = 0x60; TC = 0x04; break;
- case XFER_MW_DMA_0:
+ case XFER_MW_DMA_0: TB = 0xE0; TC = 0x0F; break;
case XFER_SW_DMA_2: TB = 0x60; TC = 0x05; break;
case XFER_SW_DMA_1: TB = 0x80; TC = 0x06; break;
case XFER_SW_DMA_0: TB = 0xC0; TC = 0x0B; break;
}
if (speed < XFER_SW_DMA_0) {
- pci_write_config_byte(dev, (drive_pci), AP|TA);
- pci_write_config_byte(dev, (drive_pci)|0x01, BP|TB);
+ /*
+ * preserve SYNC_IN / ERRDY_EN bits while clearing
+ * Prefetch_EN / IORDY_EN / PA[3:0] bits of register A
+ */
+ AP &= ~0x3f;
+ if (drive->id->capability & 4)
+ AP |= 0x20; /* set IORDY_EN bit */
+ if (drive->media == ide_disk)
+ AP |= 0x10; /* set Prefetch_EN bit */
+ /* clear PB[4:0] bits of register B */
+ BP &= ~0x1f;
+ pci_write_config_byte(dev, drive_pci, AP | TA);
+ pci_write_config_byte(dev, drive_pci + 1, BP | TB);
} else {
- pci_write_config_byte(dev, (drive_pci)|0x01, BP|TB);
- pci_write_config_byte(dev, (drive_pci)|0x02, CP|TC);
+ /* clear MB[2:0] bits of register B */
+ BP &= ~0xe0;
+ /* clear MC[3:0] bits of register C */
+ CP &= ~0x0f;
+ pci_write_config_byte(dev, drive_pci + 1, BP | TB);
+ pci_write_config_byte(dev, drive_pci + 2, CP | TC);
}
#if PDC202XX_DEBUG_DRIVE_INFO
printk(KERN_DEBUG "%s: %s drive%d 0x%08x ",
drive->name, ide_xfer_verbose(speed),
drive->dn, drive_conf);
- pci_read_config_dword(dev, drive_pci, &drive_conf);
+ pci_read_config_dword(dev, drive_pci, &drive_conf);
printk("0x%08x\n", drive_conf);
-#endif /* PDC202XX_DEBUG_DRIVE_INFO */
+#endif
- return (ide_config_drive_speed(drive, speed));
+ return ide_config_drive_speed(drive, speed);
}
-
static void pdc202xx_tune_drive(ide_drive_t *drive, u8 pio)
{
pio = ide_get_best_pio_mode(drive, pio, 4, NULL);
* Set the control register to use the 66MHz system
* clock for UDMA 3/4/5 mode operation when necessary.
*
+ * FIXME: this register is shared by both channels, some locking is needed
+ *
* It may also be possible to leave the 66MHz clock on
* and readjust the timing parameters.
*/
outb(clock & ~(hwif->channel ? 0x08 : 0x02), clock_reg);
}
-static int config_chipset_for_dma (ide_drive_t *drive)
-{
- struct hd_driveid *id = drive->id;
- ide_hwif_t *hwif = HWIF(drive);
- struct pci_dev *dev = hwif->pci_dev;
- u32 drive_conf = 0;
- u8 drive_pci = 0x60 + (drive->dn << 2);
- u8 test1 = 0, test2 = 0, speed = -1;
- u8 AP = 0;
-
- if (dev->device != PCI_DEVICE_ID_PROMISE_20246)
- pdc_old_disable_66MHz_clock(drive->hwif);
-
- drive_pci = 0x60 + (drive->dn << 2);
- pci_read_config_dword(dev, drive_pci, &drive_conf);
- if ((drive_conf != 0x004ff304) && (drive_conf != 0x004ff3c4))
- goto chipset_is_set;
-
- pci_read_config_byte(dev, drive_pci, &test1);
- if (!(test1 & SYNC_ERRDY_EN)) {
- if (drive->select.b.unit & 0x01) {
- pci_read_config_byte(dev, drive_pci - 4, &test2);
- if ((test2 & SYNC_ERRDY_EN) &&
- !(test1 & SYNC_ERRDY_EN)) {
- pci_write_config_byte(dev, drive_pci,
- test1|SYNC_ERRDY_EN);
- }
- } else {
- pci_write_config_byte(dev, drive_pci,
- test1|SYNC_ERRDY_EN);
- }
- }
-
-chipset_is_set:
-
- pci_read_config_byte(dev, (drive_pci), &AP);
- if (id->capability & 4) /* IORDY_EN */
- pci_write_config_byte(dev, (drive_pci), AP|IORDY_EN);
- pci_read_config_byte(dev, (drive_pci), &AP);
- if (drive->media == ide_disk) /* PREFETCH_EN */
- pci_write_config_byte(dev, (drive_pci), AP|PREFETCH_EN);
-
- speed = ide_max_dma_mode(drive);
-
- if (!(speed)) {
- /* restore original pci-config space */
- pci_write_config_dword(dev, drive_pci, drive_conf);
- return 0;
- }
-
- (void) hwif->speedproc(drive, speed);
- return ide_dma_enable(drive);
-}
-
static int pdc202xx_config_drive_xfer_rate (ide_drive_t *drive)
{
drive->init_speed = 0;
- if (ide_use_dma(drive) && config_chipset_for_dma(drive))
+ if (ide_tune_dma(drive))
return 0;
if (ide_use_fast_pio(drive))
/*
- * linux/drivers/ide/pci/sc1200.c Version 0.91 28-Jan-2003
+ * linux/drivers/ide/pci/sc1200.c Version 0.94 Mar 10 2007
*
* Copyright (C) 2000-2002 Mark Lord <mlord@pobox.com>
+ * Copyright (C) 2007 Bartlomiej Zolnierkiewicz
+ *
* May be copied or modified under the terms of the GNU General Public License
*
* Development of this chipset driver was funded
*/
//#define SC1200_BAD_PIO(timings) (((timings)&~0x80000000)==0x00009172)
-static int sc1200_autoselect_dma_mode (ide_drive_t *drive)
+static void sc1200_tunepio(ide_drive_t *drive, u8 pio)
{
- int udma_ok = 1, mode = 0;
- ide_hwif_t *hwif = HWIF(drive);
- int unit = drive->select.b.unit;
- ide_drive_t *mate = &hwif->drives[unit^1];
- struct hd_driveid *id = drive->id;
-
- /*
- * The SC1200 specifies that two drives sharing a cable cannot
- * mix UDMA/MDMA. It has to be one or the other, for the pair,
- * though different timings can still be chosen for each drive.
- * We could set the appropriate timing bits on the fly,
- * but that might be a bit confusing. So, for now we statically
- * handle this requirement by looking at our mate drive to see
- * what it is capable of, before choosing a mode for our own drive.
- */
- if (mate->present) {
- struct hd_driveid *mateid = mate->id;
- if (mateid && (mateid->capability & 1) && !__ide_dma_bad_drive(mate)) {
- if ((mateid->field_valid & 4) && (mateid->dma_ultra & 7))
- udma_ok = 1;
- else if ((mateid->field_valid & 2) && (mateid->dma_mword & 7))
- udma_ok = 0;
- else
- udma_ok = 1;
- }
- }
- /*
- * Now see what the current drive is capable of,
- * selecting UDMA only if the mate said it was ok.
- */
- if (id && (id->capability & 1) && hwif->autodma && !__ide_dma_bad_drive(drive)) {
- if (udma_ok && (id->field_valid & 4) && (id->dma_ultra & 7)) {
- if (id->dma_ultra & 4)
- mode = XFER_UDMA_2;
- else if (id->dma_ultra & 2)
- mode = XFER_UDMA_1;
- else if (id->dma_ultra & 1)
- mode = XFER_UDMA_0;
- }
- if (!mode && (id->field_valid & 2) && (id->dma_mword & 7)) {
- if (id->dma_mword & 4)
- mode = XFER_MW_DMA_2;
- else if (id->dma_mword & 2)
- mode = XFER_MW_DMA_1;
- else if (id->dma_mword & 1)
- mode = XFER_MW_DMA_0;
- }
- }
- return mode;
+ ide_hwif_t *hwif = drive->hwif;
+ struct pci_dev *pdev = hwif->pci_dev;
+ unsigned int basereg = hwif->channel ? 0x50 : 0x40, format = 0;
+
+ pci_read_config_dword(pdev, basereg + 4, &format);
+ format = (format >> 31) & 1;
+ if (format)
+ format += sc1200_get_pci_clock();
+ pci_write_config_dword(pdev, basereg + ((drive->dn & 1) << 3),
+ sc1200_pio_timings[format][pio]);
}
/*
- * sc1200_config_dma2() handles selection/setting of DMA/UDMA modes
- * for both the chipset and drive.
+ * The SC1200 specifies that two drives sharing a cable cannot mix
+ * UDMA/MDMA. It has to be one or the other, for the pair, though
+ * different timings can still be chosen for each drive. We could
+ * set the appropriate timing bits on the fly, but that might be
+ * a bit confusing. So, for now we statically handle this requirement
+ * by looking at our mate drive to see what it is capable of, before
+ * choosing a mode for our own drive.
*/
-static int sc1200_config_dma2 (ide_drive_t *drive, int mode)
+static u8 sc1200_udma_filter(ide_drive_t *drive)
+{
+ ide_hwif_t *hwif = drive->hwif;
+ ide_drive_t *mate = &hwif->drives[(drive->dn & 1) ^ 1];
+ struct hd_driveid *mateid = mate->id;
+ u8 mask = hwif->ultra_mask;
+
+ if (mate->present == 0)
+ goto out;
+
+ if ((mateid->capability & 1) && __ide_dma_bad_drive(mate) == 0) {
+ if ((mateid->field_valid & 4) && (mateid->dma_ultra & 7))
+ goto out;
+ if ((mateid->field_valid & 2) && (mateid->dma_mword & 7))
+ mask = 0;
+ }
+out:
+ return mask;
+}
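
The udma_filter hook above keeps the interface's full UDMA mask unless the mate drive on the same cable advertises MWDMA but no UDMA, in which case UDMA is ruled out for the pair. A minimal user-space sketch of that decision, with a hypothetical struct fake_id standing in for the identify-data words used above:

#include <stdio.h>

/* Hypothetical stand-ins for the identify-data bits used above:
 * cap bit 0 = DMA capable, fv bit 2 = UDMA words valid,
 * fv bit 1 = MWDMA words valid. */
struct fake_id { unsigned cap, fv, udma, mwdma; };

static unsigned udma_filter(const struct fake_id *mate, unsigned ultra_mask)
{
	if (!mate)                       /* no mate: keep full mask */
		return ultra_mask;
	if ((mate->cap & 1) == 0)        /* mate can't do DMA at all */
		return ultra_mask;
	if ((mate->fv & 4) && (mate->udma & 7))
		return ultra_mask;       /* mate does UDMA too: OK */
	if ((mate->fv & 2) && (mate->mwdma & 7))
		return 0;                /* mate is MWDMA-only: no UDMA */
	return ultra_mask;
}

int main(void)
{
	struct fake_id mwdma_mate = { .cap = 1, .fv = 2, .mwdma = 4 };
	struct fake_id udma_mate  = { .cap = 1, .fv = 4, .udma  = 4 };

	printf("MWDMA-only mate: mask 0x%02x\n", udma_filter(&mwdma_mate, 0x07));
	printf("UDMA mate:       mask 0x%02x\n", udma_filter(&udma_mate, 0x07));
	return 0;
}
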
+
+static int sc1200_tune_chipset(ide_drive_t *drive, u8 mode)
{
ide_hwif_t *hwif = HWIF(drive);
int unit = drive->select.b.unit;
unsigned short pci_clock;
unsigned int basereg = hwif->channel ? 0x50 : 0x40;
- /*
- * Default to DMA-off in case we run into trouble here.
- */
- hwif->dma_off_quietly(drive); /* turn off DMA while we fiddle */
- outb(inb(hwif->dma_base+2)&~(unit?0x40:0x20), hwif->dma_base+2); /* clear DMA_capable bit */
+ mode = ide_rate_filter(drive, mode);
/*
* Tell the drive to switch to the new mode; abort on failure.
*/
- if (!mode || sc1200_set_xfer_mode(drive, mode)) {
+ if (sc1200_set_xfer_mode(drive, mode)) {
printk("SC1200: set xfer mode failure\n");
return 1; /* failure */
}
+ switch (mode) {
+ case XFER_PIO_4:
+ case XFER_PIO_3:
+ case XFER_PIO_2:
+ case XFER_PIO_1:
+ case XFER_PIO_0:
+ sc1200_tunepio(drive, mode - XFER_PIO_0);
+ return 0;
+ }
+
pci_clock = sc1200_get_pci_clock();
/*
case PCI_CLK_66: timings = 0x00015151; break;
}
break;
- }
-
- if (timings == 0) {
- printk("%s: sc1200_config_dma: huh? mode=%02x clk=%x \n", drive->name, mode, pci_clock);
- return 1; /* failure */
+ default:
+ BUG();
+ break;
}
if (unit == 0) { /* are we configuring drive0? */
pci_write_config_dword(hwif->pci_dev, basereg+12, timings);
}
- outb(inb(hwif->dma_base+2)|(unit?0x40:0x20), hwif->dma_base+2); /* set DMA_capable bit */
-
return 0; /* success */
}
*/
static int sc1200_config_dma (ide_drive_t *drive)
{
- return sc1200_config_dma2(drive, sc1200_autoselect_dma_mode(drive));
+ if (ide_tune_dma(drive))
+ return 0;
+
+ return 1;
}
static void sc1200_tuneproc (ide_drive_t *drive, byte pio) /* mode=255 means "autotune" */
{
ide_hwif_t *hwif = HWIF(drive);
- unsigned int format;
- static byte modes[5] = {XFER_PIO_0, XFER_PIO_1, XFER_PIO_2, XFER_PIO_3, XFER_PIO_4};
int mode = -1;
+ /*
+ * bad abuse of ->tuneproc interface
+ */
switch (pio) {
case 200: mode = XFER_UDMA_0; break;
case 201: mode = XFER_UDMA_1; break;
}
if (mode != -1) {
printk("SC1200: %s: changing (U)DMA mode\n", drive->name);
- (void)sc1200_config_dma2(drive, mode);
+ hwif->dma_off_quietly(drive);
+ if (sc1200_tune_chipset(drive, mode) == 0)
+ hwif->dma_host_on(drive);
return;
}
pio = ide_get_best_pio_mode(drive, pio, 4, NULL);
printk("SC1200: %s: setting PIO mode%d\n", drive->name, pio);
- if (!sc1200_set_xfer_mode(drive, modes[pio])) {
- unsigned int basereg = hwif->channel ? 0x50 : 0x40;
- pci_read_config_dword (hwif->pci_dev, basereg+4, &format);
- format = (format >> 31) & 1;
- if (format)
- format += sc1200_get_pci_clock();
- pci_write_config_dword(hwif->pci_dev, basereg + (drive->select.b.unit << 3), sc1200_pio_timings[format][pio]);
- }
+
+ if (sc1200_set_xfer_mode(drive, XFER_PIO_0 + pio) == 0)
+ sc1200_tunepio(drive, pio);
}
#ifdef CONFIG_PM
for (d = 0; d < MAX_DRIVES; ++d) {
ide_drive_t *drive = &(hwif->drives[d]);
if (drive->present && !__ide_dma_bad_drive(drive)) {
- int was_using_dma = drive->using_dma;
+ int enable_dma = drive->using_dma;
hwif->dma_off_quietly(drive);
- sc1200_config_dma(drive);
- if (!was_using_dma && drive->using_dma) {
- hwif->dma_off_quietly(drive);
- }
+ if (sc1200_config_dma(drive))
+ enable_dma = 0;
+ if (enable_dma)
+ hwif->dma_host_on(drive);
}
}
}
hwif->serialized = hwif->mate->serialized = 1;
hwif->autodma = 0;
if (hwif->dma_base) {
+ hwif->udma_filter = sc1200_udma_filter;
hwif->ide_dma_check = &sc1200_config_dma;
hwif->ide_dma_end = &sc1200_ide_dma_end;
if (!noautodma)
hwif->autodma = 1;
hwif->tuneproc = &sc1200_tuneproc;
+ hwif->speedproc = &sc1200_tune_chipset;
}
hwif->atapi_dma = 1;
hwif->ultra_mask = 0x07;
return ide_config_drive_speed(drive, speed);
}
-/**
- * scc_config_chipset_for_dma - configure for DMA
- * @drive: drive to configure
- *
- * Called by scc_config_drive_for_dma().
- */
-
-static int scc_config_chipset_for_dma(ide_drive_t *drive)
-{
- u8 speed = ide_max_dma_mode(drive);
-
- if (!speed)
- return 0;
-
- if (scc_tune_chipset(drive, speed))
- return 0;
-
- return ide_dma_enable(drive);
-}
-
/**
* scc_configure_drive_for_dma - set up for DMA transfers
* @drive: drive we are going to set up
static int scc_config_drive_for_dma(ide_drive_t *drive)
{
- if (ide_use_dma(drive) && scc_config_chipset_for_dma(drive))
+ if (ide_tune_dma(drive))
return 0;
if (ide_use_fast_pio(drive))
/*
- * linux/drivers/ide/pci/serverworks.c Version 0.8 25 Ebr 2003
+ * linux/drivers/ide/pci/serverworks.c Version 0.9 Mar 4 2007
*
* Copyright (C) 1998-2000 Michel Aubry
* Copyright (C) 1998-2000 Andrzej Krzysztofowicz
* Copyright (C) 1998-2000 Andre Hedrick <andre@linux-ide.org>
+ * Copyright (C) 2007 Bartlomiej Zolnierkiewicz
* Portions copyright (c) 2001 Sun Microsystems
*
*
ide_hwif_t *hwif = HWIF(drive);
struct pci_dev *dev = hwif->pci_dev;
- u8 speed;
- u8 pio = ide_get_best_pio_mode(drive, 255, 5, NULL);
+ u8 speed = ide_rate_filter(drive, xferspeed);
+ u8 pio = ide_get_best_pio_mode(drive, 255, 4, NULL);
u8 unit = (drive->select.b.unit & 0x01);
u8 csb5 = svwks_csb_check(dev);
u8 ultra_enable = 0, ultra_timing = 0;
u8 dma_timing = 0, pio_timing = 0;
u16 csb5_pio = 0;
- if (xferspeed == 255) /* PIO auto-tuning */
- speed = XFER_PIO_0 + pio;
- else
- speed = ide_rate_filter(drive, xferspeed);
-
/* If we are about to put a disk into UDMA mode we screwed up.
Our code assumes we never _ever_ do this on an OSB4 */
case XFER_MW_DMA_2:
case XFER_MW_DMA_1:
case XFER_MW_DMA_0:
+ /*
+ * TODO: always setup PIO mode so this won't be needed
+ */
pio_timing |= pio_modes[pio];
csb5_pio |= (pio << (4*drive->dn));
dma_timing |= dma_modes[speed - XFER_MW_DMA_0];
case XFER_UDMA_2:
case XFER_UDMA_1:
case XFER_UDMA_0:
+ /*
+ * TODO: always setup PIO mode so this won't be needed
+ */
pio_timing |= pio_modes[pio];
csb5_pio |= (pio << (4*drive->dn));
dma_timing |= dma_modes[2];
return (ide_config_drive_speed(drive, speed));
}
-static void config_chipset_for_pio (ide_drive_t *drive)
-{
- u16 eide_pio_timing[6] = {960, 480, 240, 180, 120, 90};
- u16 xfer_pio = drive->id->eide_pio_modes;
- u8 timing, speed, pio;
-
- pio = ide_get_best_pio_mode(drive, 255, 5, NULL);
-
- if (xfer_pio > 4)
- xfer_pio = 0;
-
- if (drive->id->eide_pio_iordy > 0)
- for (xfer_pio = 5;
- xfer_pio>0 &&
- drive->id->eide_pio_iordy>eide_pio_timing[xfer_pio];
- xfer_pio--);
- else
- xfer_pio = (drive->id->eide_pio_modes & 4) ? 0x05 :
- (drive->id->eide_pio_modes & 2) ? 0x04 :
- (drive->id->eide_pio_modes & 1) ? 0x03 :
- (drive->id->tPIO & 2) ? 0x02 :
- (drive->id->tPIO & 1) ? 0x01 : xfer_pio;
-
- timing = (xfer_pio >= pio) ? xfer_pio : pio;
-
- switch(timing) {
- case 4: speed = XFER_PIO_4;break;
- case 3: speed = XFER_PIO_3;break;
- case 2: speed = XFER_PIO_2;break;
- case 1: speed = XFER_PIO_1;break;
- default:
- speed = (!drive->id->tPIO) ? XFER_PIO_0 : XFER_PIO_SLOW;
- break;
- }
- (void) svwks_tune_chipset(drive, speed);
- drive->current_speed = speed;
-}
-
static void svwks_tune_drive (ide_drive_t *drive, u8 pio)
{
- if(pio == 255)
- (void) svwks_tune_chipset(drive, 255);
- else
- (void) svwks_tune_chipset(drive, (XFER_PIO_0 + pio));
-}
-
-static int config_chipset_for_dma (ide_drive_t *drive)
-{
- u8 speed = ide_max_dma_mode(drive);
-
- if (!(speed))
- speed = XFER_PIO_0 + ide_get_best_pio_mode(drive, 255, 5, NULL);
-
- (void) svwks_tune_chipset(drive, speed);
- return ide_dma_enable(drive);
+ pio = ide_get_best_pio_mode(drive, pio, 4, NULL);
+ (void)svwks_tune_chipset(drive, XFER_PIO_0 + pio);
}
static int svwks_config_drive_xfer_rate (ide_drive_t *drive)
{
drive->init_speed = 0;
- if (ide_use_dma(drive) && config_chipset_for_dma(drive))
+ if (ide_tune_dma(drive))
return 0;
if (ide_use_fast_pio(drive))
- config_chipset_for_pio(drive);
+ svwks_tune_drive(drive, 255);
return -1;
}
return (ide_config_drive_speed(drive, speed));
}
-/**
- * config_chipset_for_dma - configure for DMA
- * @drive: drive to configure
- *
- * Called by the IDE layer when it wants the timings set up.
- * For the CMD680 we also need to set up the PIO timings and
- * enable DMA.
- */
-
-static int config_chipset_for_dma (ide_drive_t *drive)
-{
- u8 speed = ide_max_dma_mode(drive);
-
- if (!speed)
- return 0;
-
- if (siimage_tune_chipset(drive, speed))
- return 0;
-
- return ide_dma_enable(drive);
-}
-
/**
* siimage_configure_drive_for_dma - set up for DMA transfers
* @drive: drive we are going to set up
static int siimage_config_drive_for_dma (ide_drive_t *drive)
{
- if (ide_use_dma(drive) && config_chipset_for_dma(drive))
+ if (ide_tune_dma(drive))
return 0;
if (ide_use_fast_pio(drive))
/*
- * linux/drivers/ide/pci/sis5513.c Version 0.16ac+vp Jun 18, 2003
+ * linux/drivers/ide/pci/sis5513.c Version 0.20 Mar 4, 2007
*
* Copyright (C) 1999-2000 Andre Hedrick <andre@linux-ide.org>
* Copyright (C) 2002 Lionel Bouton <Lionel.Bouton@inet6.fr>, Maintainer
* Copyright (C) 2003 Vojtech Pavlik <vojtech@suse.cz>
+ * Copyright (C) 2007 Bartlomiej Zolnierkiewicz
+ *
* May be copied or modified under the terms of the GNU General Public License
*
*
pci_write_config_byte(dev, 0x4b, reg4bh|rw_prefetch);
}
-
/* Set per-drive active and recovery time */
static void config_art_rwp_pio (ide_drive_t *drive, u8 pio)
{
ide_hwif_t *hwif = HWIF(drive);
struct pci_dev *dev = hwif->pci_dev;
- u8 timing, drive_pci, test1, test2;
-
- u16 eide_pio_timing[6] = {600, 390, 240, 180, 120, 90};
- u16 xfer_pio = drive->id->eide_pio_modes;
+ u8 drive_pci, test1, test2;
config_drive_art_rwp(drive);
- pio = ide_get_best_pio_mode(drive, 255, pio, NULL);
-
- if (xfer_pio> 4)
- xfer_pio = 0;
-
- if (drive->id->eide_pio_iordy > 0) {
- for (xfer_pio = 5;
- (xfer_pio > 0) &&
- (drive->id->eide_pio_iordy > eide_pio_timing[xfer_pio]);
- xfer_pio--);
- } else {
- xfer_pio = (drive->id->eide_pio_modes & 4) ? 0x05 :
- (drive->id->eide_pio_modes & 2) ? 0x04 :
- (drive->id->eide_pio_modes & 1) ? 0x03 : xfer_pio;
- }
-
- timing = (xfer_pio >= pio) ? xfer_pio : pio;
/* In pre ATA_133 case, drives sit at 0x40 + 4*drive->dn */
drive_pci = 0x40;
test1 &= ~0x0F;
test2 &= ~0x07;
- switch(timing) {
+ switch(pio) {
case 4: test1 |= 0x01; test2 |= 0x03; break;
case 3: test1 |= 0x03; test2 |= 0x03; break;
case 2: test1 |= 0x04; test2 |= 0x04; break;
case 1: test1 |= 0x07; test2 |= 0x06; break;
+ case 0: /* PIO0: register setting == X000 */
default: break;
}
pci_write_config_byte(dev, drive_pci, test1);
pci_write_config_byte(dev, drive_pci+1, test2);
} else if (chipset_family < ATA_133) {
- switch(timing) { /* active recovery
+ switch(pio) { /* active recovery
v v */
case 4: test1 = 0x30|0x01; break;
case 3: test1 = 0x30|0x03; break;
pci_read_config_dword(dev, drive_pci, &test3);
test3 &= 0xc0c00fff;
if (test3 & 0x08) {
- test3 |= (unsigned long)ini_time_value[ATA_133][timing] << 12;
- test3 |= (unsigned long)act_time_value[ATA_133][timing] << 16;
- test3 |= (unsigned long)rco_time_value[ATA_133][timing] << 24;
+ test3 |= ini_time_value[ATA_133][pio] << 12;
+ test3 |= act_time_value[ATA_133][pio] << 16;
+ test3 |= rco_time_value[ATA_133][pio] << 24;
} else {
- test3 |= (unsigned long)ini_time_value[ATA_100][timing] << 12;
- test3 |= (unsigned long)act_time_value[ATA_100][timing] << 16;
- test3 |= (unsigned long)rco_time_value[ATA_100][timing] << 24;
+ test3 |= ini_time_value[ATA_100][pio] << 12;
+ test3 |= act_time_value[ATA_100][pio] << 16;
+ test3 |= rco_time_value[ATA_100][pio] << 24;
}
pci_write_config_dword(dev, drive_pci, test3);
}
}
-static int config_chipset_for_pio (ide_drive_t *drive, u8 pio)
+static int sis5513_tune_drive(ide_drive_t *drive, u8 pio)
{
- if (pio == 255)
- pio = ide_find_best_mode(drive, XFER_PIO | XFER_EPIO) - XFER_PIO_0;
+ pio = ide_get_best_pio_mode(drive, pio, 4, NULL);
config_art_rwp_pio(drive, pio);
- return ide_config_drive_speed(drive, XFER_PIO_0 + min_t(u8, pio, 4));
+ return ide_config_drive_speed(drive, XFER_PIO_0 + pio);
+}
+
+static void sis5513_tuneproc(ide_drive_t *drive, u8 pio)
+{
+ (void)sis5513_tune_drive(drive, pio);
}
static int sis5513_tune_chipset (ide_drive_t *drive, u8 xferspeed)
case XFER_SW_DMA_1:
case XFER_SW_DMA_0:
break;
- case XFER_PIO_4: return((int) config_chipset_for_pio(drive, 4));
- case XFER_PIO_3: return((int) config_chipset_for_pio(drive, 3));
- case XFER_PIO_2: return((int) config_chipset_for_pio(drive, 2));
- case XFER_PIO_1: return((int) config_chipset_for_pio(drive, 1));
+ case XFER_PIO_4:
+ case XFER_PIO_3:
+ case XFER_PIO_2:
+ case XFER_PIO_1:
case XFER_PIO_0:
- default: return((int) config_chipset_for_pio(drive, 0));
+ return sis5513_tune_drive(drive, speed - XFER_PIO_0);
+ default:
+ BUG();
+ break;
}
- return ((int) ide_config_drive_speed(drive, speed));
-}
-
-static void sis5513_tune_drive (ide_drive_t *drive, u8 pio)
-{
- (void) config_chipset_for_pio(drive, pio);
+ return ide_config_drive_speed(drive, speed);
}
static int sis5513_config_xfer_rate(ide_drive_t *drive)
{
- config_art_rwp_pio(drive, 5);
+ /*
+ * TODO: always set PIO mode and remove this
+ */
+ sis5513_tuneproc(drive, 255);
drive->init_speed = 0;
return 0;
if (ide_use_fast_pio(drive))
- sis5513_tune_drive(drive, 5);
+ sis5513_tuneproc(drive, 255);
return -1;
}
if (!hwif->irq)
hwif->irq = hwif->channel ? 15 : 14;
- hwif->tuneproc = &sis5513_tune_drive;
+ hwif->tuneproc = &sis5513_tuneproc;
hwif->speedproc = &sis5513_tune_chipset;
if (!(hwif->dma_base)) {
pio = ide_get_best_pio_mode(drive, pio, 5, &p);
- drive->drive_data = drv_ctrl = get_pio_timings(&p);
+ drv_ctrl = get_pio_timings(&p);
+
+ /*
+ * Store the PIO timings so that we can restore them
+ * in case DMA will be turned off...
+ */
+ drive->drive_data &= 0xffff0000;
+ drive->drive_data |= drv_ctrl;
if (!drive->using_dma) {
/*
}
/*
- * Configure the drive for DMA.
- * We'll program the chipset only when DMA is actually turned on.
+ * Configure the drive and chipset for a new transfer speed.
*/
-static int config_for_dma(ide_drive_t *drive)
+static int sl82c105_tune_chipset(ide_drive_t *drive, u8 speed)
{
- DBG(("config_for_dma(drive:%s)\n", drive->name));
+ static u16 mwdma_timings[] = {0x0707, 0x0201, 0x0200};
+ u16 drv_ctrl;
- if (ide_config_drive_speed(drive, XFER_MW_DMA_2) != 0)
- return 0;
+ DBG(("sl82c105_tune_chipset(drive:%s, speed:%s)\n",
+ drive->name, ide_xfer_verbose(speed)));
- return ide_dma_enable(drive);
+ speed = ide_rate_filter(drive, speed);
+
+ switch (speed) {
+ case XFER_MW_DMA_2:
+ case XFER_MW_DMA_1:
+ case XFER_MW_DMA_0:
+ drv_ctrl = mwdma_timings[speed - XFER_MW_DMA_0];
+
+ /*
+ * Store the DMA timings so that we can actually program
+ * them when DMA will be turned on...
+ */
+ drive->drive_data &= 0x0000ffff;
+ drive->drive_data |= (unsigned long)drv_ctrl << 16;
+
+ /*
+ * If we are already using DMA, we just reprogram
+ * the drive control register.
+ */
+ if (drive->using_dma) {
+ struct pci_dev *dev = HWIF(drive)->pci_dev;
+ int reg = 0x44 + drive->dn * 4;
+
+ pci_write_config_word(dev, reg, drv_ctrl);
+ }
+ break;
+ case XFER_PIO_5:
+ case XFER_PIO_4:
+ case XFER_PIO_3:
+ case XFER_PIO_2:
+ case XFER_PIO_1:
+ case XFER_PIO_0:
+ (void) sl82c105_tune_pio(drive, speed - XFER_PIO_0);
+ break;
+ default:
+ return -1;
+ }
+
+ return ide_config_drive_speed(drive, speed);
}
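
sl82c105_tune_chipset and sl82c105_tune_pio together treat drive->drive_data as two 16-bit halves: PIO timings live in the low word (restored if DMA is later turned off) and MWDMA timings in the high word (programmed when DMA is turned on). A standalone sketch of the packing, with made-up timing words:

#include <stdio.h>

int main(void)
{
	unsigned long drive_data = 0;
	unsigned short pio_ctrl = 0x0409;   /* hypothetical PIO timings   */
	unsigned short dma_ctrl = 0x0200;   /* hypothetical MWDMA timings */

	/* Store PIO timings in the low 16 bits, as the PIO path does */
	drive_data &= 0xffff0000;
	drive_data |= pio_ctrl;

	/* Store DMA timings in the high 16 bits, as the MWDMA cases do */
	drive_data &= 0x0000ffff;
	drive_data |= (unsigned long)dma_ctrl << 16;

	printf("PIO word: 0x%04lx, DMA word: 0x%04lx\n",
	       drive_data & 0xffff, drive_data >> 16);
	return 0;
}
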
/*
{
DBG(("sl82c105_ide_dma_check(drive:%s)\n", drive->name));
- if (ide_use_dma(drive) && config_for_dma(drive))
+ if (ide_tune_dma(drive))
return 0;
return -1;
rc = __ide_dma_on(drive);
if (rc == 0) {
- pci_write_config_word(dev, reg, 0x0200);
+ pci_write_config_word(dev, reg, drive->drive_data >> 16);
printk(KERN_INFO "%s: DMA enabled\n", drive->name);
}
/*
* The bridge should be part of the same device, but function 0.
*/
- bridge = pci_find_slot(dev->bus->number,
+ bridge = pci_get_bus_and_slot(dev->bus->number,
PCI_DEVFN(PCI_SLOT(dev->devfn), 0));
if (!bridge)
return -1;
*/
if (bridge->vendor != PCI_VENDOR_ID_WINBOND ||
bridge->device != PCI_DEVICE_ID_WINBOND_83C553 ||
- bridge->class >> 8 != PCI_CLASS_BRIDGE_ISA)
+ bridge->class >> 8 != PCI_CLASS_BRIDGE_ISA) {
+ pci_dev_put(bridge);
return -1;
-
+ }
/*
* We need to find function 0's revision, not function 1
*/
pci_read_config_byte(bridge, PCI_REVISION_ID, &rev);
+ pci_dev_put(bridge);
return rev;
}
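
pci_get_bus_and_slot() returns a referenced device, so the rewritten function must call pci_dev_put() on every exit path, including the early vendor/class mismatch. A user-space sketch of the same put-on-every-path discipline, with a hypothetical get/put pair in place of the PCI core calls:

#include <stdio.h>
#include <stdlib.h>

struct dev { int refs; int rev; };

static struct dev *get_dev(void)        /* like pci_get_bus_and_slot */
{
	struct dev *d = malloc(sizeof(*d));
	if (d) { d->refs = 1; d->rev = 6; }
	return d;
}

static void put_dev(struct dev *d)      /* like pci_dev_put */
{
	if (d && --d->refs == 0)
		free(d);
}

static int bridge_rev(int looks_right)
{
	struct dev *bridge = get_dev();
	int rev;

	if (!bridge)
		return -1;
	if (!looks_right) {             /* wrong vendor/device/class */
		put_dev(bridge);        /* must drop the ref here too */
		return -1;
	}
	rev = bridge->rev;
	put_dev(bridge);
	return rev;
}

int main(void)
{
	printf("match: rev=%d\n", bridge_rev(1));
	printf("mismatch: rev=%d\n", bridge_rev(0));
	return 0;
}
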
DBG(("init_hwif_sl82c105(hwif: ide%d)\n", hwif->index));
hwif->tuneproc = &sl82c105_tune_drive;
+ hwif->speedproc = &sl82c105_tune_chipset;
hwif->selectproc = &sl82c105_selectproc;
hwif->resetproc = &sl82c105_resetproc;
}
hwif->atapi_dma = 1;
- hwif->mwdma_mask = 0x04;
+ hwif->mwdma_mask = 0x07;
hwif->ide_dma_check = &sl82c105_ide_dma_check;
hwif->ide_dma_on = &sl82c105_ide_dma_on;
complete(&id_priv->comp);
}
-static void cma_release_remove(struct rdma_id_private *id_priv)
+static int cma_disable_remove(struct rdma_id_private *id_priv,
+ enum cma_state state)
+{
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&id_priv->lock, flags);
+ if (id_priv->state == state) {
+ atomic_inc(&id_priv->dev_remove);
+ ret = 0;
+ } else
+ ret = -EINVAL;
+ spin_unlock_irqrestore(&id_priv->lock, flags);
+ return ret;
+}
+
+static void cma_enable_remove(struct rdma_id_private *id_priv)
{
if (atomic_dec_and_test(&id_priv->dev_remove))
wake_up(&id_priv->wait_remove);
}
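
cma_disable_remove() replaces the old unconditional atomic_inc() of dev_remove: the reference is taken under the lock and only if the id is still in the expected state, so callers can bail out immediately instead of pinning an id that a concurrent removal is tearing down. A user-space sketch of the same check-state-then-pin pattern, using pthreads and hypothetical names:

#include <pthread.h>
#include <stdio.h>

enum state { ST_CONNECT, ST_DESTROYING };

struct obj {
	pthread_mutex_t lock;
	enum state state;
	int pin_count;          /* stands in for dev_remove */
};

/* Pin the object only if it is still in the expected state. */
static int disable_remove(struct obj *o, enum state expected)
{
	int ret = -1;

	pthread_mutex_lock(&o->lock);
	if (o->state == expected) {
		o->pin_count++;
		ret = 0;
	}
	pthread_mutex_unlock(&o->lock);
	return ret;
}

int main(void)
{
	struct obj o = { PTHREAD_MUTEX_INITIALIZER, ST_CONNECT, 0 };

	if (disable_remove(&o, ST_CONNECT) == 0)
		printf("pinned, pin_count=%d\n", o.pin_count);
	return 0;
}
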
+static int cma_has_cm_dev(struct rdma_id_private *id_priv)
+{
+ return (id_priv->id.device && id_priv->cm_id.ib);
+}
+
struct rdma_cm_id *rdma_create_id(rdma_cm_event_handler event_handler,
void *context, enum rdma_port_space ps)
{
struct rdma_cm_event event;
int ret = 0;
- atomic_inc(&id_priv->dev_remove);
- if (!cma_comp(id_priv, CMA_CONNECT))
- goto out;
+ if (cma_disable_remove(id_priv, CMA_CONNECT))
+ return 0;
memset(&event, 0, sizeof event);
switch (ib_event->event) {
/* Destroy the CM ID by returning a non-zero value. */
id_priv->cm_id.ib = NULL;
cma_exch(id_priv, CMA_DESTROYING);
- cma_release_remove(id_priv);
+ cma_enable_remove(id_priv);
rdma_destroy_id(&id_priv->id);
return ret;
}
out:
- cma_release_remove(id_priv);
+ cma_enable_remove(id_priv);
return ret;
}
int offset, ret;
listen_id = cm_id->context;
- atomic_inc(&listen_id->dev_remove);
- if (!cma_comp(listen_id, CMA_LISTEN)) {
- ret = -ECONNABORTED;
- goto out;
- }
+ if (cma_disable_remove(listen_id, CMA_LISTEN))
+ return -ECONNABORTED;
memset(&event, 0, sizeof event);
offset = cma_user_data_offset(listen_id->id.ps);
release_conn_id:
cma_exch(conn_id, CMA_DESTROYING);
- cma_release_remove(conn_id);
+ cma_enable_remove(conn_id);
rdma_destroy_id(&conn_id->id);
out:
- cma_release_remove(listen_id);
+ cma_enable_remove(listen_id);
return ret;
}
struct sockaddr_in *sin;
int ret = 0;
- memset(&event, 0, sizeof event);
- atomic_inc(&id_priv->dev_remove);
+ if (cma_disable_remove(id_priv, CMA_CONNECT))
+ return 0;
+ memset(&event, 0, sizeof event);
switch (iw_event->event) {
case IW_CM_EVENT_CLOSE:
event.event = RDMA_CM_EVENT_DISCONNECTED;
/* Destroy the CM ID by returning a non-zero value. */
id_priv->cm_id.iw = NULL;
cma_exch(id_priv, CMA_DESTROYING);
- cma_release_remove(id_priv);
+ cma_enable_remove(id_priv);
rdma_destroy_id(&id_priv->id);
return ret;
}
- cma_release_remove(id_priv);
+ cma_enable_remove(id_priv);
return ret;
}
int ret;
listen_id = cm_id->context;
- atomic_inc(&listen_id->dev_remove);
- if (!cma_comp(listen_id, CMA_LISTEN)) {
- ret = -ECONNABORTED;
- goto out;
- }
+ if (cma_disable_remove(listen_id, CMA_LISTEN))
+ return -ECONNABORTED;
/* Create a new RDMA id for the new IW CM ID */
new_cm_id = rdma_create_id(listen_id->id.event_handler,
dev = ip_dev_find(iw_event->local_addr.sin_addr.s_addr);
if (!dev) {
ret = -EADDRNOTAVAIL;
- cma_release_remove(conn_id);
+ cma_enable_remove(conn_id);
rdma_destroy_id(new_cm_id);
goto out;
}
ret = rdma_copy_addr(&conn_id->id.route.addr.dev_addr, dev, NULL);
if (ret) {
- cma_release_remove(conn_id);
+ cma_enable_remove(conn_id);
rdma_destroy_id(new_cm_id);
goto out;
}
ret = cma_acquire_dev(conn_id);
mutex_unlock(&lock);
if (ret) {
- cma_release_remove(conn_id);
+ cma_enable_remove(conn_id);
rdma_destroy_id(new_cm_id);
goto out;
}
/* User wants to destroy the CM ID */
conn_id->cm_id.iw = NULL;
cma_exch(conn_id, CMA_DESTROYING);
- cma_release_remove(conn_id);
+ cma_enable_remove(conn_id);
rdma_destroy_id(&conn_id->id);
}
out:
if (dev)
dev_put(dev);
- cma_release_remove(listen_id);
+ cma_enable_remove(listen_id);
return ret;
}
destroy = 1;
}
out:
- cma_release_remove(id_priv);
+ cma_enable_remove(id_priv);
cma_deref_id(id_priv);
if (destroy)
rdma_destroy_id(&id_priv->id);
if (id_priv->id.event_handler(&id_priv->id, &event)) {
cma_exch(id_priv, CMA_DESTROYING);
- cma_release_remove(id_priv);
+ cma_enable_remove(id_priv);
cma_deref_id(id_priv);
rdma_destroy_id(&id_priv->id);
return;
}
out:
- cma_release_remove(id_priv);
+ cma_enable_remove(id_priv);
cma_deref_id(id_priv);
}
struct ib_cm_sidr_rep_event_param *rep = &ib_event->param.sidr_rep_rcvd;
int ret = 0;
- memset(&event, 0, sizeof event);
- atomic_inc(&id_priv->dev_remove);
- if (!cma_comp(id_priv, CMA_CONNECT))
- goto out;
+ if (cma_disable_remove(id_priv, CMA_CONNECT))
+ return 0;
+ memset(&event, 0, sizeof event);
switch (ib_event->event) {
case IB_CM_SIDR_REQ_ERROR:
event.event = RDMA_CM_EVENT_UNREACHABLE;
/* Destroy the CM ID by returning a non-zero value. */
id_priv->cm_id.ib = NULL;
cma_exch(id_priv, CMA_DESTROYING);
- cma_release_remove(id_priv);
+ cma_enable_remove(id_priv);
rdma_destroy_id(&id_priv->id);
return ret;
}
out:
- cma_release_remove(id_priv);
+ cma_enable_remove(id_priv);
return ret;
}
int ret;
id_priv = container_of(id, struct rdma_id_private, id);
- if (!cma_comp(id_priv, CMA_CONNECT))
+ if (!cma_has_cm_dev(id_priv))
return -EINVAL;
switch (id->device->node_type) {
int ret;
id_priv = container_of(id, struct rdma_id_private, id);
- if (!cma_comp(id_priv, CMA_CONNECT))
+ if (!cma_has_cm_dev(id_priv))
return -EINVAL;
switch (rdma_node_get_transport(id->device->node_type)) {
int ret;
id_priv = container_of(id, struct rdma_id_private, id);
- if (!cma_comp(id_priv, CMA_CONNECT) &&
- !cma_comp(id_priv, CMA_DISCONNECT))
+ if (!cma_has_cm_dev(id_priv))
return -EINVAL;
switch (rdma_node_get_transport(id->device->node_type)) {
int ret;
id_priv = mc->id_priv;
- atomic_inc(&id_priv->dev_remove);
- if (!cma_comp(id_priv, CMA_ADDR_BOUND) &&
- !cma_comp(id_priv, CMA_ADDR_RESOLVED))
- goto out;
+ if (cma_disable_remove(id_priv, CMA_ADDR_BOUND) &&
+ cma_disable_remove(id_priv, CMA_ADDR_RESOLVED))
+ return 0;
if (!status && id_priv->id.qp)
status = ib_attach_mcast(id_priv->id.qp, &multicast->rec.mgid,
ret = id_priv->id.event_handler(&id_priv->id, &event);
if (ret) {
cma_exch(id_priv, CMA_DESTROYING);
- cma_release_remove(id_priv);
+ cma_enable_remove(id_priv);
rdma_destroy_id(&id_priv->id);
return 0;
}
-out:
- cma_release_remove(id_priv);
+
+ cma_enable_remove(id_priv);
return 0;
}
extern spinlock_t ehca_qp_idr_lock;
extern spinlock_t ehca_cq_idr_lock;
+extern spinlock_t hcall_lock;
extern struct idr ehca_qp_idr;
extern struct idr ehca_cq_idr;
else {
struct ehca_cq *cq = eq->eqe_cache[i].cq;
comp_event_callback(cq);
- spin_lock_irqsave(&ehca_cq_idr_lock, flags);
+ spin_lock(&ehca_cq_idr_lock);
cq->nr_events--;
if (!cq->nr_events)
wake_up(&cq->wait_completion);
- spin_unlock_irqrestore(&ehca_cq_idr_lock,
- flags);
+ spin_unlock(&ehca_cq_idr_lock);
}
} else {
ehca_dbg(&shca->ib_device, "Got non completion event");
kthread_stop(task);
}
+#ifdef CONFIG_HOTPLUG_CPU
static void take_over_work(struct ehca_comp_pool *pool,
int cpu)
{
}
-#ifdef CONFIG_HOTPLUG_CPU
static int comp_pool_callback(struct notifier_block *nfb,
unsigned long action,
void *hcpu)
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Christoph Raisch <raisch@de.ibm.com>");
MODULE_DESCRIPTION("IBM eServer HCA InfiniBand Device Driver");
-MODULE_VERSION("SVNEHCA_0022");
+MODULE_VERSION("SVNEHCA_0023");
int ehca_open_aqp1 = 0;
int ehca_debug_level = 0;
int ehca_port_act_time = 30;
int ehca_poll_all_eqs = 1;
int ehca_static_rate = -1;
-int ehca_scaling_code = 1;
+int ehca_scaling_code = 0;
module_param_named(open_aqp1, ehca_open_aqp1, int, 0);
module_param_named(debug_level, ehca_debug_level, int, 0);
spinlock_t ehca_qp_idr_lock;
spinlock_t ehca_cq_idr_lock;
+spinlock_t hcall_lock;
DEFINE_IDR(ehca_qp_idr);
DEFINE_IDR(ehca_cq_idr);
DRIVER_ATTR(debug_level, S_IRUSR | S_IWUSR,
ehca_show_debug_level, ehca_store_debug_level);
-void ehca_create_driver_sysfs(struct ibmebus_driver *drv)
-{
- driver_create_file(&drv->driver, &driver_attr_debug_level);
-}
+static struct attribute *ehca_drv_attrs[] = {
+ &driver_attr_debug_level.attr,
+ NULL
+};
-void ehca_remove_driver_sysfs(struct ibmebus_driver *drv)
-{
- driver_remove_file(&drv->driver, &driver_attr_debug_level);
-}
+static struct attribute_group ehca_drv_attr_grp = {
+ .attrs = ehca_drv_attrs
+};
#define EHCA_RESOURCE_ATTR(name) \
static ssize_t ehca_show_##name(struct device *dev, \
}
static DEVICE_ATTR(adapter_handle, S_IRUGO, ehca_show_adapter_handle, NULL);
+static struct attribute *ehca_dev_attrs[] = {
+ &dev_attr_adapter_handle.attr,
+ &dev_attr_num_ports.attr,
+ &dev_attr_hw_ver.attr,
+ &dev_attr_max_eq.attr,
+ &dev_attr_cur_eq.attr,
+ &dev_attr_max_cq.attr,
+ &dev_attr_cur_cq.attr,
+ &dev_attr_max_qp.attr,
+ &dev_attr_cur_qp.attr,
+ &dev_attr_max_mr.attr,
+ &dev_attr_cur_mr.attr,
+ &dev_attr_max_mw.attr,
+ &dev_attr_cur_mw.attr,
+ &dev_attr_max_pd.attr,
+ &dev_attr_max_ah.attr,
+ NULL
+};
-void ehca_create_device_sysfs(struct ibmebus_dev *dev)
-{
- device_create_file(&dev->ofdev.dev, &dev_attr_adapter_handle);
- device_create_file(&dev->ofdev.dev, &dev_attr_num_ports);
- device_create_file(&dev->ofdev.dev, &dev_attr_hw_ver);
- device_create_file(&dev->ofdev.dev, &dev_attr_max_eq);
- device_create_file(&dev->ofdev.dev, &dev_attr_cur_eq);
- device_create_file(&dev->ofdev.dev, &dev_attr_max_cq);
- device_create_file(&dev->ofdev.dev, &dev_attr_cur_cq);
- device_create_file(&dev->ofdev.dev, &dev_attr_max_qp);
- device_create_file(&dev->ofdev.dev, &dev_attr_cur_qp);
- device_create_file(&dev->ofdev.dev, &dev_attr_max_mr);
- device_create_file(&dev->ofdev.dev, &dev_attr_cur_mr);
- device_create_file(&dev->ofdev.dev, &dev_attr_max_mw);
- device_create_file(&dev->ofdev.dev, &dev_attr_cur_mw);
- device_create_file(&dev->ofdev.dev, &dev_attr_max_pd);
- device_create_file(&dev->ofdev.dev, &dev_attr_max_ah);
-}
-
-void ehca_remove_device_sysfs(struct ibmebus_dev *dev)
-{
- device_remove_file(&dev->ofdev.dev, &dev_attr_adapter_handle);
- device_remove_file(&dev->ofdev.dev, &dev_attr_num_ports);
- device_remove_file(&dev->ofdev.dev, &dev_attr_hw_ver);
- device_remove_file(&dev->ofdev.dev, &dev_attr_max_eq);
- device_remove_file(&dev->ofdev.dev, &dev_attr_cur_eq);
- device_remove_file(&dev->ofdev.dev, &dev_attr_max_cq);
- device_remove_file(&dev->ofdev.dev, &dev_attr_cur_cq);
- device_remove_file(&dev->ofdev.dev, &dev_attr_max_qp);
- device_remove_file(&dev->ofdev.dev, &dev_attr_cur_qp);
- device_remove_file(&dev->ofdev.dev, &dev_attr_max_mr);
- device_remove_file(&dev->ofdev.dev, &dev_attr_cur_mr);
- device_remove_file(&dev->ofdev.dev, &dev_attr_max_mw);
- device_remove_file(&dev->ofdev.dev, &dev_attr_cur_mw);
- device_remove_file(&dev->ofdev.dev, &dev_attr_max_pd);
- device_remove_file(&dev->ofdev.dev, &dev_attr_max_ah);
-}
+static struct attribute_group ehca_dev_attr_grp = {
+ .attrs = ehca_dev_attrs
+};
static int __devinit ehca_probe(struct ibmebus_dev *dev,
const struct of_device_id *id)
}
}
- ehca_create_device_sysfs(dev);
+ ret = sysfs_create_group(&dev->ofdev.dev.kobj, &ehca_dev_attr_grp);
+ if (ret) /* only complain; we can live without attributes */
+ ehca_err(&shca->ib_device,
+ "Cannot create device attributes ret=%d", ret);
spin_lock(&shca_list_lock);
list_add(&shca->shca_list, &shca_list);
struct ehca_shca *shca = dev->ofdev.dev.driver_data;
int ret;
- ehca_remove_device_sysfs(dev);
+ sysfs_remove_group(&dev->ofdev.dev.kobj, &ehca_dev_attr_grp);
if (ehca_open_aqp1 == 1) {
int i;
int ret;
printk(KERN_INFO "eHCA Infiniband Device Driver "
- "(Rel.: SVNEHCA_0022)\n");
+ "(Rel.: SVNEHCA_0023)\n");
idr_init(&ehca_qp_idr);
idr_init(&ehca_cq_idr);
spin_lock_init(&ehca_qp_idr_lock);
spin_lock_init(&ehca_cq_idr_lock);
+ spin_lock_init(&hcall_lock);
INIT_LIST_HEAD(&shca_list);
spin_lock_init(&shca_list_lock);
goto module_init2;
}
- ehca_create_driver_sysfs(&ehca_driver);
+ ret = sysfs_create_group(&ehca_driver.driver.kobj, &ehca_drv_attr_grp);
+ if (ret) /* only complain; we can live without attributes */
+ ehca_gen_err("Cannot create driver attributes ret=%d", ret);
if (ehca_poll_all_eqs != 1) {
ehca_gen_err("WARNING!!!");
if (ehca_poll_all_eqs == 1)
del_timer_sync(&poll_eqs_timer);
- ehca_remove_driver_sysfs(&ehca_driver);
+ sysfs_remove_group(&ehca_driver.driver.kobj, &ehca_drv_attr_grp);
ibmebus_unregister_driver(&ehca_driver);
ehca_destroy_slab_caches();
goto create_qp_exit1;
}
+ my_qp->ib_qp.qp_num = my_qp->real_qp_num;
+
switch (init_attr->qp_type) {
case IB_QPT_RC:
if (isdaqp == 0) {
parms.act_nr_recv_wqes = init_attr->cap.max_recv_wr;
parms.act_nr_send_sges = init_attr->cap.max_send_sge;
parms.act_nr_recv_sges = init_attr->cap.max_recv_sge;
- my_qp->real_qp_num =
+ my_qp->ib_qp.qp_num =
(init_attr->qp_type == IB_QPT_SMI) ? 0 : 1;
}
my_qp->ib_qp.recv_cq = init_attr->recv_cq;
my_qp->ib_qp.send_cq = init_attr->send_cq;
- my_qp->ib_qp.qp_num = my_qp->real_qp_num;
my_qp->ib_qp.qp_type = init_attr->qp_type;
my_qp->qp_type = init_attr->qp_type;
((ehca_mult - 1) / ah_mult) : 0;
else
mqpcb->max_static_rate = 0;
-
update_mask |= EHCA_BMASK_SET(MQPCB_MASK_MAX_STATIC_RATE, 1);
+ /*
+ * Always supply the GRH flag, even if it's zero, to give the
+ * hypervisor a clear "yes" or "no" instead of a "perhaps"
+ */
+ update_mask |= EHCA_BMASK_SET(MQPCB_MASK_SEND_GRH_FLAG, 1);
+
/*
* only if GRH is TRUE we might consider SOURCE_GID_IDX
* and DEST_GID otherwise phype will return H_ATTR_PARM!!!
*/
if (attr->ah_attr.ah_flags == IB_AH_GRH) {
- mqpcb->send_grh_flag = 1 << 31;
- update_mask |=
- EHCA_BMASK_SET(MQPCB_MASK_SEND_GRH_FLAG, 1);
+ mqpcb->send_grh_flag = 1;
+
mqpcb->source_gid_idx = attr->ah_attr.grh.sgid_index;
update_mask |=
EHCA_BMASK_SET(MQPCB_MASK_SOURCE_GID_IDX, 1);
unsigned long arg9)
{
long ret;
- int i, sleep_msecs;
+ int i, sleep_msecs, lock_is_set = 0;
+ unsigned long flags;
ehca_gen_dbg("opcode=%lx arg1=%lx arg2=%lx arg3=%lx arg4=%lx "
"arg5=%lx arg6=%lx arg7=%lx arg8=%lx arg9=%lx",
arg8, arg9);
for (i = 0; i < 5; i++) {
+ if ((opcode == H_ALLOC_RESOURCE) && (arg2 == 5)) {
+ spin_lock_irqsave(&hcall_lock, flags);
+ lock_is_set = 1;
+ }
+
ret = plpar_hcall9(opcode, outs,
arg1, arg2, arg3, arg4, arg5,
arg6, arg7, arg8, arg9);
+ if (lock_is_set)
+ spin_unlock_irqrestore(&hcall_lock, flags);
+
if (H_IS_LONG_BUSY(ret)) {
sleep_msecs = get_longbusy_msecs(ret);
msleep_interruptible(sleep_msecs);
opcode, ret, outs[0], outs[1], outs[2], outs[3],
outs[4], outs[5], outs[6], outs[7], outs[8]);
return ret;
-
}
return H_BUSY;
}
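
The hcall path now serializes one specific allocation (H_ALLOC_RESOURCE with arg2 == 5) behind hcall_lock, while keeping the existing bounded retry loop that sleeps whenever the hypervisor reports long-busy. A minimal sketch of such a bounded retry loop; the exponential usleep() here is a made-up stand-in for the firmware-suggested get_longbusy_msecs() interval:

#include <stdio.h>
#include <unistd.h>

#define BUSY_RC   -1
#define MAX_TRIES  5

/* Hypothetical operation that succeeds on the third attempt. */
static int flaky_op(int *attempts)
{
	return (++*attempts < 3) ? BUSY_RC : 0;
}

int main(void)
{
	int attempts = 0, i, ret = BUSY_RC;

	for (i = 0; i < MAX_TRIES; i++) {
		ret = flaky_op(&attempts);
		if (ret != BUSY_RC)
			break;                /* done (or hard error)  */
		usleep(1000 << i);            /* back off before retry */
	}
	printf("ret=%d after %d attempts\n", ret, attempts);
	return 0;
}
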
+
u64 hipz_h_alloc_resource_eq(const struct ipz_adapter_handle adapter_handle,
struct ehca_pfeq *pfeq,
const u32 neq_control,
static int ipath_pe_intconfig(struct ipath_devdata *dd)
{
- u64 val;
u32 chiprev;
/*
if ((chiprev & INFINIPATH_R_CHIPREVMINOR_MASK) > 1) {
/* Rev2+ reports extra errors via internal GPIO pins */
dd->ipath_flags |= IPATH_GPIO_ERRINTRS;
- val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_gpio_mask);
- val |= IPATH_GPIO_ERRINTR_MASK;
- ipath_write_kreg( dd, dd->ipath_kregs->kr_gpio_mask, val);
+ dd->ipath_gpio_mask |= IPATH_GPIO_ERRINTR_MASK;
+ ipath_write_kreg(dd, dd->ipath_kregs->kr_gpio_mask,
+ dd->ipath_gpio_mask);
}
return 0;
}
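
Rather than read-modify-write the GPIO mask register, the driver now keeps a software shadow (ipath_gpio_mask), updates the shadow, and writes it out whole, so the interrupt path can consult the shadow without touching the device. A minimal sketch of the shadowed-register pattern, with write_kreg() as a hypothetical stand-in for ipath_write_kreg():

#include <stdio.h>
#include <stdint.h>

static uint64_t hw_gpio_mask;           /* pretend device register */
static uint64_t gpio_mask_shadow;       /* driver-side shadow      */

static void write_kreg(uint64_t val)    /* stand-in for ipath_write_kreg */
{
	hw_gpio_mask = val;
}

int main(void)
{
	/* Enable a bit: modify the shadow, then write it out whole. */
	gpio_mask_shadow |= 1ull << 2;
	write_kreg(gpio_mask_shadow);

	/* Disable it again without ever reading the device back. */
	gpio_mask_shadow &= ~(1ull << 2);
	write_kreg(gpio_mask_shadow);

	printf("hw mask now 0x%llx\n", (unsigned long long)hw_gpio_mask);
	return 0;
}
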
gpiostatus &= ~(1 << IPATH_GPIO_PORT0_BIT);
chk0rcv = 1;
}
- if (unlikely(gpiostatus)) {
+ if (gpiostatus) {
/*
* Some unexpected bits remain. If they could have
* caused the interrupt, complain and clear.
* GPIO interrupts, possibly on a "three strikes"
* basis.
*/
- u32 mask;
- mask = ipath_read_kreg32(
- dd, dd->ipath_kregs->kr_gpio_mask);
+ const u32 mask = (u32) dd->ipath_gpio_mask;
+
if (mask & gpiostatus) {
ipath_dbg("Unexpected GPIO IRQ bits %x\n",
gpiostatus & mask);
unsigned long ipath_pioavailshadow[8];
/* shadow of kr_gpio_out, for rmw ops */
u64 ipath_gpio_out;
+ /* shadow the gpio mask register */
+ u64 ipath_gpio_mask;
/* kr_revision shadow */
u64 ipath_revision;
/*
* processing.
*/
if (dd->ipath_flags & IPATH_GPIO_INTR) {
- u64 val;
ipath_write_kreg(dd, dd->ipath_kregs->kr_debugportselect,
0x2074076542310ULL);
/* Enable GPIO bit 2 interrupt */
- val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_gpio_mask);
- val |= (u64) (1 << IPATH_GPIO_PORT0_BIT);
- ipath_write_kreg( dd, dd->ipath_kregs->kr_gpio_mask, val);
+ dd->ipath_gpio_mask |= (u64) (1 << IPATH_GPIO_PORT0_BIT);
+ ipath_write_kreg(dd, dd->ipath_kregs->kr_gpio_mask,
+ dd->ipath_gpio_mask);
}
init_timer(&dd->verbs_timer);
u64 val;
/* Disable GPIO bit 2 interrupt */
val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_gpio_mask);
- val &= ~((u64) (1 << IPATH_GPIO_PORT0_BIT));
- ipath_write_kreg( dd, dd->ipath_kregs->kr_gpio_mask, val);
+ dd->ipath_gpio_mask &= ~((u64) (1 << IPATH_GPIO_PORT0_BIT));
+ ipath_write_kreg(dd, dd->ipath_kregs->kr_gpio_mask,
+ dd->ipath_gpio_mask);
/*
* We might want to undo changes to debugportselect,
* but how?
ibdev->uar_map = ioremap(ibdev->priv_uar.pfn << PAGE_SHIFT, PAGE_SIZE);
if (!ibdev->uar_map)
goto err_uar;
+ MLX4_INIT_DOORBELL_LOCK(&ibdev->uar_lock);
INIT_LIST_HEAD(&ibdev->pgdir_list);
mutex_init(&ibdev->pgdir_mutex);
{
struct mthca_cqe *cqe;
u32 prod_index;
- int nfreed = 0;
+ int i, nfreed = 0;
spin_lock_irq(&cq->lock);
}
if (nfreed) {
+ for (i = 0; i < nfreed; ++i)
+ set_cqe_hw(get_cqe(cq, (cq->cons_index + i) & cq->ibcq.cqe));
wmb();
cq->cons_index += nfreed;
update_cons_index(dev, cq, nfreed);
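
The added loop hands each freed CQE back to the hardware before the consumer index moves; (cq->cons_index + i) & cq->ibcq.cqe wraps around the ring, which works because the CQ size is a power of two and cqe holds size - 1, so it doubles as the wrap mask. A tiny demonstration of that indexing:

#include <stdio.h>

int main(void)
{
	unsigned int size = 8;           /* power-of-two ring size     */
	unsigned int mask = size - 1;    /* plays the role of ibcq.cqe */
	unsigned int cons_index = 6, nfreed = 4, i;

	/* Touch each freed entry, wrapping around the ring end. */
	for (i = 0; i < nfreed; ++i)
		printf("slot %u\n", (cons_index + i) & mask);
	return 0;
}
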
dev->kar + MTHCA_RECEIVE_DOORBELL,
MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
+ qp->rq.next_ind = ind;
qp->rq.head += MTHCA_TAVOR_MAX_WQES_PER_RECV_DB;
size0 = 0;
}
cm_id->context = p;
p->jiffies = jiffies;
spin_lock_irq(&priv->lock);
+ if (list_empty(&priv->cm.passive_ids))
+ queue_delayed_work(ipoib_workqueue,
+ &priv->cm.stale_task, IPOIB_CM_RX_DELAY);
list_add(&p->list, &priv->cm.passive_ids);
spin_unlock_irq(&priv->lock);
- queue_delayed_work(ipoib_workqueue,
- &priv->cm.stale_task, IPOIB_CM_RX_DELAY);
return 0;
err_rep:
if (!list_empty(&p->list))
list_move(&p->list, &priv->cm.passive_ids);
spin_unlock_irqrestore(&priv->lock, flags);
- queue_delayed_work(ipoib_workqueue,
- &priv->cm.stale_task, IPOIB_CM_RX_DELAY);
}
}
kfree(p);
spin_lock_irq(&priv->lock);
}
+
+ if (!list_empty(&priv->cm.passive_ids))
+ queue_delayed_work(ipoib_workqueue,
+ &priv->cm.stale_task, IPOIB_CM_RX_DELAY);
spin_unlock_irq(&priv->lock);
}
*/
#define MMC_SHIFT 3
-static int major;
-
/*
* There is one mmc_blk_data per slot.
*/
md->queue.issue_fn = mmc_blk_issue_rq;
md->queue.data = md;
- md->disk->major = major;
+ md->disk->major = MMC_BLOCK_MAJOR;
md->disk->first_minor = devidx << MMC_SHIFT;
md->disk->fops = &mmc_bdops;
md->disk->private_data = md;
{
int res = -ENOMEM;
- res = register_blkdev(major, "mmc");
- if (res < 0) {
- printk(KERN_WARNING "Unable to get major %d for MMC media: %d\n",
- major, res);
+ res = register_blkdev(MMC_BLOCK_MAJOR, "mmc");
+ if (res)
goto out;
- }
- if (major == 0)
- major = res;
return mmc_register_driver(&mmc_driver);
static void __exit mmc_blk_exit(void)
{
mmc_unregister_driver(&mmc_driver);
- unregister_blkdev(major, "mmc");
+ unregister_blkdev(MMC_BLOCK_MAJOR, "mmc");
}
module_init(mmc_blk_init);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Multimedia Card (MMC) block device driver");
-module_param(major, int, 0444);
-MODULE_PARM_DESC(major, "specify the major device number for MMC block driver");
}
static int au1xmmc_send_command(struct au1xmmc_host *host, int wait,
- struct mmc_command *cmd)
+ struct mmc_command *cmd, unsigned int flags)
{
-
u32 mmccmd = (cmd->opcode << SD_CMD_CI_SHIFT);
switch (mmc_resp_type(cmd)) {
return MMC_ERR_INVALID;
}
- switch(cmd->opcode) {
- case MMC_READ_SINGLE_BLOCK:
- case SD_APP_SEND_SCR:
- mmccmd |= SD_CMD_CT_2;
- break;
- case MMC_READ_MULTIPLE_BLOCK:
- mmccmd |= SD_CMD_CT_4;
- break;
- case MMC_WRITE_BLOCK:
- mmccmd |= SD_CMD_CT_1;
- break;
-
- case MMC_WRITE_MULTIPLE_BLOCK:
- mmccmd |= SD_CMD_CT_3;
- break;
- case MMC_STOP_TRANSMISSION:
- mmccmd |= SD_CMD_CT_7;
- break;
+ if (flags & MMC_DATA_READ) {
+ if (flags & MMC_DATA_MULTI)
+ mmccmd |= SD_CMD_CT_4;
+ else
+ mmccmd |= SD_CMD_CT_2;
+ } else if (flags & MMC_DATA_WRITE) {
+ if (flags & MMC_DATA_MULTI)
+ mmccmd |= SD_CMD_CT_3;
+ else
+ mmccmd |= SD_CMD_CT_1;
}
au_writel(cmd->arg, HOST_CMDARG(host));
{
struct au1xmmc_host *host = mmc_priv(mmc);
+ unsigned int flags = 0;
int ret = MMC_ERR_NONE;
WARN_ON(irqs_disabled());
if (mrq->data) {
FLUSH_FIFO(host);
+ flags = mrq->data->flags;
ret = au1xmmc_prepare_data(host, mrq->data);
}
if (ret == MMC_ERR_NONE)
- ret = au1xmmc_send_command(host, 0, mrq->cmd);
+ ret = au1xmmc_send_command(host, 0, mrq->cmd, flags);
if (ret != MMC_ERR_NONE) {
mrq->cmd->error = ret;
/*
* workaround for erratum #42:
* Intel PXA27x Family Processor Specification Update Rev 001
+ * A bogus CRC error can appear if the msb of a 136-bit
+ * response is a one.
*/
- if (cmd->opcode == MMC_ALL_SEND_CID ||
- cmd->opcode == MMC_SEND_CSD ||
- cmd->opcode == MMC_SEND_CID) {
- /* a bogus CRC error can appear if the msb of
- the 15 byte response is a one */
- if ((cmd->resp[0] & 0x80000000) == 0)
- cmd->error = MMC_ERR_BADCRC;
- } else {
- pr_debug("ignoring CRC from command %d - *risky*\n",cmd->opcode);
- }
-#else
- cmd->error = MMC_ERR_BADCRC;
+ if (cmd->flags & MMC_RSP_136 && cmd->resp[0] & 0x80000000) {
+ pr_debug("ignoring CRC from command %d - *risky*\n", cmd->opcode);
+ } else
#endif
+ cmd->error = MMC_ERR_BADCRC;
}
pxamci_disable_irq(host, END_CMD_RES);
if (intmask & (SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL))
sdhci_transfer_pio(host);
+ /*
+ * We currently don't do anything fancy with DMA
+ * boundaries, but as we can't disable the feature
+ * we need to at least restart the transfer.
+ */
+ if (intmask & SDHCI_INT_DMA_END)
+ writel(readl(host->ioaddr + SDHCI_DMA_ADDRESS),
+ host->ioaddr + SDHCI_DMA_ADDRESS);
+
if (intmask & SDHCI_INT_DATA_END)
sdhci_finish_data(host);
}
config MLX4_DEBUG
bool "Verbose debugging output" if (MLX4_CORE && EMBEDDED)
+ depends on MLX4_CORE
default y
---help---
This option causes debugging code to be compiled into the
struct mlx4_priv *priv = mlx4_priv(dev);
int err;
- MLX4_INIT_DOORBELL_LOCK(&priv->doorbell_lock);
-
err = mlx4_init_uar_table(dev);
if (err) {
mlx4_err(dev, "Failed to initialize "
struct mlx4_uar driver_uar;
void __iomem *kar;
- MLX4_DECLARE_DOORBELL_LOCK(doorbell_lock)
u32 rev_id;
char board_id[MLX4_BOARD_ID_LEN];
{
struct pci_dev *pdev = mac->pdev;
struct device_node *dn = pci_device_to_OF_node(pdev);
+ int len;
const u8 *maddr;
u8 addr[6];
return -ENOENT;
}
- maddr = of_get_property(dn, "local-mac-address", NULL);
+ maddr = of_get_property(dn, "local-mac-address", &len);
+
+ if (maddr && len == 6) {
+ memcpy(mac->mac_addr, maddr, 6);
+ return 0;
+ }
+
+ /* Some old versions of firmware mistakenly use mac-address
+ * (and as a string) instead of a byte array in local-mac-address.
+ */
- /* Fall back to mac-address for older firmware */
if (maddr == NULL)
maddr = of_get_property(dn, "mac-address", NULL);
return -ENOENT;
}
+
if (sscanf(maddr, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx", &addr[0],
&addr[1], &addr[2], &addr[3], &addr[4], &addr[5]) != 6) {
dev_warn(&pdev->dev,
return -EINVAL;
}
- memcpy(mac->mac_addr, addr, sizeof(addr));
+ memcpy(mac->mac_addr, addr, 6);
+
return 0;
}
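
For the old-firmware fallback, the MAC arrives as text and sscanf() with %hhx pulls the six bytes out, failing cleanly when the string is malformed. A self-contained version of that parse, using an example property value:

#include <stdio.h>

int main(void)
{
	const char *maddr = "00:11:22:33:44:55";  /* example property value */
	unsigned char addr[6];

	if (sscanf(maddr, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx",
		   &addr[0], &addr[1], &addr[2],
		   &addr[3], &addr[4], &addr[5]) != 6) {
		fprintf(stderr, "bad mac-address property\n");
		return 1;
	}
	printf("parsed %02x:%02x:%02x:%02x:%02x:%02x\n",
	       addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
	return 0;
}
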
static void pasemi_mac_restart_rx_intr(struct pasemi_mac *mac)
{
- unsigned int reg, stat;
+ unsigned int reg, pcnt;
/* Re-enable packet count interrupts: finally
* ack the packet count interrupt we got in rx_intr.
*/
- pci_read_config_dword(mac->iob_pdev,
- PAS_IOB_DMA_RXCH_STAT(mac->dma_rxch),
- &stat);
+ pcnt = *mac->rx_status & PAS_STATUS_PCNT_M;
- reg = PAS_IOB_DMA_RXCH_RESET_PCNT(stat & PAS_IOB_DMA_RXCH_STAT_CNTDEL_M)
- | PAS_IOB_DMA_RXCH_RESET_PINTC;
+ reg = PAS_IOB_DMA_RXCH_RESET_PCNT(pcnt) | PAS_IOB_DMA_RXCH_RESET_PINTC;
pci_write_config_dword(mac->iob_pdev,
PAS_IOB_DMA_RXCH_RESET(mac->dma_rxch),
static void pasemi_mac_restart_tx_intr(struct pasemi_mac *mac)
{
- unsigned int reg, stat;
+ unsigned int reg, pcnt;
/* Re-enable packet count interrupts */
- pci_read_config_dword(mac->iob_pdev,
- PAS_IOB_DMA_TXCH_STAT(mac->dma_txch), &stat);
+ pcnt = *mac->tx_status & PAS_STATUS_PCNT_M;
- reg = PAS_IOB_DMA_TXCH_RESET_PCNT(stat & PAS_IOB_DMA_TXCH_STAT_CNTDEL_M)
- | PAS_IOB_DMA_TXCH_RESET_PINTC;
+ reg = PAS_IOB_DMA_TXCH_RESET_PCNT(pcnt) | PAS_IOB_DMA_TXCH_RESET_PINTC;
pci_write_config_dword(mac->iob_pdev,
PAS_IOB_DMA_TXCH_RESET(mac->dma_txch), reg);
{
struct net_device *dev = data;
struct pasemi_mac *mac = netdev_priv(dev);
- unsigned int reg;
+ unsigned int reg, pcnt;
if (!(*mac->tx_status & PAS_STATUS_CAUSE_M))
return IRQ_NONE;
pasemi_mac_clean_tx(mac);
- reg = PAS_IOB_DMA_TXCH_RESET_PINTC;
+ pcnt = *mac->tx_status & PAS_STATUS_PCNT_M;
+
+ reg = PAS_IOB_DMA_TXCH_RESET_PCNT(pcnt) | PAS_IOB_DMA_TXCH_RESET_PINTC;
if (*mac->tx_status & PAS_STATUS_SOFT)
reg |= PAS_IOB_DMA_TXCH_RESET_SINTC;
if (*mac->tx_status & PAS_STATUS_ERROR)
reg |= PAS_IOB_DMA_TXCH_RESET_DINTC;
- pci_write_config_dword(mac->iob_pdev, PAS_IOB_DMA_TXCH_RESET(mac->dma_txch),
+ pci_write_config_dword(mac->iob_pdev,
+ PAS_IOB_DMA_TXCH_RESET(mac->dma_txch),
reg);
return IRQ_HANDLED;
if (txring->next_to_clean - txring->next_to_use == TX_RING_SIZE) {
spin_unlock_irqrestore(&txring->lock, flags);
pasemi_mac_clean_tx(mac);
+ pasemi_mac_restart_tx_intr(mac);
spin_lock_irqsave(&txring->lock, flags);
if (txring->next_to_clean - txring->next_to_use ==
static struct pci_device_id pasemi_mac_pci_tbl[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_PASEMI, 0xa005) },
{ PCI_DEVICE(PCI_VENDOR_ID_PASEMI, 0xa006) },
+ { },
};
MODULE_DEVICE_TABLE(pci, pasemi_mac_pci_tbl);
PAS_IOB_DMA_TXCH_STAT_CNTDEL_M)
#define PAS_IOB_DMA_RXCH_RESET(i) (0x1500 + (i)*4)
#define PAS_IOB_DMA_RXCH_RESET_PCNT_M 0xffff0000
-#define PAS_IOB_DMA_RXCH_RESET_PCNT_S 0
+#define PAS_IOB_DMA_RXCH_RESET_PCNT_S 16
#define PAS_IOB_DMA_RXCH_RESET_PCNT(x) (((x) << PAS_IOB_DMA_RXCH_RESET_PCNT_S) & \
PAS_IOB_DMA_RXCH_RESET_PCNT_M)
#define PAS_IOB_DMA_RXCH_RESET_PCNTRST 0x00000020
#define PAS_IOB_DMA_RXCH_RESET_PINTC 0x00000001
#define PAS_IOB_DMA_TXCH_RESET(i) (0x1600 + (i)*4)
#define PAS_IOB_DMA_TXCH_RESET_PCNT_M 0xffff0000
-#define PAS_IOB_DMA_TXCH_RESET_PCNT_S 0
+#define PAS_IOB_DMA_TXCH_RESET_PCNT_S 16
#define PAS_IOB_DMA_TXCH_RESET_PCNT(x) (((x) << PAS_IOB_DMA_TXCH_RESET_PCNT_S) & \
PAS_IOB_DMA_TXCH_RESET_PCNT_M)
#define PAS_IOB_DMA_TXCH_RESET_PCNTRST 0x00000020
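
The _PCNT_S change from 0 to 16 matters because the PCNT() macros mask after shifting: with a shift of 0, a 16-bit packet count could never reach the 0xffff0000 field, so the reset write always cleared it. A two-line demonstration:

#include <stdio.h>

#define PCNT_M 0xffff0000u

static unsigned int pcnt(unsigned int x, unsigned int s)
{
	return (x << s) & PCNT_M;
}

int main(void)
{
	/* With the old shift of 0, any sane packet count is masked away. */
	printf("shift 0:  0x%08x\n", pcnt(0x1234, 0));   /* -> 0x00000000 */
	printf("shift 16: 0x%08x\n", pcnt(0x1234, 16));  /* -> 0x12340000 */
	return 0;
}
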
#include <linux/skbuff.h>
#include <asm/io.h>
-#include <asm/irq.h>
#include "smc911x.h"
lp->ctl_rspeed = 100;
/* Grab the IRQ */
- retval = request_irq(dev->irq, &smc911x_interrupt, IRQF_SHARED, dev->name, dev);
+ retval = request_irq(dev->irq, &smc911x_interrupt,
+ IRQF_SHARED | IRQF_TRIGGER_FALLING, dev->name, dev);
if (retval)
goto err_out;
- set_irq_type(dev->irq, IRQT_FALLING);
-
#ifdef SMC_USE_DMA
lp->rxdma = SMC_DMA_REQUEST(dev, smc911x_rx_dma_irq);
lp->txdma = SMC_DMA_REQUEST(dev, smc911x_tx_dma_irq);
/*
- * Copyright (C) Freescale Semicondutor, Inc. 2006. All rights reserved.
+ * Copyright (C) 2006-2007 Freescale Semiconductor, Inc. All rights reserved.
*
* Author: Shlomi Gridish <gridish@freescale.com>
* Li Yang <leoli@freescale.com>
const struct ethtool_ops ucc_geth_ethtool_ops = { };
-static phy_interface_t to_phy_interface(const char *interface_type)
+static phy_interface_t to_phy_interface(const char *phy_connection_type)
{
- if (strcasecmp(interface_type, "mii") == 0)
+ if (strcasecmp(phy_connection_type, "mii") == 0)
return PHY_INTERFACE_MODE_MII;
- if (strcasecmp(interface_type, "gmii") == 0)
+ if (strcasecmp(phy_connection_type, "gmii") == 0)
return PHY_INTERFACE_MODE_GMII;
- if (strcasecmp(interface_type, "tbi") == 0)
+ if (strcasecmp(phy_connection_type, "tbi") == 0)
return PHY_INTERFACE_MODE_TBI;
- if (strcasecmp(interface_type, "rmii") == 0)
+ if (strcasecmp(phy_connection_type, "rmii") == 0)
return PHY_INTERFACE_MODE_RMII;
- if (strcasecmp(interface_type, "rgmii") == 0)
+ if (strcasecmp(phy_connection_type, "rgmii") == 0)
return PHY_INTERFACE_MODE_RGMII;
- if (strcasecmp(interface_type, "rgmii-id") == 0)
+ if (strcasecmp(phy_connection_type, "rgmii-id") == 0)
return PHY_INTERFACE_MODE_RGMII_ID;
- if (strcasecmp(interface_type, "rtbi") == 0)
+ if (strcasecmp(phy_connection_type, "rtbi") == 0)
return PHY_INTERFACE_MODE_RTBI;
return PHY_INTERFACE_MODE_MII;
ug_info->phy_address = *prop;
/* get the phy interface type, or default to MII */
- prop = of_get_property(np, "interface-type", NULL);
+ prop = of_get_property(np, "phy-connection-type", NULL);
if (!prop) {
/* handle interface property present in old trees */
prop = of_get_property(phy, "interface", NULL);
- if (prop != NULL)
+ if (prop != NULL) {
phy_interface = enet_to_phy_interface[*prop];
- else
+ max_speed = enet_to_speed[*prop];
+ } else
phy_interface = PHY_INTERFACE_MODE_MII;
} else {
phy_interface = to_phy_interface((const char *)prop);
}
- /* get speed, or derive from interface */
- prop = of_get_property(np, "max-speed", NULL);
- if (!prop) {
- /* handle interface property present in old trees */
- prop = of_get_property(phy, "interface", NULL);
- if (prop != NULL)
- max_speed = enet_to_speed[*prop];
- } else {
- max_speed = *prop;
- }
- if (!max_speed) {
+ /* get speed, or derive from PHY interface */
+ if (max_speed == 0)
switch (phy_interface) {
case PHY_INTERFACE_MODE_GMII:
case PHY_INTERFACE_MODE_RGMII:
max_speed = SPEED_100;
break;
}
- }
if (max_speed == SPEED_1000) {
+ /* configure muram FIFOs for gigabit operation */
ug_info->uf_info.urfs = UCC_GETH_URFS_GIGA_INIT;
ug_info->uf_info.urfet = UCC_GETH_URFET_GIGA_INIT;
ug_info->uf_info.urfset = UCC_GETH_URFSET_GIGA_INIT;
/*
* drivers/net/ucc_geth_mii.c
*
- * Gianfar Ethernet Driver -- MIIM bus implementation
- * Provides Bus interface for MIIM regs
+ * QE UCC Gigabit Ethernet Driver -- MII Management Bus Implementation
+ * Provides Bus interface for MII Management regs in the UCC register space
*
- * Author: Li Yang
+ * Copyright (C) 2007 Freescale Semiconductor, Inc.
*
- * Copyright (c) 2002-2004 Freescale Semiconductor, Inc.
+ * Authors: Li Yang <leoli@freescale.com>
+ * Kim Phillips <kim.phillips@freescale.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
/*
* drivers/net/ucc_geth_mii.h
*
- * Gianfar Ethernet Driver -- MII Management Bus Implementation
- * Driver for the MDIO bus controller in the Gianfar register space
+ * QE UCC Gigabit Ethernet Driver -- MII Management Bus Implementation
+ * Provides Bus interface for MII Management regs in the UCC register space
*
- * Author: Andy Fleming
- * Maintainer: Kumar Gala
+ * Copyright (C) 2007 Freescale Semiconductor, Inc.
*
- * Copyright (c) 2002-2004 Freescale Semiconductor, Inc.
+ * Authors: Li Yang <leoli@freescale.com>
+ * Kim Phillips <kim.phillips@freescale.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
#include <asm/ebus.h>
#include <asm/spitfire.h>
#include <asm/bbc.h>
+#include <asm/io.h>
#include "bbc_i2c.h"
#include <asm/ebus.h> /* EBus device */
#include <asm/oplib.h> /* OpenProm Library */
#include <asm/uaccess.h> /* put_/get_user */
+#include <asm/io.h>
#include <asm/display7seg.h>
tristate "ESP Scsi Driver Core"
depends on SCSI
select SCSI_SPI_ATTRS
+ help
+ This is a core driver for NCR53c9x-based SCSI chipsets,
+ also known as "ESP" for Emulex SCSI Processor or
+ Enhanced SCSI Processor. This driver does not exist by
+ itself; there are front-end drivers which, when enabled,
+ select and enable this driver. One example is SCSI_SUNESP.
+ These front-end drivers provide probing, DMA, and register
+ access support for the core driver.
config SCSI_SUNESP
tristate "Sparc ESP Scsi Driver"
.name = "console",
.compatible = "qcn",
},
+ {
+ .name = "console",
+ .compatible = "SUNW,sun4v-console",
+ },
{},
};
MODULE_DEVICE_TABLE(of, hv_match);
config FB_XVR500
bool "Sun XVR-500 3DLABS Wildcat support"
- depends on FB && PCI && SPARC64
+ depends on (FB = y) && PCI && SPARC64
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
config FB_XVR2500
bool "Sun XVR-2500 3DLABS Wildcat support"
- depends on FB && PCI && SPARC64
+ depends on (FB = y) && PCI && SPARC64
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
/*
- * fs/eventpoll.c ( Efficent event polling implementation )
- * Copyright (C) 2001,...,2006 Davide Libenzi
+ * fs/eventpoll.c (Efficient event polling implementation)
+ * Copyright (C) 2001,...,2007 Davide Libenzi
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
#include <linux/hash.h>
#include <linux/spinlock.h>
#include <linux/syscalls.h>
-#include <linux/rwsem.h>
#include <linux/rbtree.h>
#include <linux/wait.h>
#include <linux/eventpoll.h>
#include <asm/io.h>
#include <asm/mman.h>
#include <asm/atomic.h>
-#include <asm/semaphore.h>
/*
* LOCKING:
* There are three level of locking required by epoll :
*
* 1) epmutex (mutex)
- * 2) ep->sem (rw_semaphore)
- * 3) ep->lock (rw_lock)
+ * 2) ep->mtx (mutex)
+ * 3) ep->lock (spinlock)
*
* The acquire order is the one listed above, from 1 to 3.
* We need a spinlock (ep->lock) because we manipulate objects
* a spinlock. During the event transfer loop (from kernel to
* user space) we could end up sleeping due to a copy_to_user(), so
* we need a lock that will allow us to sleep. This lock is a
- * read-write semaphore (ep->sem). It is acquired on read during
- * the event transfer loop and in write during epoll_ctl(EPOLL_CTL_DEL)
- * and during eventpoll_release_file(). Then we also need a global
- * semaphore to serialize eventpoll_release_file() and ep_free().
- * This semaphore is acquired by ep_free() during the epoll file
+ * mutex (ep->mtx). It is acquired during the event transfer loop,
+ * during epoll_ctl(EPOLL_CTL_DEL) and during eventpoll_release_file().
+ * Then we also need a global mutex to serialize eventpoll_release_file()
+ * and ep_free().
+ * This mutex is acquired by ep_free() during the epoll file
* cleanup path and it is also acquired by eventpoll_release_file()
* if a file has been pushed inside an epoll set and it is then
* close()d without a previous call to epoll_ctl(EPOLL_CTL_DEL).
- * It is possible to drop the "ep->sem" and to use the global
- * semaphore "epmutex" (together with "ep->lock") to have it working,
- * but having "ep->sem" will make the interface more scalable.
+ * It is possible to drop the "ep->mtx" and to use the global
+ * mutex "epmutex" (together with "ep->lock") to have it working,
+ * but having "ep->mtx" will make the interface more scalable.
* Events that require holding "epmutex" are very rare, while for
- * normal operations the epoll private "ep->sem" will guarantee
- * a greater scalability.
+ * normal operations the epoll private "ep->mtx" will guarantee
+ * better scalability.
*/
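
The rewritten comment fixes the documented hierarchy at epmutex (outermost), then ep->mtx, then ep->lock (innermost). A user-space sketch of honoring such a fixed acquire order with pthreads; the names are illustrative, not the kernel objects themselves:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t epmutex = PTHREAD_MUTEX_INITIALIZER; /* level 1 */
static pthread_mutex_t ep_mtx  = PTHREAD_MUTEX_INITIALIZER; /* level 2 */
static pthread_spinlock_t ep_lock;                          /* level 3 */

int main(void)
{
	pthread_spin_init(&ep_lock, PTHREAD_PROCESS_PRIVATE);

	/* Always acquire from level 1 down to level 3, release in
	 * reverse; any other order risks an ABBA deadlock. */
	pthread_mutex_lock(&epmutex);
	pthread_mutex_lock(&ep_mtx);
	pthread_spin_lock(&ep_lock);

	puts("all three levels held in order");

	pthread_spin_unlock(&ep_lock);
	pthread_mutex_unlock(&ep_mtx);
	pthread_mutex_unlock(&epmutex);
	return 0;
}
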
#define DEBUG_EPOLL 0
#define EP_MAX_EVENTS (INT_MAX / sizeof(struct epoll_event))
+#define EP_UNACTIVE_PTR ((void *) -1L)
+
struct epoll_filefd {
struct file *file;
int fd;
* Node that is linked into the "wake_task_list" member of the "struct poll_safewake".
* It is used to keep track on all tasks that are currently inside the wake_up() code
* to 1) short-circuit the one coming from the same task and same wait queue head
- * ( loop ) 2) allow a maximum number of epoll descriptors inclusion nesting
+ * (loop) 2) allow a maximum number of epoll descriptors inclusion nesting
* 3) let go the ones coming from other tasks.
*/
struct wake_task_node {
spinlock_t lock;
};
+/*
+ * Each file descriptor added to the eventpoll interface will
+ * have an entry of this type linked to the "rbr" RB tree.
+ */
+struct epitem {
+ /* RB tree node used to link this structure to the eventpoll RB tree */
+ struct rb_node rbn;
+
+ /* List header used to link this structure to the eventpoll ready list */
+ struct list_head rdllink;
+
+ /*
+ * Works together with "struct eventpoll"->ovflist to keep the
+ * singly linked chain of items.
+ */
+ struct epitem *next;
+
+ /* The file descriptor information this item refers to */
+ struct epoll_filefd ffd;
+
+ /* Number of active wait queues attached to poll operations */
+ int nwait;
+
+ /* List containing poll wait queues */
+ struct list_head pwqlist;
+
+ /* The "container" of this item */
+ struct eventpoll *ep;
+
+ /* List header used to link this item to the "struct file" items list */
+ struct list_head fllink;
+
+ /* The structure that describes the interested events and the source fd */
+ struct epoll_event event;
+};
+
/*
* This structure is stored inside the "private_data" member of the file
* structure and represents the main data structure for the eventpoll
*/
struct eventpoll {
/* Protect access to this structure */
- rwlock_t lock;
+ spinlock_t lock;
/*
- * This semaphore is used to ensure that files are not removed
- * while epoll is using them. This is read-held during the event
- * collection loop and it is write-held during the file cleanup
- * path, the epoll file exit code and the ctl operations.
+ * This mutex is used to ensure that files are not removed
+ * while epoll is using them. This is held during the event
+ * collection loop, the file cleanup path, the epoll file exit
+ * code and the ctl operations.
*/
- struct rw_semaphore sem;
+ struct mutex mtx;
/* Wait queue used by sys_epoll_wait() */
wait_queue_head_t wq;
/* List of ready file descriptors */
struct list_head rdllist;
- /* RB-Tree root used to store monitored fd structs */
+ /* RB tree root used to store monitored fd structs */
struct rb_root rbr;
+
+ /*
+ * This is a singly linked list that chains all the "struct epitem" that
+ * got events while ready events were being transferred to userspace
+ * without holding ->lock.
+ */
+ struct epitem *ovflist;
};
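The ovflist/next pairing relies on a three-state sentinel: EP_UNACTIVE_PTR ((void *) -1L) marks an item as "not chained", NULL terminates an active chain, and any other value points at the next chained item. A minimal userspace sketch of the same idiom, with illustrative names that are not part of this patch:

#include <stdio.h>

#define UNACTIVE_PTR ((void *) -1L)

struct item {
	struct item *next;
	int id;
};

/* Chain an item at most once: next == UNACTIVE_PTR means "not chained". */
static void chain(struct item **head, struct item *it)
{
	if (it->next == UNACTIVE_PTR) {
		it->next = *head;	/* NULL-terminated once chained */
		*head = it;
	}
}

int main(void)
{
	struct item a = { UNACTIVE_PTR, 1 }, b = { UNACTIVE_PTR, 2 };
	struct item *head = NULL;

	chain(&head, &a);
	chain(&head, &b);
	chain(&head, &a);	/* no-op: already chained */
	for (struct item *p = head; p; p = p->next)
		printf("item %d\n", p->id);
	return 0;
}

This mirrors how the poll callback below decides whether an epitem is already on ep->ovflist.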
/* Wait structure used by the poll hooks */
wait_queue_head_t *whead;
};
-/*
- * Each file descriptor added to the eventpoll interface will
- * have an entry of this type linked to the "rbr" RB tree.
- */
-struct epitem {
- /* RB-Tree node used to link this structure to the eventpoll rb-tree */
- struct rb_node rbn;
-
- /* List header used to link this structure to the eventpoll ready list */
- struct list_head rdllink;
-
- /* The file descriptor information this item refers to */
- struct epoll_filefd ffd;
-
- /* Number of active wait queue attached to poll operations */
- int nwait;
-
- /* List containing poll wait queues */
- struct list_head pwqlist;
-
- /* The "container" of this item */
- struct eventpoll *ep;
-
- /* The structure that describe the interested events and the source fd */
- struct epoll_event event;
-
- /*
- * Used to keep track of the usage count of the structure. This avoids
- * that the structure will desappear from underneath our processing.
- */
- atomic_t usecnt;
-
- /* List header used to link this item to the "struct file" items list */
- struct list_head fllink;
-};
-
/* Wrapper struct used by poll queueing */
struct ep_pqueue {
poll_table pt;
};
/*
- * This semaphore is used to serialize ep_free() and eventpoll_release_file().
+ * This mutex is used to serialize ep_free() and eventpoll_release_file().
*/
static struct mutex epmutex;
static struct kmem_cache *pwq_cache __read_mostly;
-/* Setup the structure that is used as key for the rb-tree */
+/* Setup the structure that is used as key for the RB tree */
static inline void ep_set_ffd(struct epoll_filefd *ffd,
struct file *file, int fd)
{
ffd->fd = fd;
}
-/* Compare rb-tree keys */
+/* Compare RB tree keys */
static inline int ep_cmp_ffd(struct epoll_filefd *p1,
struct epoll_filefd *p2)
{
(p1->file < p2->file ? -1 : p1->fd - p2->fd));
}
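For clarity: the RB tree key is the (struct file *, fd) pair compared lexicographically, so the same file registered under two different descriptors yields two distinct epitems. A self-contained sketch of that ordering (names are illustrative, not from this patch):

#include <stdio.h>

struct key {
	const void *file;
	int fd;
};

/* Lexicographic compare: file pointer first, then fd. */
static int cmp(const struct key *a, const struct key *b)
{
	if (a->file != b->file)
		return (const char *)a->file < (const char *)b->file ? -1 : 1;
	return a->fd - b->fd;
}

int main(void)
{
	int x;
	struct key k1 = { &x, 3 }, k2 = { &x, 4 };

	printf("%d\n", cmp(&k1, &k2));	/* negative: same file, lower fd first */
	return 0;
}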
-/* Special initialization for the rb-tree node to detect linkage */
+/* Special initialization for the RB tree node to detect linkage */
static inline void ep_rb_initnode(struct rb_node *n)
{
rb_set_parent(n, n);
}
-/* Removes a node from the rb-tree and marks it for a fast is-linked check */
+/* Removes a node from the RB tree and marks it for a fast is-linked check */
static inline void ep_rb_erase(struct rb_node *n, struct rb_root *r)
{
rb_erase(n, r);
rb_set_parent(n, n);
}
-/* Fast check to verify that the item is linked to the main rb-tree */
+/* Fast check to verify that the item is linked to the main RB tree */
static inline int ep_rb_linked(struct rb_node *n)
{
return rb_parent(n) != n;
}
}
-/*
- * Unlink the "struct epitem" from all places it might have been hooked up.
- * This function must be called with write IRQ lock on "ep->lock".
- */
-static int ep_unlink(struct eventpoll *ep, struct epitem *epi)
-{
- int error;
-
- /*
- * It can happen that this one is called for an item already unlinked.
- * The check protect us from doing a double unlink ( crash ).
- */
- error = -ENOENT;
- if (!ep_rb_linked(&epi->rbn))
- goto error_return;
-
- /*
- * Clear the event mask for the unlinked item. This will avoid item
- * notifications to be sent after the unlink operation from inside
- * the kernel->userspace event transfer loop.
- */
- epi->event.events = 0;
-
- /*
- * At this point is safe to do the job, unlink the item from our rb-tree.
- * This operation togheter with the above check closes the door to
- * double unlinks.
- */
- ep_rb_erase(&epi->rbn, &ep->rbr);
-
- /*
- * If the item we are going to remove is inside the ready file descriptors
- * we want to remove it from this list to avoid stale events.
- */
- if (ep_is_linked(&epi->rdllink))
- list_del_init(&epi->rdllink);
-
- error = 0;
-error_return:
-
- DNPRINTK(3, (KERN_INFO "[%p] eventpoll: ep_unlink(%p, %p) = %d\n",
- current, ep, epi->ffd.file, error));
-
- return error;
-}
-
-/*
- * Increment the usage count of the "struct epitem" making it sure
- * that the user will have a valid pointer to reference.
- */
-static void ep_use_epitem(struct epitem *epi)
-{
- atomic_inc(&epi->usecnt);
-}
-
-/*
- * Decrement ( release ) the usage count by signaling that the user
- * has finished using the structure. It might lead to freeing the
- * structure itself if the count goes to zero.
- */
-static void ep_release_epitem(struct epitem *epi)
-{
- if (atomic_dec_and_test(&epi->usecnt))
- kmem_cache_free(epi_cache, epi);
-}
-
/*
* Removes a "struct epitem" from the eventpoll RB tree and deallocates
- * all the associated resources.
+ * all the associated resources. Must be called with "mtx" held.
*/
static int ep_remove(struct eventpoll *ep, struct epitem *epi)
{
- int error;
unsigned long flags;
struct file *file = epi->ffd.file;
list_del_init(&epi->fllink);
spin_unlock(&file->f_ep_lock);
- /* We need to acquire the write IRQ lock before calling ep_unlink() */
- write_lock_irqsave(&ep->lock, flags);
-
- /* Really unlink the item from the RB tree */
- error = ep_unlink(ep, epi);
-
- write_unlock_irqrestore(&ep->lock, flags);
+ if (ep_rb_linked(&epi->rbn))
+ ep_rb_erase(&epi->rbn, &ep->rbr);
- if (error)
- goto error_return;
+ spin_lock_irqsave(&ep->lock, flags);
+ if (ep_is_linked(&epi->rdllink))
+ list_del_init(&epi->rdllink);
+ spin_unlock_irqrestore(&ep->lock, flags);
/* At this point it is safe to free the eventpoll item */
- ep_release_epitem(epi);
+ kmem_cache_free(epi_cache, epi);
- error = 0;
-error_return:
- DNPRINTK(3, (KERN_INFO "[%p] eventpoll: ep_remove(%p, %p) = %d\n",
- current, ep, file, error));
+ DNPRINTK(3, (KERN_INFO "[%p] eventpoll: ep_remove(%p, %p)\n",
+ current, ep, file));
- return error;
+ return 0;
}
static void ep_free(struct eventpoll *ep)
/*
* We need to lock this because we could be hit by
* eventpoll_release_file() while we're freeing the "struct eventpoll".
- * We do not need to hold "ep->sem" here because the epoll file
+ * We do not need to hold "ep->mtx" here because the epoll file
* is on the way to be removed and no one has references to it
* anymore. The only hit might come from eventpoll_release_file() but
* holding "epmutex" is sufficent here.
/*
* Walks through the whole tree by freeing each "struct epitem". At this
* point we are sure no poll callbacks will be lingering around, and also by
- * write-holding "sem" we can be sure that no file cleanup code will hit
+ * holding "epmutex" we can be sure that no file cleanup code will hit
* us during this operation. So we can avoid the lock on "ep->lock".
*/
while ((rbp = rb_first(&ep->rbr)) != 0) {
}
mutex_unlock(&epmutex);
+ mutex_destroy(&ep->mtx);
+ kfree(ep);
}
static int ep_eventpoll_release(struct inode *inode, struct file *file)
{
struct eventpoll *ep = file->private_data;
- if (ep) {
+ if (ep)
ep_free(ep);
- kfree(ep);
- }
DNPRINTK(3, (KERN_INFO "[%p] eventpoll: close() ep=%p\n", current, ep));
return 0;
poll_wait(file, &ep->poll_wait, wait);
/* Check our condition */
- read_lock_irqsave(&ep->lock, flags);
+ spin_lock_irqsave(&ep->lock, flags);
if (!list_empty(&ep->rdllist))
pollflags = POLLIN | POLLRDNORM;
- read_unlock_irqrestore(&ep->lock, flags);
+ spin_unlock_irqrestore(&ep->lock, flags);
return pollflags;
}
* We don't want to get "file->f_ep_lock" because it is not
* necessary. It is not necessary because we're in the "struct file"
* cleanup path, and this means that no one is using this file anymore.
- * The only hit might come from ep_free() but by holding the semaphore
+ * So, for example, epoll_ctl() cannot hit here since if we reach this
+ * point, the file counter already went to zero and fget() would fail.
+ * The only hit might come from ep_free() but holding the mutex
* will correctly serialize the operation. We do need to acquire
- * "ep->sem" after "epmutex" because ep_remove() requires it when called
+ * "ep->mtx" after "epmutex" because ep_remove() requires it when called
* from anywhere but ep_free().
*/
mutex_lock(&epmutex);
ep = epi->ep;
list_del_init(&epi->fllink);
- down_write(&ep->sem);
+ mutex_lock(&ep->mtx);
ep_remove(ep, epi);
- up_write(&ep->sem);
+ mutex_unlock(&ep->mtx);
}
mutex_unlock(&epmutex);
if (!ep)
return -ENOMEM;
- rwlock_init(&ep->lock);
- init_rwsem(&ep->sem);
+ spin_lock_init(&ep->lock);
+ mutex_init(&ep->mtx);
init_waitqueue_head(&ep->wq);
init_waitqueue_head(&ep->poll_wait);
INIT_LIST_HEAD(&ep->rdllist);
ep->rbr = RB_ROOT;
+ ep->ovflist = EP_UNACTIVE_PTR;
*pep = ep;
}
/*
- * Search the file inside the eventpoll tree. It add usage count to
- * the returned item, so the caller must call ep_release_epitem()
- * after finished using the "struct epitem".
+ * Search the file inside the eventpoll tree. The RB tree operations
+ * are protected by the "mtx" mutex, and ep_find() must be called with
+ * "mtx" held.
*/
static struct epitem *ep_find(struct eventpoll *ep, struct file *file, int fd)
{
int kcmp;
- unsigned long flags;
struct rb_node *rbp;
struct epitem *epi, *epir = NULL;
struct epoll_filefd ffd;
ep_set_ffd(&ffd, file, fd);
- read_lock_irqsave(&ep->lock, flags);
for (rbp = ep->rbr.rb_node; rbp; ) {
epi = rb_entry(rbp, struct epitem, rbn);
kcmp = ep_cmp_ffd(&ffd, &epi->ffd);
else if (kcmp < 0)
rbp = rbp->rb_left;
else {
- ep_use_epitem(epi);
epir = epi;
break;
}
}
- read_unlock_irqrestore(&ep->lock, flags);
DNPRINTK(3, (KERN_INFO "[%p] eventpoll: ep_find(%p) -> %p\n",
current, file, epir));
DNPRINTK(3, (KERN_INFO "[%p] eventpoll: poll_callback(%p) epi=%p ep=%p\n",
current, epi->ffd.file, epi, ep));
- write_lock_irqsave(&ep->lock, flags);
+ spin_lock_irqsave(&ep->lock, flags);
/*
* If the event mask does not contain any poll(2) event, we consider the
* until the next EPOLL_CTL_MOD is issued.
*/
if (!(epi->event.events & ~EP_PRIVATE_BITS))
- goto is_disabled;
+ goto out_unlock;
+
+ /*
+ * If we are transferring events to userspace, we can hold no locks
+ * (because we're accessing user memory, and because of Linux f_op->poll()
+ * semantics). All the events that happen during that period of time are
+ * chained in ep->ovflist and requeued later on.
+ */
+ if (unlikely(ep->ovflist != EP_UNACTIVE_PTR)) {
+ if (epi->next == EP_UNACTIVE_PTR) {
+ epi->next = ep->ovflist;
+ ep->ovflist = epi;
+ }
+ goto out_unlock;
+ }
/* If this file is already in the ready list we exit soon */
if (ep_is_linked(&epi->rdllink))
if (waitqueue_active(&ep->poll_wait))
pwake++;
-is_disabled:
- write_unlock_irqrestore(&ep->lock, flags);
+out_unlock:
+ spin_unlock_irqrestore(&ep->lock, flags);
/* We have to call this outside the lock */
if (pwake)
rb_insert_color(&epi->rbn, &ep->rbr);
}
+/*
+ * Must be called with "mtx" held.
+ */
static int ep_insert(struct eventpoll *ep, struct epoll_event *event,
struct file *tfile, int fd)
{
epi->ep = ep;
ep_set_ffd(&epi->ffd, tfile, fd);
epi->event = *event;
- atomic_set(&epi->usecnt, 1);
epi->nwait = 0;
+ epi->next = EP_UNACTIVE_PTR;
/* Initialize the poll table using the queue callback */
epq.epi = epi;
/*
* Attach the item to the poll hooks and get current event bits.
* We can safely use the file* here because its usage count has
- * been increased by the caller of this function.
+ * been increased by the caller of this function. Note that after
+ * this operation completes, the poll callback can start hitting
+ * the new item.
*/
revents = tfile->f_op->poll(tfile, &epq.pt);
list_add_tail(&epi->fllink, &tfile->f_ep_links);
spin_unlock(&tfile->f_ep_lock);
- /* We have to drop the new item inside our item list to keep track of it */
- write_lock_irqsave(&ep->lock, flags);
-
- /* Add the current item to the rb-tree */
+ /*
+ * Add the current item to the RB tree. All RB tree operations are
+ * protected by "mtx", and ep_insert() is called with "mtx" held.
+ */
ep_rbtree_insert(ep, epi);
+ /* We have to drop the new item inside our item list to keep track of it */
+ spin_lock_irqsave(&ep->lock, flags);
+
/* If the file is already "ready" we drop it inside the ready list */
if ((revents & event->events) && !ep_is_linked(&epi->rdllink)) {
list_add_tail(&epi->rdllink, &ep->rdllist);
pwake++;
}
- write_unlock_irqrestore(&ep->lock, flags);
+ spin_unlock_irqrestore(&ep->lock, flags);
/* We have to call this outside the lock */
if (pwake)
/*
* We need to do this because an event could have arrived on some
- * allocated wait queue.
+ * allocated wait queue. Note that we don't care about the ep->ovflist
+ * list, since that is used/cleaned only inside a section bound by "mtx".
+ * And ep_insert() is called with "mtx" held.
*/
- write_lock_irqsave(&ep->lock, flags);
+ spin_lock_irqsave(&ep->lock, flags);
if (ep_is_linked(&epi->rdllink))
list_del_init(&epi->rdllink);
- write_unlock_irqrestore(&ep->lock, flags);
+ spin_unlock_irqrestore(&ep->lock, flags);
kmem_cache_free(epi_cache, epi);
error_return:
/*
* Modify the interest event mask by dropping an event if the new mask
- * has a match in the current file status.
+ * has a match in the current file status. Must be called with "mtx" held.
*/
static int ep_modify(struct eventpoll *ep, struct epitem *epi, struct epoll_event *event)
{
*/
revents = epi->ffd.file->f_op->poll(epi->ffd.file, NULL);
- write_lock_irqsave(&ep->lock, flags);
+ spin_lock_irqsave(&ep->lock, flags);
/* Copy the data member from inside the lock */
epi->event.data = event->data;
/*
- * If the item is not linked to the RB tree it means that it's on its
- * way toward the removal. Do nothing in this case.
+ * If the item is "hot" and it is not registered inside the ready
+ * list, push it inside.
*/
- if (ep_rb_linked(&epi->rbn)) {
- /*
- * If the item is "hot" and it is not registered inside the ready
- * list, push it inside. If the item is not "hot" and it is currently
- * registered inside the ready list, unlink it.
- */
- if (revents & event->events) {
- if (!ep_is_linked(&epi->rdllink)) {
- list_add_tail(&epi->rdllink, &ep->rdllist);
-
- /* Notify waiting tasks that events are available */
- if (waitqueue_active(&ep->wq))
- __wake_up_locked(&ep->wq, TASK_UNINTERRUPTIBLE |
- TASK_INTERRUPTIBLE);
- if (waitqueue_active(&ep->poll_wait))
- pwake++;
- }
+ if (revents & event->events) {
+ if (!ep_is_linked(&epi->rdllink)) {
+ list_add_tail(&epi->rdllink, &ep->rdllist);
+
+ /* Notify waiting tasks that events are available */
+ if (waitqueue_active(&ep->wq))
+ __wake_up_locked(&ep->wq, TASK_UNINTERRUPTIBLE |
+ TASK_INTERRUPTIBLE);
+ if (waitqueue_active(&ep->poll_wait))
+ pwake++;
}
}
-
- write_unlock_irqrestore(&ep->lock, flags);
+ spin_unlock_irqrestore(&ep->lock, flags);
/* We have to call this outside the lock */
if (pwake)
return 0;
}
-/*
- * This function is called without holding the "ep->lock" since the call to
- * __copy_to_user() might sleep, and also f_op->poll() might reenable the IRQ
- * because of the way poll() is traditionally implemented in Linux.
- */
-static int ep_send_events(struct eventpoll *ep, struct list_head *txlist,
- struct epoll_event __user *events, int maxevents)
+static int ep_send_events(struct eventpoll *ep, struct epoll_event __user *events,
+ int maxevents)
{
int eventcnt, error = -EFAULT, pwake = 0;
unsigned int revents;
unsigned long flags;
- struct epitem *epi;
- struct list_head injlist;
+ struct epitem *epi, *nepi;
+ struct list_head txlist;
+
+ INIT_LIST_HEAD(&txlist);
+
+ /*
+ * We need to lock this because we could be hit by
+ * eventpoll_release_file() and epoll_ctl(EPOLL_CTL_DEL).
+ */
+ mutex_lock(&ep->mtx);
- INIT_LIST_HEAD(&injlist);
+ /*
+ * Steal the ready list, and re-init the original one to the
+ * empty list. Also, set ep->ovflist to NULL so that events
+ * happening while we loop without locks are not lost. We cannot
+ * let the poll callback queue directly on ep->rdllist, because
+ * we are consuming that list in the loop below, in a lockless way.
+ */
+ spin_lock_irqsave(&ep->lock, flags);
+ list_splice(&ep->rdllist, &txlist);
+ INIT_LIST_HEAD(&ep->rdllist);
+ ep->ovflist = NULL;
+ spin_unlock_irqrestore(&ep->lock, flags);
/*
* We can loop without lock because this is a task private list.
* We just spliced out the ep->rdllist above.
- * Items cannot vanish during the loop because we are holding "sem" in
- * read.
+ * Items cannot vanish during the loop because we are holding "mtx".
*/
- for (eventcnt = 0; !list_empty(txlist) && eventcnt < maxevents;) {
- epi = list_first_entry(txlist, struct epitem, rdllink);
- prefetch(epi->rdllink.next);
+ for (eventcnt = 0; !list_empty(&txlist) && eventcnt < maxevents;) {
+ epi = list_first_entry(&txlist, struct epitem, rdllink);
+
+ list_del_init(&epi->rdllink);
/*
* Get the ready file event set. We can safely use the file
- * because we are holding the "sem" in read and this will
- * guarantee that both the file and the item will not vanish.
+ * because we are holding the "mtx" and this will guarantee
+ * that both the file and the item will not vanish.
*/
revents = epi->ffd.file->f_op->poll(epi->ffd.file, NULL);
revents &= epi->event.events;
/*
* If the event mask intersects the caller-requested one,
* deliver the event to userspace. Again, we are holding
- * "sem" in read, so no operations coming from userspace
- * can change the item.
+ * "mtx", so no operations coming from userspace can change
+ * the item.
*/
if (revents) {
if (__put_user(revents,
epi->event.events &= EP_PRIVATE_BITS;
eventcnt++;
}
-
/*
- * This is tricky. We are holding the "sem" in read, and this
- * means that the operations that can change the "linked" status
- * of the epoll item (epi->rbn and epi->rdllink), cannot touch
- * them. Also, since we are "linked" from a epi->rdllink POV
- * (the item is linked to our transmission list we just
- * spliced), the ep_poll_callback() cannot touch us either,
- * because of the check present in there. Another parallel
- * epoll_wait() will not get the same result set, since we
- * spliced the ready list before. Note that list_del() still
- * shows the item as linked to the test in ep_poll_callback().
+ * At this point, no one can insert into ep->rdllist besides
+ * us. The epoll_ctl() callers are locked out by us holding
+ * "mtx" and the poll callback will queue them in ep->ovflist.
*/
- list_del(&epi->rdllink);
if (!(epi->event.events & EPOLLET) &&
- (revents & epi->event.events))
- list_add_tail(&epi->rdllink, &injlist);
- else {
- /*
- * Be sure the item is totally detached before re-init
- * the list_head. After INIT_LIST_HEAD() is committed,
- * the ep_poll_callback() can requeue the item again,
- * but we don't care since we are already past it.
- */
- smp_mb();
- INIT_LIST_HEAD(&epi->rdllink);
- }
+ (revents & epi->event.events))
+ list_add_tail(&epi->rdllink, &ep->rdllist);
}
error = 0;
- errxit:
+errxit:
+ spin_lock_irqsave(&ep->lock, flags);
/*
- * If the re-injection list or the txlist are not empty, re-splice
- * them to the ready list and do proper wakeups.
+ * During the time we spent in the loop above, some other events
+ * might have been queued by the poll callback. We re-insert them
+ * here (unless they are already queued, or were disabled by EPOLLONESHOT).
*/
- if (!list_empty(&injlist) || !list_empty(txlist)) {
- write_lock_irqsave(&ep->lock, flags);
+ for (nepi = ep->ovflist; (epi = nepi) != NULL;
+ nepi = epi->next, epi->next = EP_UNACTIVE_PTR) {
+ if (!ep_is_linked(&epi->rdllink) &&
+ (epi->event.events & ~EP_PRIVATE_BITS))
+ list_add_tail(&epi->rdllink, &ep->rdllist);
+ }
+ /*
+ * We need to set ep->ovflist back to EP_UNACTIVE_PTR, so that after
+ * releasing the lock, events will be queued in the normal way inside
+ * ep->rdllist.
+ */
+ ep->ovflist = EP_UNACTIVE_PTR;
+
+ /*
+ * In case of error in the event-send loop, or in case the number of
+ * ready events exceeds the userspace limit, we need to splice the
+ * "txlist" back inside ep->rdllist.
+ */
+ list_splice(&txlist, &ep->rdllist);
- list_splice(txlist, &ep->rdllist);
- list_splice(&injlist, &ep->rdllist);
+ if (!list_empty(&ep->rdllist)) {
/*
- * Wake up ( if active ) both the eventpoll wait list and the ->poll()
- * wait list.
+ * Wake up (if active) both the eventpoll wait list and the ->poll()
+ * wait list (delayed after we release the lock).
*/
if (waitqueue_active(&ep->wq))
__wake_up_locked(&ep->wq, TASK_UNINTERRUPTIBLE |
TASK_INTERRUPTIBLE);
if (waitqueue_active(&ep->poll_wait))
pwake++;
-
- write_unlock_irqrestore(&ep->lock, flags);
}
+ spin_unlock_irqrestore(&ep->lock, flags);
+
+ mutex_unlock(&ep->mtx);
/* We have to call this outside the lock */
if (pwake)
return eventcnt == 0 ? error: eventcnt;
}
-/*
- * Perform the transfer of events to user space.
- */
-static int ep_events_transfer(struct eventpoll *ep,
- struct epoll_event __user *events, int maxevents)
-{
- int eventcnt;
- unsigned long flags;
- struct list_head txlist;
-
- INIT_LIST_HEAD(&txlist);
-
- /*
- * We need to lock this because we could be hit by
- * eventpoll_release_file() and epoll_ctl(EPOLL_CTL_DEL).
- */
- down_read(&ep->sem);
-
- /*
- * Steal the ready list, and re-init the original one to the
- * empty list.
- */
- write_lock_irqsave(&ep->lock, flags);
- list_splice(&ep->rdllist, &txlist);
- INIT_LIST_HEAD(&ep->rdllist);
- write_unlock_irqrestore(&ep->lock, flags);
-
- /* Build result set in userspace */
- eventcnt = ep_send_events(ep, &txlist, events, maxevents);
-
- up_read(&ep->sem);
-
- return eventcnt;
-}
-
static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events,
int maxevents, long timeout)
{
MAX_SCHEDULE_TIMEOUT : (timeout * HZ + 999) / 1000;
retry:
- write_lock_irqsave(&ep->lock, flags);
+ spin_lock_irqsave(&ep->lock, flags);
res = 0;
if (list_empty(&ep->rdllist)) {
* ep_poll_callback() when events will become available.
*/
init_waitqueue_entry(&wait, current);
+ wait.flags |= WQ_FLAG_EXCLUSIVE;
__add_wait_queue(&ep->wq, &wait);
for (;;) {
break;
}
- write_unlock_irqrestore(&ep->lock, flags);
+ spin_unlock_irqrestore(&ep->lock, flags);
jtimeout = schedule_timeout(jtimeout);
- write_lock_irqsave(&ep->lock, flags);
+ spin_lock_irqsave(&ep->lock, flags);
}
__remove_wait_queue(&ep->wq, &wait);
/* Is it worth trying to dig for events? */
eavail = !list_empty(&ep->rdllist);
- write_unlock_irqrestore(&ep->lock, flags);
+ spin_unlock_irqrestore(&ep->lock, flags);
/*
* Try to transfer events to user space. In case we get 0 events and
* more luck.
*/
if (!res && eavail &&
- !(res = ep_events_transfer(ep, events, maxevents)) && jtimeout)
+ !(res = ep_send_events(ep, events, maxevents)) && jtimeout)
goto retry;
return res;
}
/*
- * It opens an eventpoll file descriptor by suggesting a storage of "size"
- * file descriptors. The size parameter is just an hint about how to size
- * data structures. It won't prevent the user to store more than "size"
- * file descriptors inside the epoll interface. It is the kernel part of
- * the userspace epoll_create(2).
+ * It opens an eventpoll file descriptor. The "size" parameter is there
+ * for historical reasons, from when epoll was using a hash instead of an
+ * RB tree. With the current implementation, the "size" parameter is ignored
+ * (besides sanity checks).
*/
asmlinkage long sys_epoll_create(int size)
{
error_free:
ep_free(ep);
- kfree(ep);
error_return:
DNPRINTK(3, (KERN_INFO "[%p] eventpoll: sys_epoll_create(%d) = %d\n",
current, size, error));
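As a hedged userspace illustration of the comment above (this program is not part of the patch): any positive "size" behaves identically, since only the sanity check looks at it.

#include <stdio.h>
#include <sys/epoll.h>
#include <unistd.h>

int main(void)
{
	struct epoll_event ev = { .events = EPOLLIN, .data.fd = 0 };
	int epfd = epoll_create(1);	/* any size > 0 behaves the same */

	if (epfd < 0 || epoll_ctl(epfd, EPOLL_CTL_ADD, 0, &ev) < 0)
		return 1;
	if (epoll_wait(epfd, &ev, 1, 1000) == 1)	/* wait up to 1s for stdin */
		printf("fd %d is readable\n", ev.data.fd);
	close(epfd);
	return 0;
}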
/*
* The following function implements the controller interface for
* the eventpoll file that enables the insertion/removal/change of
- * file descriptors inside the interest set. It represents
- * the kernel part of the user space epoll_ctl(2).
+ * file descriptors inside the interest set.
*/
asmlinkage long sys_epoll_ctl(int epfd, int op, int fd,
struct epoll_event __user *event)
*/
ep = file->private_data;
- down_write(&ep->sem);
+ mutex_lock(&ep->mtx);
- /* Try to lookup the file inside our RB tree */
+ /*
+ * Try to lookup the file inside our RB tree. Since we grabbed "mtx"
+ * above, we can be sure to be able to use the item looked up by
+ * ep_find() till we release the mutex.
+ */
epi = ep_find(ep, tfile, fd);
error = -EINVAL;
error = -ENOENT;
break;
}
- /*
- * The function ep_find() increments the usage count of the structure
- * so, if this is not NULL, we need to release it.
- */
- if (epi)
- ep_release_epitem(epi);
- up_write(&ep->sem);
+ mutex_unlock(&ep->mtx);
error_tgt_fput:
fput(tfile);
if (sigmask) {
if (error == -EINTR) {
memcpy(&current->saved_sigmask, &sigsaved,
- sizeof(sigsaved));
+ sizeof(sigsaved));
set_thread_flag(TIF_RESTORE_SIGMASK);
} else
sigprocmask(SIG_SETMASK, &sigsaved, NULL);
* +-------------+----------------+--------------+
*/
-#ifdef CONFIG_SMP
#include <asm/smp.h>
+#ifdef CONFIG_SMP
#define cpu_last_asn(cpuid) (cpu_data[cpuid].last_asn)
#else
extern unsigned long last_asn;
}
#define atomic_sub(i, v) atomic_sub_return(i, v)
+#define atomic_sub_and_test(i,v) (atomic_sub_return(i, v) == 0)
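A userspace analogue of the new helper, sketched with C11 atomics (the function name is illustrative): it is true exactly when the subtraction brings the counter to zero, which is what makes it usable on reference-count release paths.

#include <stdatomic.h>
#include <stdbool.h>

/* Mirrors atomic_sub_and_test(i, v): subtract i, report "result == 0". */
static bool sub_and_test(atomic_int *v, int i)
{
	return atomic_fetch_sub(v, i) - i == 0;
}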
static __inline__ int atomic_inc_return(atomic_t *v)
{
DIE_GPF,
DIE_CALL,
DIE_NMI_IPI,
- DIE_NMI_POST,
DIE_PAGE_FAULT,
};
extern void enable_sep_cpu(void);
extern int sysenter_setup(void);
+/* Defined in head.S */
+extern struct Xgt_desc_struct early_gdt_descr;
+
extern void cpu_set_gdt(int);
extern void switch_to_new_gdt(void);
extern void cpu_init(void);
+extern void init_gdt(int cpu);
extern int force_mwait;
long strncpy_from_user(char *dst, const char __user *src, long count);
long strnlen_user(const char __user *src, long n);
-unsigned long clear_user(void __user *to, unsigned long n);
+unsigned long __clear_user(void __user *to, unsigned long n);
+
+#define clear_user __clear_user
#define strlen_user(str) strnlen_user(str, 32767)
sp_enter_debugger(); \
} while(0)
+enum die_val {
+ DIE_UNUSED,
+};
+
#endif /* !(__ASSEMBLY__) */
/* Some nice offset defines for assembler code. */
#define KDEBUG_DUNNO2_OFF 0x8
#define KDEBUG_TEACH_OFF 0xc
-enum die_val {
- DIE_UNUSED,
-};
-
#endif /* !(_SPARC_KDEBUG_H) */
#include <asm/psr.h>
#include <asm/ptrace.h>
#include <asm/btfixup.h>
+#include <asm/smp.h>
#ifndef __ASSEMBLY__
/* need struct page definitions */
#include <linux/mm.h>
+#include <asm/of_device.h>
+
static inline int
dma_supported(struct device *dev, u64 mask)
{
- BUG_ON(dev->bus != &pci_bus_type);
+ BUG_ON(dev->bus != &pci_bus_type &&
+ dev->bus != &ebus_bus_type);
return pci_dma_supported(to_pci_dev(dev), mask);
}
static inline int
dma_set_mask(struct device *dev, u64 dma_mask)
{
- BUG_ON(dev->bus != &pci_bus_type);
+ BUG_ON(dev->bus != &pci_bus_type &&
+ dev->bus != &ebus_bus_type);
return pci_set_dma_mask(to_pci_dev(dev), dma_mask);
}
dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
gfp_t flag)
{
- BUG_ON(dev->bus != &pci_bus_type);
+ BUG_ON(dev->bus != &pci_bus_type &&
+ dev->bus != &ebus_bus_type);
return pci_iommu_ops->alloc_consistent(to_pci_dev(dev), size, dma_handle, flag);
}
dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
dma_addr_t dma_handle)
{
- BUG_ON(dev->bus != &pci_bus_type);
+ BUG_ON(dev->bus != &pci_bus_type &&
+ dev->bus != &ebus_bus_type);
pci_free_consistent(to_pci_dev(dev), size, cpu_addr, dma_handle);
}
dma_map_single(struct device *dev, void *cpu_addr, size_t size,
enum dma_data_direction direction)
{
- BUG_ON(dev->bus != &pci_bus_type);
+ BUG_ON(dev->bus != &pci_bus_type &&
+ dev->bus != &ebus_bus_type);
return pci_map_single(to_pci_dev(dev), cpu_addr, size, (int)direction);
}
dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
enum dma_data_direction direction)
{
- BUG_ON(dev->bus != &pci_bus_type);
+ BUG_ON(dev->bus != &pci_bus_type &&
+ dev->bus != &ebus_bus_type);
pci_unmap_single(to_pci_dev(dev), dma_addr, size, (int)direction);
}
unsigned long offset, size_t size,
enum dma_data_direction direction)
{
- BUG_ON(dev->bus != &pci_bus_type);
+ BUG_ON(dev->bus != &pci_bus_type &&
+ dev->bus != &ebus_bus_type);
return pci_map_page(to_pci_dev(dev), page, offset, size, (int)direction);
}
dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
enum dma_data_direction direction)
{
- BUG_ON(dev->bus != &pci_bus_type);
+ BUG_ON(dev->bus != &pci_bus_type &&
+ dev->bus != &ebus_bus_type);
pci_unmap_page(to_pci_dev(dev), dma_address, size, (int)direction);
}
dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
enum dma_data_direction direction)
{
- BUG_ON(dev->bus != &pci_bus_type);
+ BUG_ON(dev->bus != &pci_bus_type &&
+ dev->bus != &ebus_bus_type);
return pci_map_sg(to_pci_dev(dev), sg, nents, (int)direction);
}
dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
enum dma_data_direction direction)
{
- BUG_ON(dev->bus != &pci_bus_type);
+ BUG_ON(dev->bus != &pci_bus_type &&
+ dev->bus != &ebus_bus_type);
pci_unmap_sg(to_pci_dev(dev), sg, nhwentries, (int)direction);
}
dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
enum dma_data_direction direction)
{
- BUG_ON(dev->bus != &pci_bus_type);
+ BUG_ON(dev->bus != &pci_bus_type &&
+ dev->bus != &ebus_bus_type);
pci_dma_sync_single_for_cpu(to_pci_dev(dev), dma_handle,
size, (int)direction);
dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size,
enum dma_data_direction direction)
{
- BUG_ON(dev->bus != &pci_bus_type);
+ BUG_ON(dev->bus != &pci_bus_type &&
+ dev->bus != &ebus_bus_type);
pci_dma_sync_single_for_device(to_pci_dev(dev), dma_handle,
size, (int)direction);
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
enum dma_data_direction direction)
{
- BUG_ON(dev->bus != &pci_bus_type);
+ BUG_ON(dev->bus != &pci_bus_type &&
+ dev->bus != &ebus_bus_type);
pci_dma_sync_sg_for_cpu(to_pci_dev(dev), sg, nelems, (int)direction);
}
dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
enum dma_data_direction direction)
{
- BUG_ON(dev->bus != &pci_bus_type);
+ BUG_ON(dev->bus != &pci_bus_type &&
+ dev->bus != &ebus_bus_type);
pci_dma_sync_sg_for_device(to_pci_dev(dev), sg, nelems, (int)direction);
}
DIE_GPF,
DIE_CALL,
DIE_NMI_IPI,
- DIE_NMI_POST,
DIE_PAGE_FAULT,
};
asmlinkage long compat_sys_utimensat(unsigned int dfd, char __user *filename,
struct compat_timespec __user *t, int flags);
+asmlinkage long compat_sys_signalfd(int ufd,
+ const compat_sigset_t __user *sigmask,
+ compat_size_t sigsetsize);
+asmlinkage long compat_sys_timerfd(int ufd, int clockid, int flags,
+ const struct compat_itimerspec __user *utmr);
+
#endif /* CONFIG_COMPAT */
#endif /* _LINUX_COMPAT_H */
int ide_in_drive_list(struct hd_driveid *, const struct drive_list_entry *);
int __ide_dma_bad_drive(ide_drive_t *);
int __ide_dma_good_drive(ide_drive_t *);
-int ide_use_dma(ide_drive_t *);
u8 ide_max_dma_mode(ide_drive_t *);
int ide_tune_dma(ide_drive_t *);
void ide_dma_off(ide_drive_t *);
#endif /* CONFIG_BLK_DEV_IDEDMA_PCI */
#else
-static inline int ide_use_dma(ide_drive_t *drive) { return 0; }
static inline u8 ide_max_dma_mode(ide_drive_t *drive) { return 0; }
static inline int ide_tune_dma(ide_drive_t *drive) { return 0; }
static inline void ide_dma_off(ide_drive_t *drive) { ; }
/* ide-lib.c */
u8 ide_rate_filter(ide_drive_t *, u8);
-extern int ide_dma_enable(ide_drive_t *drive);
extern char *ide_xfer_verbose(u8 xfer_rate);
extern void ide_toggle_bounce(ide_drive_t *drive, int on);
extern int ide_set_xfer_rate(ide_drive_t *drive, u8 rate);
#endif
/* For assembly routines */
-#ifdef CONFIG_HOTPLUG_CPU
-#define __INIT .section ".text","ax"
-#define __INITDATA .section ".data","aw"
-#else
#define __INIT .section ".init.text","ax"
-#define __INITDATA .section ".init.data","aw"
-#endif
#define __FINIT .previous
+#define __INITDATA .section ".init.data","aw"
#ifndef __ASSEMBLY__
/*
void __iowrite32_copy(void __iomem *to, const void *from, size_t count);
void __iowrite64_copy(void __iomem *to, const void *from, size_t count);
+#ifdef CONFIG_MMU
int ioremap_page_range(unsigned long addr, unsigned long end,
unsigned long phys_addr, pgprot_t prot);
+#else
+static inline int ioremap_page_range(unsigned long addr, unsigned long end,
+ unsigned long phys_addr, pgprot_t prot)
+{
+ return 0;
+}
+#endif
/*
* Managed iomap interface
#define USB_ACM_AUX_MAJOR 167
#define USB_CHAR_MAJOR 180
+#define MMC_BLOCK_MAJOR 179
+
#define VXVM_MAJOR 199 /* VERITAS volume i/o driver */
#define VXSPEC_MAJOR 200 /* VERITAS volume config driver */
#define VXDMP_MAJOR 201 /* VERITAS volume multipath driver */
#define PCI_DEVICE_ID_VIA_8363_0 0x0305
#define PCI_DEVICE_ID_VIA_P4M800CE 0x0314
#define PCI_DEVICE_ID_VIA_P4M890 0x0327
+#define PCI_DEVICE_ID_VIA_VT3324 0x0324
#define PCI_DEVICE_ID_VIA_VT3336 0x0336
#define PCI_DEVICE_ID_VIA_8371_0 0x0391
#define PCI_DEVICE_ID_VIA_8501_0 0x0501
#define KMALLOC_SHIFT_LOW 3
#ifdef CONFIG_LARGE_ALLOCS
-#define KMALLOC_SHIFT_HIGH 25
+#define KMALLOC_SHIFT_HIGH ((MAX_ORDER + PAGE_SHIFT) <= 25 ? \
+ (MAX_ORDER + PAGE_SHIFT - 1) : 25)
#else
#if !defined(CONFIG_MMU) || NR_CPUS > 512 || MAX_NUMNODES > 256
#define KMALLOC_SHIFT_HIGH 20
*/
WARN_ON_ONCE(size == 0);
+ if (size >= (1 << KMALLOC_SHIFT_HIGH))
+ return -1;
+
if (size > 64 && size <= 96)
return 1;
if (size > 128 && size <= 192)
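To see what the corrected macro evaluates to, here is a hedged userspace check; MAX_ORDER and PAGE_SHIFT are stand-ins for common defaults, not values taken from this patch. The point of the clamp is that kmalloc must never define a cache larger than the biggest block the buddy allocator can hand out, i.e. 2^(MAX_ORDER - 1) pages.

#include <stdio.h>

#define MAX_ORDER 11	/* assumed buddy-allocator default */
#define PAGE_SHIFT 12	/* assumed 4 KiB pages */

#define KMALLOC_SHIFT_HIGH ((MAX_ORDER + PAGE_SHIFT) <= 25 ? \
	(MAX_ORDER + PAGE_SHIFT - 1) : 25)

int main(void)
{
	/* 11 + 12 = 23 <= 25, so the shift is 22: the largest cache is
	 * 4 MiB, an order-10 block, which the buddy allocator can satisfy. */
	printf("KMALLOC_SHIFT_HIGH = %d\n", KMALLOC_SHIFT_HIGH);
	return 0;
}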
static struct timer_list watchdog_timer;
static DEFINE_SPINLOCK(watchdog_lock);
static cycle_t watchdog_last;
-static int watchdog_resumed;
+static unsigned long watchdog_resumed;
/*
* Interval: 0.5sec Threshold: 0.0625s
spin_lock(&watchdog_lock);
- resumed = watchdog_resumed;
- if (unlikely(resumed))
- watchdog_resumed = 0;
+ resumed = test_and_clear_bit(0, &watchdog_resumed);
wdnow = watchdog->read();
wd_nsec = cyc2ns(watchdog, (wdnow - watchdog_last) & watchdog->mask);
}
static void clocksource_resume_watchdog(void)
{
- spin_lock(&watchdog_lock);
- watchdog_resumed = 1;
- spin_unlock(&watchdog_lock);
+ set_bit(0, &watchdog_resumed);
}
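The change above replaces a spinlock-protected int with a lock-free flag: set_bit() can be called safely from the resume path, and test_and_clear_bit() consumes the flag in one atomic step. A userspace analogue using C11 atomics (names are illustrative):

#include <stdatomic.h>
#include <stdbool.h>

static atomic_ulong resumed;

static void mark_resumed(void)		/* like clocksource_resume_watchdog() */
{
	atomic_fetch_or(&resumed, 1UL);		/* set_bit(0, ...) */
}

static bool take_resumed(void)		/* like the check in the watchdog tick */
{
	/* test_and_clear_bit(0, ...): read and clear atomically */
	return atomic_fetch_and(&resumed, ~1UL) & 1UL;
}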
static void clocksource_check_watchdog(struct clocksource *cs)
unsigned long flags;
unsigned long now = read_persistent_clock();
+ clocksource_resume();
+
write_seqlock_irqsave(&xtime_lock, flags);
if (now && (now > timekeeping_suspend_time)) {
prev = &curr->next;
}
- clocksource_resume();
-
write_seqlock_irqsave(&xtime_lock, flags);
if (ti == time_interpolator) {
/* we lost the best time-interpolator: */
SNMP_MIB_SENTINEL
};
+/* Following RFC4293 items are displayed in /proc/net/netstat */
+static const struct snmp_mib snmp4_ipextstats_list[] = {
+ SNMP_MIB_ITEM("InNoRoutes", IPSTATS_MIB_INNOROUTES),
+ SNMP_MIB_ITEM("InTruncatedPkts", IPSTATS_MIB_INTRUNCATEDPKTS),
+ SNMP_MIB_ITEM("InMcastPkts", IPSTATS_MIB_INMCASTPKTS),
+ SNMP_MIB_ITEM("OutMcastPkts", IPSTATS_MIB_OUTMCASTPKTS),
+ SNMP_MIB_ITEM("InBcastPkts", IPSTATS_MIB_INBCASTPKTS),
+ SNMP_MIB_ITEM("OutBcastPkts", IPSTATS_MIB_OUTBCASTPKTS),
+ SNMP_MIB_SENTINEL
+};
+
static const struct snmp_mib snmp4_icmp_list[] = {
SNMP_MIB_ITEM("InMsgs", ICMP_MIB_INMSGS),
SNMP_MIB_ITEM("InErrors", ICMP_MIB_INERRORS),
snmp_fold_field((void **)net_statistics,
snmp4_net_list[i].entry));
+ seq_puts(seq, "\nIpExt:");
+ for (i = 0; snmp4_ipextstats_list[i].name != NULL; i++)
+ seq_printf(seq, " %s", snmp4_ipextstats_list[i].name);
+
+ seq_puts(seq, "\nIpExt:");
+ for (i = 0; snmp4_ipextstats_list[i].name != NULL; i++)
+ seq_printf(seq, " %lu",
+ snmp_fold_field((void **)ip_statistics,
+ snmp4_ipextstats_list[i].entry));
+
seq_putc(seq, '\n');
return 0;
}
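With the two loops above, /proc/net/netstat gains an IpExt header line followed by a matching value line, along these lines (the counter values are purely illustrative):

IpExt: InNoRoutes InTruncatedPkts InMcastPkts OutMcastPkts InBcastPkts OutBcastPkts
IpExt: 0 0 4 2 17 3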
IP6_INC_STATS_BH(ip6_dst_idev(skb->dst), IPSTATS_MIB_INMCASTPKTS);
hdr = ipv6_hdr(skb);
- deliver = likely(!(skb->dev->flags & (IFF_PROMISC|IFF_ALLMULTI))) ||
+ deliver = unlikely(skb->dev->flags & (IFF_PROMISC|IFF_ALLMULTI)) ||
ipv6_chk_mcast_addr(skb->dev, &hdr->daddr, NULL);
/*
band = res.classid;
}
band = TC_H_MIN(band) - 1;
- if (band > q->bands)
+ if (band >= q->bands)
return q->queues[q->prio2band[0]];
return q->queues[band];
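The fix closes an off-by-one: q->queues[] holds q->bands entries, so valid indices run from 0 to q->bands - 1, and the old "band > q->bands" test let band == q->bands read one slot past the end of the array. A generic sketch of the corrected bound (illustrative helper, not from the patch):

#include <stddef.h>

/* An array of n entries accepts indices 0..n-1, so reject index >= n. */
static int pick(const int *queues, size_t n, size_t band, int fallback)
{
	if (band >= n)
		return fallback;
	return queues[band];
}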
n = __vmalloc(sz, GFP_KERNEL, PAGE_KERNEL);
else
n = (struct hlist_head *)
- __get_free_pages(GFP_KERNEL, get_order(sz));
+ __get_free_pages(GFP_KERNEL | __GFP_NOWARN,
+ get_order(sz));
if (n)
memset(n, 0, sz);
struct hlist_head *chain;
struct hlist_node *entry;
+ *err = -ENOENT;
+ if (xfrm_policy_id2dir(id) != dir)
+ return NULL;
+
*err = 0;
write_lock_bh(&xfrm_policy_lock);
chain = xfrm_policy_byidx + idx_hash(id);