#include <linux/genetlink.h>
#include <linux/taskstats.h>
+#include <linux/cgroupstats.h>
/*
* Generic macros for dealing with netlink sockets. Might be duplicated
fprintf(stderr, " -i: print IO accounting (works only with -p)\n");
fprintf(stderr, " -l: listen forever\n");
fprintf(stderr, " -v: debug on\n");
+ fprintf(stderr, " -C: container path\n");
}
/*
t->nvcsw, t->nivcsw);
}
+void print_cgroupstats(struct cgroupstats *c)
+{
+ printf("sleeping %llu, blocked %llu, running %llu, stopped %llu, "
+ "uninterruptible %llu\n", c->nr_sleeping, c->nr_io_wait,
+ c->nr_running, c->nr_stopped, c->nr_uninterruptible);
+}
+
+
void print_ioacct(struct taskstats *t)
{
printf("%s: read=%llu, write=%llu, cancelled_write=%llu\n",
int maskset = 0;
char *logfile = NULL;
int loop = 0;
+ int containerset = 0;
+ char containerpath[1024];
+ int cfd = 0;
struct msgtemplate msg;
while (1) {
- c = getopt(argc, argv, "qdiw:r:m:t:p:vl");
+ c = getopt(argc, argv, "qdiw:r:m:t:p:vlC:");
if (c < 0)
break;
printf("printing task/process context switch rates\n");
print_task_context_switch_counts = 1;
break;
+ case 'C':
+ containerset = 1;
+ strncpy(containerpath, optarg, sizeof(containerpath) - 1);
+ containerpath[sizeof(containerpath) - 1] = '\0';
+ break;
case 'w':
logfile = strdup(optarg);
printf("write to file %s\n", logfile);
}
}
+ if (tid && containerset) {
+ fprintf(stderr, "Select either -t or -C, not both\n");
+ goto err;
+ }
+
if (tid) {
rc = send_cmd(nl_sd, id, mypid, TASKSTATS_CMD_GET,
cmd_type, &tid, sizeof(__u32));
}
}
+ if (containerset) {
+ cfd = open(containerpath, O_RDONLY);
+ if (cfd < 0) {
+ perror("error opening container file");
+ goto err;
+ }
+ rc = send_cmd(nl_sd, id, mypid, CGROUPSTATS_CMD_GET,
+ CGROUPSTATS_CMD_ATTR_FD, &cfd, sizeof(__u32));
+ if (rc < 0) {
+ perror("error sending cgroupstats command");
+ goto err;
+ }
+ }
+
do {
int i;
}
break;
+ case CGROUPSTATS_TYPE_CGROUP_STATS:
+ print_cgroupstats(NLA_DATA(na));
+ break;
default:
fprintf(stderr, "Unknown nla_type %d\n",
na->nla_type);
close(nl_sd);
if (fd)
close(fd);
+ if (cfd)
+ close(cfd);
return 0;
}
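For reference, the essence of the new -C path, pulled out of the surrounding
tool code (send_cmd(), nl_sd, id and mypid are helpers and state that
getdelays.c already sets up; the /dev/cgroup mount point is an assumption):

	/* Condensed sketch of the -C flow, not a standalone program. */
	int cfd = open("/dev/cgroup/a", O_RDONLY);	/* hypothetical path */
	if (cfd < 0) {
		perror("error opening container file");
		return -1;
	}
	/* A single CGROUPSTATS_CMD_ATTR_FD attribute carries the open fd: */
	rc = send_cmd(nl_sd, id, mypid, CGROUPSTATS_CMD_GET,
		      CGROUPSTATS_CMD_ATTR_FD, &cfd, sizeof(__u32));

The reply carries a CGROUPSTATS_TYPE_CGROUP_STATS attribute, printed by
print_cgroupstats() above, so an invocation like "./getdelays -C /dev/cgroup/a"
prints one line of per-state task counts.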
---------------------------
-What: Interrupt only SA_* flags
-When: September 2007
-Why: The interrupt related SA_* flags are replaced by IRQF_* to move them
- out of the signal namespace.
-
-Who: Thomas Gleixner <tglx@linutronix.de>
-
----------------------------
-
What: PHYSDEVPATH, PHYSDEVBUS, PHYSDEVDRIVER in the uevent environment
When: October 2008
Why: The stacking of class devices makes these values misleading and
And,
-trace_mark(subsystem_event, "%d %s", someint, somestring);
+trace_mark(subsystem_event, "myint %d mystring %s", someint, somestring);
Where :
- subsystem_event is an identifier unique to your event
- subsystem is the name of your subsystem.
- event is the name of the event to mark.
-- "%d %s" is the formatted string for the serializer.
+- "myint %d mystring %s" is the formatted string for the serializer. "myint" and
+ "mystring" are repectively the field names associated with the first and
+ second parameter.
- someint is an integer.
- somestring is a char pointer.
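As an illustrative sketch (the subsystem name, event name and variables below
are made up), instrumenting kernel code with a named-field marker looks like:

	#include <linux/marker.h>

	static void mysubsys_submit(int qlen, const char *qname)
	{
		/* "qlen" and "qname" name the fields, so the serializer
		 * can expose self-describing event data. */
		trace_mark(mysubsys_submit, "qlen %d qname %s", qlen, qname);
	}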
* RTC_IRQP_SET, RTC_IRQP_READ: the irq_set_freq function will be called
to set the frequency while the framework will handle the read for you
since the frequency is stored in the irq_freq member of the rtc_device
- structure. Also make sure you set the max_user_freq member in your
- initialization routines so the framework can sanity check the user
- input for you.
+ structure. Your driver needs to initialize the irq_freq member during
+ init. In the irq_set_freq function, make sure the requested frequency
+ is within the range your hardware supports. If you cannot actually
+ change the frequency, just return -ENOTTY.
If all else fails, check out the rtc-test.c driver!
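A minimal sketch of such an irq_set_freq implementation (the power-of-two
check, the 64 Hz ceiling and the register accessor are invented for
illustration; only the -ENOTTY convention comes from the framework):

	static int foo_rtc_irq_set_freq(struct device *dev, int freq)
	{
		/* Hypothetical hardware: power-of-two rates up to 64 Hz. */
		if (freq <= 0 || freq > 64 || (freq & (freq - 1)))
			return -ENOTTY;

		foo_write_rate_register(freq);	/* made-up hardware write */
		return 0;	/* on success the core records freq in irq_freq */
	}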
bool
default y
+config NO_DMA
+ bool
+ default y
+
config RWSEM_GENERIC_SPINLOCK
bool
default y
source "fs/Kconfig.binfmt"
+config GENERIC_HARDIRQS
+ bool
+ default y
+
config ETRAX_CMDLINE
string "Kernel command line"
default "root=/dev/mtdblock3"
# bring in ETRAX built-in drivers
menu "Drivers for built-in interfaces"
-source arch/cris/arch-v10/drivers/Kconfig
+# arch/cris/arch is a symlink to correct arch (arch-v10 or arch-v32)
+source arch/cris/arch/drivers/Kconfig
endmenu
source "drivers/telephony/Kconfig"
+source "drivers/i2c/Kconfig"
+
+source "drivers/rtc/Kconfig"
+
#
# input before char - char/joystick depends on it. As does USB.
#
source "sound/Kconfig"
+source "drivers/pcmcia/Kconfig"
+
+source "drivers/pci/Kconfig"
+
source "drivers/usb/Kconfig"
source "kernel/Kconfig.instrumentation"
CONFIG_MTD_CFI=y
# CONFIG_MTD_CFI_INTELEXT is not set
CONFIG_MTD_CFI_AMDSTD=y
-CONFIG_MTD_AMDSTD=y
CONFIG_MTD_CHAR=y
CONFIG_MTD_BLOCK=y
CONFIG_ETRAX_I2C=y
# CONFIG_MTD_CFI_GEOMETRY is not set
# CONFIG_MTD_CFI_INTELEXT is not set
CONFIG_MTD_CFI_AMDSTD=y
-CONFIG_MTD_AMDSTD=y
# CONFIG_MTD_SHARP is not set
# CONFIG_MTD_PHYSMAP is not set
# CONFIG_MTD_NORA is not set
bool "Ethernet support"
depends on ETRAX_ARCH_V10
select NET_ETHERNET
+ select MII
help
This option enables the ETRAX 100LX built-in 10/100Mbit Ethernet
controller.
select MTD
select MTD_CFI
select MTD_CFI_AMDSTD
- select MTD_OBSOLETE_CHIPS
- select MTD_AMDSTD
select MTD_CHAR
select MTD_BLOCK
select MTD_PARTITIONS
"%s: Probing a 0x%08lx bytes large window at 0x%08lx.\n",
map_cs->name, map_cs->size, map_cs->map_priv_1);
-#ifdef CONFIG_MTD_AMDSTD
- mtd_cs = do_map_probe("amd_flash", map_cs);
-#endif
#ifdef CONFIG_MTD_CFI
+ mtd_cs = do_map_probe("cfi_probe", map_cs);
+#endif
+#ifdef CONFIG_MTD_JEDECPROBE
if (!mtd_cs) {
- mtd_cs = do_map_probe("cfi_probe", map_cs);
+ mtd_cs = do_map_probe("jedec_probe", map_cs);
}
#endif
data = *R_PORT_PB_DATA;
else if (priv->minor == GPIO_MINOR_G)
data = *R_PORT_G_DATA;
- else
+ else {
+ spin_unlock(&gpio_lock);
return 0;
+ }
if ((data & priv->highalarm) ||
(~data & priv->lowalarm)) {
ssize_t retval = count;
if (priv->minor !=GPIO_MINOR_A && priv->minor != GPIO_MINOR_B) {
- return -EFAULT;
+ retval = -EFAULT;
+ goto out;
}
if (!access_ok(VERIFY_READ, buf, count)) {
- return -EFAULT;
+ retval = -EFAULT;
+ goto out;
}
clk_mask = priv->clk_mask;
data_mask = priv->data_mask;
/* It must have been configured using the IO_CFG_WRITE_MODE */
/* Perhaps a better error code? */
if (clk_mask == 0 || data_mask == 0) {
- return -EPERM;
+ retval = -EPERM;
+ goto out;
}
write_msb = priv->write_msb;
D(printk("gpio_write: %lu to data 0x%02X clk 0x%02X msb: %i\n",count, data_mask, clk_mask, write_msb));
}
}
}
+out:
spin_unlock(&gpio_lock);
return retval;
}
while (p) {
if (p->highalarm | p->lowalarm) {
gpio_some_alarms = 1;
+ spin_unlock(&gpio_lock);
return 0;
}
p = p->next;
;; deal with pending signals and notify-resume requests
move.d $r9, $r10 ; do_notify_resume syscall/irq param
- moveq 0, $r11 ; oldset param - 0 in this case
- move.d $sp, $r12 ; the regs param
- move.d $r1, $r13 ; the thread_info_flags parameter
+ move.d $sp, $r11 ; the regs param
+ move.d $r1, $r12 ; the thread_info_flags parameter
jsr do_notify_resume
ba _Rexit
push $r10 ; push orig_r10
clear.d [$sp=$sp-4] ; frametype == 0, normal frame
+ ;; If there is a glitch on the NMI pin shorter than ~100ns
+ ;; (i.e. non-active by the time we get here) then the nmi_pin bit
+ ;; in R_IRQ_MASK0_RD will already be cleared. The watchdog_nmi bit
+ ;; is cleared by us however (when feeding the watchdog), which is why
+ ;; we use that bit to determine what brought us here.
+
move.d [R_IRQ_MASK0_RD], $r1 ; External NMI or watchdog?
- and.d 0x80000000, $r1
- beq wdog
+ and.d (1<<30), $r1
+ bne wdog
move.d $sp, $r10
jsr handle_nmi
setf m ; Enable NMI again
- retb ; Return from NMI
+ ba _Rexit ; Return the standard way
nop
wdog:
#if defined(CONFIG_ETRAX_WATCHDOG) && !defined(CONFIG_SVINTO_SIM)
push $r10 ; push orig_r10
clear.d [$sp=$sp-4] ; frametype == 0, normal frame
- moveq 2, $r2 ; first bit we care about is the timer0 irq
- move.d [R_VECT_MASK_RD], $r0; read the irq bits that triggered the multiple irq
- move.d $r0, [R_VECT_MASK_CLR] ; Block all active IRQs
-1:
- btst $r2, $r0 ; check for the irq given by bit r2
- bpl 2f
- move.d $r2, $r10 ; First argument to do_IRQ
- move.d $sp, $r11 ; second argument to do_IRQ
- jsr do_IRQ
-2:
- addq 1, $r2 ; next vector bit
- cmp.b 32, $r2
- bne 1b ; process all irq's up to and including number 31
- moveq 0, $r9 ; make ret_from_intr realise we came from an ir
+ move.d $sp, $r10
+ jsr do_multiple_IRQ
- move.d $r0, [R_VECT_MASK_SET] ; Unblock all the IRQs
jump ret_from_intr
do_sigtrap:
ba do_sigtrap ; SIGTRAP the offending process.
pop $dccr ; Restore dccr in delay slot.
+ .global kernel_execve
+kernel_execve:
+ move.d __NR_execve, $r9
+ break 13
+ ret
+ nop
+
.data
hw_bp_trigs:
.long sys_add_key
.long sys_request_key
.long sys_keyctl
+ .long sys_ioprio_set
+ .long sys_ioprio_get /* 290 */
+ .long sys_inotify_init
+ .long sys_inotify_add_watch
+ .long sys_inotify_rm_watch
+ .long sys_migrate_pages
+ .long sys_openat /* 295 */
+ .long sys_mkdirat
+ .long sys_mknodat
+ .long sys_fchownat
+ .long sys_futimesat
+ .long sys_fstatat64 /* 300 */
+ .long sys_unlinkat
+ .long sys_renameat
+ .long sys_linkat
+ .long sys_symlinkat
+ .long sys_readlinkat /* 305 */
+ .long sys_fchmodat
+ .long sys_faccessat
+ .long sys_pselect6
+ .long sys_ppoll
+ .long sys_unshare /* 310 */
+ .long sys_set_robust_list
+ .long sys_get_robust_list
+ .long sys_splice
+ .long sys_sync_file_range
+ .long sys_tee /* 315 */
+ .long sys_vmsplice
+ .long sys_move_pages
+ .long sys_getcpu
+ .long sys_epoll_pwait
+ .long sys_utimensat /* 320 */
+ .long sys_signalfd
+ .long sys_timerfd
+ .long sys_eventfd
+ .long sys_fallocate
/*
* NOTE!! This doesn't have to be exact - we just have
-/* $Id: fasttimer.c,v 1.9 2005/03/04 08:16:16 starvik Exp $
+/*
* linux/arch/cris/kernel/fasttimer.c
*
* Fast timers for ETRAX100/ETRAX100LX
- * This may be useful in other OS than Linux so use 2 space indentation...
*
- * $Log: fasttimer.c,v $
- * Revision 1.9 2005/03/04 08:16:16 starvik
- * Merge of Linux 2.6.11.
- *
- * Revision 1.8 2005/01/05 06:09:29 starvik
- * cli()/sti() will be obsolete in 2.6.11.
- *
- * Revision 1.7 2005/01/03 13:35:46 starvik
- * Removed obsolete stuff.
- * Mark fast timer IRQ as not shared.
- *
- * Revision 1.6 2004/05/14 10:18:39 starvik
- * Export fast_timer_list
- *
- * Revision 1.5 2004/05/14 07:58:01 starvik
- * Merge of changes from 2.4
- *
- * Revision 1.4 2003/07/04 08:27:41 starvik
- * Merge of Linux 2.5.74
- *
- * Revision 1.3 2002/12/12 08:26:32 starvik
- * Don't use C-comments inside CVS comments
- *
- * Revision 1.2 2002/12/11 15:42:02 starvik
- * Extracted v10 (ETRAX 100LX) specific stuff from arch/cris/kernel/
- *
- * Revision 1.1 2002/11/18 07:58:06 starvik
- * Fast timers (from Linux 2.4)
- *
- * Revision 1.5 2002/10/15 06:21:39 starvik
- * Added call to init_waitqueue_head
- *
- * Revision 1.4 2002/05/28 17:47:59 johana
- * Added del_fast_timer()
- *
- * Revision 1.3 2002/05/28 16:16:07 johana
- * Handle empty fast_timer_list
- *
- * Revision 1.2 2002/05/27 15:38:42 johana
- * Made it compile without warnings on Linux 2.4.
- * (includes, wait_queue, PROC_FS and snprintf)
- *
- * Revision 1.1 2002/05/27 15:32:25 johana
- * arch/etrax100/kernel/fasttimer.c v1.8 from the elinux tree.
- *
- * Revision 1.8 2001/11/27 13:50:40 pkj
- * Disable interrupts while stopping the timer and while modifying the
- * list of active timers in timer1_handler() as it may be interrupted
- * by other interrupts (e.g., the serial interrupt) which may add fast
- * timers.
- *
- * Revision 1.7 2001/11/22 11:50:32 pkj
- * * Only store information about the last 16 timers.
- * * proc_fasttimer_read() now uses an allocated buffer, since it
- * requires more space than just a page even for only writing the
- * last 16 timers. The buffer is only allocated on request, so
- * unless /proc/fasttimer is read, it is never allocated.
- * * Renamed fast_timer_started to fast_timers_started to match
- * fast_timers_added and fast_timers_expired.
- * * Some clean-up.
- *
- * Revision 1.6 2000/12/13 14:02:08 johana
- * Removed volatile for fast_timer_list
- *
- * Revision 1.5 2000/12/13 13:55:35 johana
- * Added DEBUG_LOG, added som cli() and cleanup
- *
- * Revision 1.4 2000/12/05 13:48:50 johana
- * Added range check when writing proc file, modified timer int handling
- *
- * Revision 1.3 2000/11/23 10:10:20 johana
- * More debug/logging possibilities.
- * Moved GET_JIFFIES_USEC() to timex.h and time.c
- *
- * Revision 1.2 2000/11/01 13:41:04 johana
- * Clean up and bugfixes.
- * Created new do_gettimeofday_fast() that gets a timeval struct
- * with time based on jiffies and *R_TIMER0_DATA, uses a table
- * for fast conversion of timer value to microseconds.
- * (Much faster the standard do_gettimeofday() and we don't really
- * want to use the true time - we want the "uptime" so timers don't screw up
- * when we change the time.
- * TODO: Add efficient support for continuous timers as well.
- *
- * Revision 1.1 2000/10/26 15:49:16 johana
- * Added fasttimer, highresolution timers.
- *
- * Copyright (C) 2000,2001 2002 Axis Communications AB, Lund, Sweden
+ * Copyright (C) 2000-2007 Axis Communications AB, Lund, Sweden
*/
#include <linux/errno.h>
#ifdef FAST_TIMER_SANITY_CHECKS
#define SANITYCHECK(x) x
-static int sanity_failed = 0;
+static int sanity_failed;
#else
#define SANITYCHECK(x)
#endif
#define D2(x)
#define DP(x)
-#define __INLINE__ inline
-
-static int fast_timer_running = 0;
-static int fast_timers_added = 0;
-static int fast_timers_started = 0;
-static int fast_timers_expired = 0;
-static int fast_timers_deleted = 0;
-static int fast_timer_is_init = 0;
-static int fast_timer_ints = 0;
+static unsigned int fast_timer_running;
+static unsigned int fast_timers_added;
+static unsigned int fast_timers_started;
+static unsigned int fast_timers_expired;
+static unsigned int fast_timers_deleted;
+static unsigned int fast_timer_is_init;
+static unsigned int fast_timer_ints;
struct fast_timer *fast_timer_list = NULL;
#define DEBUG_LOG_MAX 128
static const char * debug_log_string[DEBUG_LOG_MAX];
static unsigned long debug_log_value[DEBUG_LOG_MAX];
-static int debug_log_cnt = 0;
-static int debug_log_cnt_wrapped = 0;
+static unsigned int debug_log_cnt;
+static unsigned int debug_log_cnt_wrapped;
#define DEBUG_LOG(string, value) \
{ \
int timer_delay_settings[NUM_TIMER_STATS];
/* Not true gettimeofday, only checks the jiffies (uptime) + useconds */
-void __INLINE__ do_gettimeofday_fast(struct timeval *tv)
+inline void do_gettimeofday_fast(struct fasttime_t *tv)
{
- unsigned long sec = jiffies;
- unsigned long usec = GET_JIFFIES_USEC();
-
- usec += (sec % HZ) * (1000000 / HZ);
- sec = sec / HZ;
-
- if (usec > 1000000)
- {
- usec -= 1000000;
- sec++;
- }
- tv->tv_sec = sec;
- tv->tv_usec = usec;
+ tv->tv_jiff = jiffies;
+ tv->tv_usec = GET_JIFFIES_USEC();
}
-int __INLINE__ timeval_cmp(struct timeval *t0, struct timeval *t1)
+inline int fasttime_cmp(struct fasttime_t *t0, struct fasttime_t *t1)
{
- if (t0->tv_sec < t1->tv_sec)
- {
- return -1;
- }
- else if (t0->tv_sec > t1->tv_sec)
- {
- return 1;
- }
- if (t0->tv_usec < t1->tv_usec)
- {
- return -1;
- }
- else if (t0->tv_usec > t1->tv_usec)
- {
- return 1;
- }
- return 0;
+ /* Compare jiffies. Takes care of wrapping */
+ if (time_before(t0->tv_jiff, t1->tv_jiff))
+ return -1;
+ else if (time_after(t0->tv_jiff, t1->tv_jiff))
+ return 1;
+
+ /* Compare microseconds */
+ if (t0->tv_usec < t1->tv_usec)
+ return -1;
+ else if (t0->tv_usec > t1->tv_usec)
+ return 1;
+ return 0;
}
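/* Illustration (not part of the original code): why time_before()/
 * time_after() rather than a plain '<' on tv_jiff. Near a jiffies wrap:
 *
 *	unsigned long a = 0xfffffff0UL;	(just before the wrap)
 *	unsigned long b = 0x00000010UL;	(just after the wrap)
 *
 * a < b is false although b is later in time; time_before(a, b) is true,
 * because it tests the sign of the wrapped difference (long)(a - b).
 */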
-void __INLINE__ start_timer1(unsigned long delay_us)
+inline void start_timer1(unsigned long delay_us)
{
int freq_index = 0; /* This is the lowest resolution */
unsigned long upper_limit = MAX_DELAY_US;
timer_freq_settings[fast_timers_started % NUM_TIMER_STATS] = freq_index;
timer_delay_settings[fast_timers_started % NUM_TIMER_STATS] = delay_us;
- D1(printk("start_timer1 : %d us freq: %i div: %i\n",
+ D1(printk(KERN_DEBUG "start_timer1 : %d us freq: %i div: %i\n",
delay_us, freq_index, div));
/* Clear timer1 irq */
*R_IRQ_MASK0_CLR = IO_STATE(R_IRQ_MASK0_CLR, timer1, clr);
printk(KERN_WARNING
"timer name: %s data: 0x%08lX already in list!\n", name, data);
sanity_failed++;
- return;
+ goto done;
}
else
{
t->name = name;
t->tv_expires.tv_usec = t->tv_set.tv_usec + delay_us % 1000000;
- t->tv_expires.tv_sec = t->tv_set.tv_sec + delay_us / 1000000;
+ t->tv_expires.tv_jiff = t->tv_set.tv_jiff + delay_us / 1000000 * HZ;
if (t->tv_expires.tv_usec > 1000000)
{
t->tv_expires.tv_usec -= 1000000;
- t->tv_expires.tv_sec++;
+ t->tv_expires.tv_jiff += HZ;
}
#ifdef FAST_TIMER_LOG
timer_added_log[fast_timers_added % NUM_TIMER_STATS] = *t;
fast_timers_added++;
/* Check if this should timeout before anything else */
- if (tmp == NULL || timeval_cmp(&t->tv_expires, &tmp->tv_expires) < 0)
+ if (tmp == NULL || fasttime_cmp(&t->tv_expires, &tmp->tv_expires) < 0)
{
/* Put first in list and modify the timer value */
t->prev = NULL;
start_timer1(delay_us);
} else {
/* Put in correct place in list */
- while (tmp->next &&
- timeval_cmp(&t->tv_expires, &tmp->next->tv_expires) > 0)
+ while (tmp->next && fasttime_cmp(&t->tv_expires,
+ &tmp->next->tv_expires) > 0)
{
tmp = tmp->next;
}
D2(printk("start_one_shot_timer: %d us done\n", delay_us));
+done:
local_irq_restore(flags);
} /* start_one_shot_timer */
/* Timer 1 interrupt handler */
static irqreturn_t
-timer1_handler(int irq, void *dev_id, struct pt_regs *regs)
+timer1_handler(int irq, void *dev_id)
{
struct fast_timer *t;
unsigned long flags;
+ /* We keep interrupts disabled not only when we modify the
+ * fast timer list, but any time we hold a reference to a
+ * timer in the list, since del_fast_timer may be called
+ * from (another) interrupt context. Thus, the only time
+ * when interrupts are enabled is when calling the timer
+ * callback function.
+ */
local_irq_save(flags);
/* Clear timer1 irq */
fast_timer_running = 0;
fast_timer_ints++;
- local_irq_restore(flags);
-
t = fast_timer_list;
while (t)
{
- struct timeval tv;
+ struct fasttime_t tv;
+ fast_timer_function_type *f;
+ unsigned long d;
/* Has it really expired? */
do_gettimeofday_fast(&tv);
- D1(printk("t: %is %06ius\n", tv.tv_sec, tv.tv_usec));
+ D1(printk(KERN_DEBUG "t: %is %06ius\n",
+ tv.tv_jiff, tv.tv_usec));
- if (timeval_cmp(&t->tv_expires, &tv) <= 0)
+ if (fasttime_cmp(&t->tv_expires, &tv) <= 0)
{
/* Yes it has expired */
#ifdef FAST_TIMER_LOG
fast_timers_expired++;
/* Remove this timer before call, since it may reuse the timer */
- local_irq_save(flags);
if (t->prev)
{
t->prev->next = t->next;
}
t->prev = NULL;
t->next = NULL;
- local_irq_restore(flags);
- if (t->function != NULL)
- {
- t->function(t->data);
- }
- else
- {
+ /* Save function callback data before enabling
+ * interrupts, since the timer may be removed and
+ * we don't know how it was allocated
+ * (e.g. ->function and ->data may become overwritten
+ * after deletion if the timer was stack-allocated).
+ */
+ f = t->function;
+ d = t->data;
+
+ if (f != NULL) {
+ /* Run callback with interrupts enabled. */
+ local_irq_restore(flags);
+ f(d);
+ local_irq_save(flags);
+ } else
DEBUG_LOG("!timer1 %i function==NULL!\n", fast_timer_ints);
- }
}
else
{
D1(printk(".\n"));
}
- local_irq_save(flags);
if ((t = fast_timer_list) != NULL)
{
/* Start next timer.. */
- long us;
- struct timeval tv;
+ long us = 0;
+ struct fasttime_t tv;
do_gettimeofday_fast(&tv);
- us = ((t->tv_expires.tv_sec - tv.tv_sec) * 1000000 +
- t->tv_expires.tv_usec - tv.tv_usec);
+
+ /* time_after_eq takes care of wrapping */
+ if (time_after_eq(t->tv_expires.tv_jiff, tv.tv_jiff))
+ us = ((t->tv_expires.tv_jiff - tv.tv_jiff) *
+ 1000000 / HZ + t->tv_expires.tv_usec -
+ tv.tv_usec);
+
if (us > 0)
{
if (!fast_timer_running)
#endif
start_timer1(us);
}
- local_irq_restore(flags);
break;
}
else
D1(printk("e! %d\n", us));
}
}
- local_irq_restore(flags);
}
+ local_irq_restore(flags);
+
if (!t)
{
D1(printk("t1 stop!\n"));
void schedule_usleep(unsigned long us)
{
struct fast_timer t;
-#ifdef DECLARE_WAITQUEUE
wait_queue_head_t sleep_wait;
init_waitqueue_head(&sleep_wait);
- {
- DECLARE_WAITQUEUE(wait, current);
-#else
- struct wait_queue *sleep_wait = NULL;
- struct wait_queue wait = { current, NULL };
-#endif
D1(printk("schedule_usleep(%d)\n", us));
- add_wait_queue(&sleep_wait, &wait);
- set_current_state(TASK_INTERRUPTIBLE);
start_one_shot_timer(&t, wake_up_func, (unsigned long)&sleep_wait, us,
"usleep");
- schedule();
- set_current_state(TASK_RUNNING);
- remove_wait_queue(&sleep_wait, &wait);
+ /* Uninterruptible sleep on the fast timer. (The condition is somewhat
+ * redundant since the timer is what wakes us up.) */
+ wait_event(sleep_wait, !fast_timer_pending(&t));
+
D1(printk("done schedule_usleep(%d)\n", us));
-#ifdef DECLARE_WAITQUEUE
- }
-#endif
}
#ifdef CONFIG_PROC_FS
unsigned long flags;
int i = 0;
int num_to_show;
- struct timeval tv;
+ struct fasttime_t tv;
struct fast_timer *t, *nextt;
static char *bigbuf = NULL;
static unsigned long used;
if (!bigbuf && !(bigbuf = vmalloc(BIG_BUF_SIZE)))
{
used = 0;
- bigbuf[0] = '\0';
+ if (buf)
+ buf[0] = '\0';
return 0;
}
used += sprintf(bigbuf + used, "Fast timer running: %s\n",
fast_timer_running ? "yes" : "no");
used += sprintf(bigbuf + used, "Current time: %lu.%06lu\n",
- (unsigned long)tv.tv_sec,
+ (unsigned long)tv.tv_jiff,
(unsigned long)tv.tv_usec);
#ifdef FAST_TIMER_SANITY_CHECKS
used += sprintf(bigbuf + used, "Sanity failed: %i\n",
"d: %6li us data: 0x%08lX"
"\n",
t->name,
- (unsigned long)t->tv_set.tv_sec,
+ (unsigned long)t->tv_set.tv_jiff,
(unsigned long)t->tv_set.tv_usec,
- (unsigned long)t->tv_expires.tv_sec,
+ (unsigned long)t->tv_expires.tv_jiff,
(unsigned long)t->tv_expires.tv_usec,
t->delay_us,
t->data
"d: %6li us data: 0x%08lX"
"\n",
t->name,
- (unsigned long)t->tv_set.tv_sec,
+ (unsigned long)t->tv_set.tv_jiff,
(unsigned long)t->tv_set.tv_usec,
- (unsigned long)t->tv_expires.tv_sec,
+ (unsigned long)t->tv_expires.tv_jiff,
(unsigned long)t->tv_expires.tv_usec,
t->delay_us,
t->data
"d: %6li us data: 0x%08lX"
"\n",
t->name,
- (unsigned long)t->tv_set.tv_sec,
+ (unsigned long)t->tv_set.tv_jiff,
(unsigned long)t->tv_set.tv_usec,
- (unsigned long)t->tv_expires.tv_sec,
+ (unsigned long)t->tv_expires.tv_jiff,
(unsigned long)t->tv_expires.tv_usec,
t->delay_us,
t->data
/* " func: 0x%08lX" */
"\n",
t->name,
- (unsigned long)t->tv_set.tv_sec,
+ (unsigned long)t->tv_set.tv_jiff,
(unsigned long)t->tv_set.tv_usec,
- (unsigned long)t->tv_expires.tv_sec,
+ (unsigned long)t->tv_expires.tv_jiff,
(unsigned long)t->tv_expires.tv_usec,
t->delay_us,
t->data
/* , t->function */
);
- local_irq_disable();
+ local_irq_save(flags);
if (t->next != nextt)
{
printk(KERN_WARNING "timer removed!\n");
static struct fast_timer tr[10];
static int exp_num[10];
-static struct timeval tv_exp[100];
+static struct fasttime_t tv_exp[100];
static void test_timeout(unsigned long data)
{
int prev_num;
int j;
- struct timeval tv, tv0, tv1, tv2;
+ struct fasttime_t tv, tv0, tv1, tv2;
printk("fast_timer_test() start\n");
do_gettimeofday_fast(&tv);
{
do_gettimeofday_fast(&tv_exp[j]);
}
- printk("fast_timer_test() %is %06i\n", tv.tv_sec, tv.tv_usec);
+ printk(KERN_DEBUG "fast_timer_test() %is %06i\n",
+ tv.tv_jiff, tv.tv_usec);
for (j = 0; j < 1000; j++)
{
}
for (j = 0; j < 100; j++)
{
- printk("%i.%i %i.%i %i.%i %i.%i %i.%i\n",
- tv_exp[j].tv_sec,tv_exp[j].tv_usec,
- tv_exp[j+1].tv_sec,tv_exp[j+1].tv_usec,
- tv_exp[j+2].tv_sec,tv_exp[j+2].tv_usec,
- tv_exp[j+3].tv_sec,tv_exp[j+3].tv_usec,
- tv_exp[j+4].tv_sec,tv_exp[j+4].tv_usec);
+ printk(KERN_DEBUG "%i.%i %i.%i %i.%i %i.%i %i.%i\n",
+ tv_exp[j].tv_jiff, tv_exp[j].tv_usec,
+ tv_exp[j+1].tv_jiff, tv_exp[j+1].tv_usec,
+ tv_exp[j+2].tv_jiff, tv_exp[j+2].tv_usec,
+ tv_exp[j+3].tv_jiff, tv_exp[j+3].tv_usec,
+ tv_exp[j+4].tv_jiff, tv_exp[j+4].tv_usec);
j += 4;
}
do_gettimeofday_fast(&tv0);
}
}
do_gettimeofday_fast(&tv2);
- printk("Timers started %is %06i\n", tv0.tv_sec, tv0.tv_usec);
- printk("Timers started at %is %06i\n", tv1.tv_sec, tv1.tv_usec);
- printk("Timers done %is %06i\n", tv2.tv_sec, tv2.tv_usec);
+ printk(KERN_DEBUG "Timers started %is %06i\n",
+ tv0.tv_jiff, tv0.tv_usec);
+ printk(KERN_DEBUG "Timers started at %is %06i\n",
+ tv1.tv_jiff, tv1.tv_usec);
+ printk(KERN_DEBUG "Timers done %is %06i\n",
+ tv2.tv_jiff, tv2.tv_usec);
DP(printk("buf0:\n");
printk(buf0);
printk("buf1:\n");
printk("%-10s set: %6is %06ius exp: %6is %06ius "
"data: 0x%08X func: 0x%08X\n",
t->name,
- t->tv_set.tv_sec,
+ t->tv_set.tv_jiff,
t->tv_set.tv_usec,
- t->tv_expires.tv_sec,
+ t->tv_expires.tv_jiff,
t->tv_expires.tv_usec,
t->data,
t->function
printk(" del: %6ius did exp: %6is %06ius as #%i error: %6li\n",
t->delay_us,
- tv_exp[j].tv_sec,
+ tv_exp[j].tv_jiff,
tv_exp[j].tv_usec,
exp_num[j],
- (tv_exp[j].tv_sec - t->tv_expires.tv_sec)*1000000 + tv_exp[j].tv_usec - t->tv_expires.tv_usec);
+ (tv_exp[j].tv_jiff - t->tv_expires.tv_jiff) *
+ 1000000 + tv_exp[j].tv_usec -
+ t->tv_expires.tv_usec);
}
proc_fasttimer_read(buf5, NULL, 0, 0, 0);
printk("buf5 after all done:\n");
#endif
-void fast_timer_init(void)
+int fast_timer_init(void)
{
/* For some reason, request_irq() hangs when called from time_init() */
if (!fast_timer_is_init)
fast_timer_test();
#endif
}
+ return 0;
}
+__initcall(fast_timer_init);
static struct if_group *get_group(const unsigned char groups)
{
int i;
- for (i = 0; i < sizeof(if_groups)/sizeof(struct if_group); i++) {
+ for (i = 0; i < ARRAY_SIZE(if_groups); i++) {
if (groups & if_groups[i].group) {
return &if_groups[i];
}
*/
#include <asm/irq.h>
+#include <asm/current.h>
#include <linux/irq.h>
+#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/init.h>
+/* From kgdb.c. */
+extern void kgdb_init(void);
+extern void breakpoint(void);
+
#define mask_irq(irq_nr) (*R_VECT_MASK_CLR = 1 << (irq_nr));
#define unmask_irq(irq_nr) (*R_VECT_MASK_SET = 1 << (irq_nr));
BUILD_IRQ(13, 0x2000)
void mmu_bus_fault(void); /* IRQ 14 is the bus fault interrupt */
void multiple_interrupt(void); /* IRQ 15 is the multiple IRQ interrupt */
-BUILD_IRQ(16, 0x10000)
-BUILD_IRQ(17, 0x20000)
+BUILD_IRQ(16, 0x10000 | 0x20000) /* ethernet tx interrupt needs to block rx */
+BUILD_IRQ(17, 0x20000 | 0x10000) /* ...and vice versa */
BUILD_IRQ(18, 0x40000)
BUILD_IRQ(19, 0x80000)
BUILD_IRQ(20, 0x100000)
void do_sigtrap(void); /* from entry.S */
void gdb_handle_breakpoint(void); /* from entry.S */
+extern void do_IRQ(int irq, struct pt_regs * regs);
+
+/* Handle multiple IRQs */
+void do_multiple_IRQ(struct pt_regs* regs)
+{
+ int bit;
+ unsigned masked;
+ unsigned mask;
+ unsigned ethmask = 0;
+
+ /* Get interrupts to mask and handle */
+ mask = masked = *R_VECT_MASK_RD;
+
+ /* Never mask timer IRQ */
+ mask &= ~(IO_MASK(R_VECT_MASK_RD, timer0));
+
+ /*
+ * If either ethernet interrupt (rx or tx) is active then block
+ * the other one too. Unblock afterwards also.
+ */
+ if (mask &
+ (IO_STATE(R_VECT_MASK_RD, dma0, active) |
+ IO_STATE(R_VECT_MASK_RD, dma1, active))) {
+ ethmask = (IO_MASK(R_VECT_MASK_RD, dma0) |
+ IO_MASK(R_VECT_MASK_RD, dma1));
+ }
+
+ /* Block them */
+ *R_VECT_MASK_CLR = (mask | ethmask);
+
+ /* An extra irq_enter here to prevent softIRQs to run after
+ * each do_IRQ. This will decrease the interrupt latency.
+ */
+ irq_enter();
+
+ /* Handle all IRQs */
+ for (bit = 2; bit < 32; bit++) {
+ if (masked & (1 << bit)) {
+ do_IRQ(bit, regs);
+ }
+ }
+
+ /* This irq_exit() will trigger the soft IRQs. */
+ irq_exit();
+
+ /* Unblock the IRQs again */
+ *R_VECT_MASK_SET = (masked | ethmask);
+}
+
/* init_IRQ() is called by start_kernel and is responsible for fixing IRQ masks and
setting the irq vector table.
*/
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/delay.h>
+#include <linux/param.h>
#ifdef CONFIG_PROC_FS
#define HAS_FPU 0x0001
revision = rdvr();
- if (revision >= sizeof cpu_info/sizeof *cpu_info)
- info = &cpu_info[sizeof cpu_info/sizeof *cpu_info - 1];
+ if (revision >= ARRAY_SIZE(cpu_info))
+ info = &cpu_info[ARRAY_SIZE(cpu_info) - 1];
else
info = &cpu_info[revision];
-/* $Id: time.c,v 1.5 2004/09/29 06:12:46 starvik Exp $
- *
+/*
* linux/arch/cris/arch-v10/kernel/time.c
*
* Copyright (C) 1991, 1992, 1995 Linus Torvalds
#include <asm/io.h>
#include <asm/delay.h>
#include <asm/rtc.h>
+#include <asm/irq_regs.h>
/* define this if you need to use print_timestamp */
/* it will make jiffies at 96 hz instead of 100 hz though */
extern void cris_do_profile(struct pt_regs *regs);
static inline irqreturn_t
-timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+timer_interrupt(int irq, void *dev_id)
{
+ struct pt_regs *regs = get_irq_regs();
/* acknowledge the timer irq */
#ifdef USE_CASCADE_TIMERS
#endif
/* reset watchdog otherwise it resets us! */
-
reset_watchdog();
+ /* Update statistics. */
+ update_process_times(user_mode(regs));
+
/* call the real timer interrupt handler */
do_timer(1);
{
register char *dst __asm__ ("r13") = pdst;
-
+
/* This is NONPORTABLE, but since this whole routine is */
/* grossly nonportable that doesn't matter. */
If you want to check that the allocation was right; then
check the equalities in the first comment. It should say
"r13=r13, r12=r12, r11=r11" */
- __asm__ volatile ("
- ;; Check that the following is true (same register names on
- ;; both sides of equal sign, as in r8=r8):
- ;; %0=r13, %1=r12, %4=r11
- ;;
- ;; Save the registers we'll clobber in the movem process
- ;; on the stack. Don't mention them to gcc, it will only be
- ;; upset.
- subq 11*4,$sp
- movem $r10,[$sp]
-
- move.d $r11,$r0
- move.d $r11,$r1
- move.d $r11,$r2
- move.d $r11,$r3
- move.d $r11,$r4
- move.d $r11,$r5
- move.d $r11,$r6
- move.d $r11,$r7
- move.d $r11,$r8
- move.d $r11,$r9
- move.d $r11,$r10
-
- ;; Now we've got this:
- ;; r13 - dst
- ;; r12 - n
-
- ;; Update n for the first loop
- subq 12*4,$r12
-0:
- subq 12*4,$r12
- bge 0b
- movem $r11,[$r13+]
-
- addq 12*4,$r12 ;; compensate for last loop underflowing n
-
- ;; Restore registers from stack
- movem [$sp+],$r10"
+ __asm__ volatile ("\n\
+ ;; Check that the following is true (same register names on \n\
+ ;; both sides of equal sign, as in r8=r8): \n\
+ ;; %0=r13, %1=r12, %4=r11 \n\
+ ;; \n\
+ ;; Save the registers we'll clobber in the movem process \n\
+ ;; on the stack. Don't mention them to gcc, it will only be \n\
+ ;; upset. \n\
+ subq 11*4,$sp \n\
+ movem $r10,[$sp] \n\
+ \n\
+ move.d $r11,$r0 \n\
+ move.d $r11,$r1 \n\
+ move.d $r11,$r2 \n\
+ move.d $r11,$r3 \n\
+ move.d $r11,$r4 \n\
+ move.d $r11,$r5 \n\
+ move.d $r11,$r6 \n\
+ move.d $r11,$r7 \n\
+ move.d $r11,$r8 \n\
+ move.d $r11,$r9 \n\
+ move.d $r11,$r10 \n\
+ \n\
+ ;; Now we've got this: \n\
+ ;; r13 - dst \n\
+ ;; r12 - n \n\
+ \n\
+ ;; Update n for the first loop \n\
+ subq 12*4,$r12 \n\
+0: \n\
+ subq 12*4,$r12 \n\
+ bge 0b \n\
+ movem $r11,[$r13+] \n\
+ \n\
+ addq 12*4,$r12 ;; compensate for last loop underflowing n \n\
+ \n\
+ ;; Restore registers from stack \n\
+ movem [$sp+],$r10"
/* Outputs */ : "=r" (dst), "=r" (n)
/* Inputs */ : "0" (dst), "1" (n), "r" (lc));
-
+
}
/* Either we directly starts copying, using dword copying
- in a loop, or we copy as much as possible with 'movem'
+ in a loop, or we copy as much as possible with 'movem'
and then the last block (<44 bytes) is copied here.
This will work since 'movem' will have updated src,dst,n. */
If you want to check that the allocation was right; then
check the equalities in the first comment. It should say
"r13=r13, r11=r11, r12=r12" */
- __asm__ volatile ("
- ;; Check that the following is true (same register names on
- ;; both sides of equal sign, as in r8=r8):
- ;; %0=r13, %1=r11, %2=r12
- ;;
- ;; Save the registers we'll use in the movem process
- ;; on the stack.
- subq 11*4,$sp
- movem $r10,[$sp]
-
- ;; Now we've got this:
- ;; r11 - src
- ;; r13 - dst
- ;; r12 - n
-
- ;; Update n for the first loop
- subq 44,$r12
-0:
- movem [$r11+],$r10
- subq 44,$r12
- bge 0b
- movem $r10,[$r13+]
-
- addq 44,$r12 ;; compensate for last loop underflowing n
-
- ;; Restore registers from stack
- movem [$sp+],$r10"
+ __asm__ volatile ("\n\
+ ;; Check that the following is true (same register names on \n\
+ ;; both sides of equal sign, as in r8=r8): \n\
+ ;; %0=r13, %1=r11, %2=r12 \n\
+ ;; \n\
+ ;; Save the registers we'll use in the movem process \n\
+ ;; on the stack. \n\
+ subq 11*4,$sp \n\
+ movem $r10,[$sp] \n\
+ \n\
+ ;; Now we've got this: \n\
+ ;; r11 - src \n\
+ ;; r13 - dst \n\
+ ;; r12 - n \n\
+ \n\
+ ;; Update n for the first loop \n\
+ subq 44,$r12 \n\
+0: \n\
+ movem [$r11+],$r10 \n\
+ subq 44,$r12 \n\
+ bge 0b \n\
+ movem $r10,[$r13+] \n\
+ \n\
+ addq 44,$r12 ;; compensate for last loop underflowing n \n\
+ \n\
+ ;; Restore registers from stack \n\
+ movem [$sp+],$r10"
/* Outputs */ : "=r" (dst), "=r" (src), "=r" (n)
/* Inputs */ : "0" (dst), "1" (src), "2" (n));
.ifnc %0%1%2%3,$r13$r11$r12$r10 \n\
.err \n\
.endif \n\
-
- ;; Save the registers we'll use in the movem process
- ;; on the stack.
- subq 11*4,$sp
- movem $r10,[$sp]
-
- ;; Now we've got this:
- ;; r11 - src
- ;; r13 - dst
- ;; r12 - n
-
- ;; Update n for the first loop
- subq 44,$r12
-
-; Since the noted PC of a faulting instruction in a delay-slot of a taken
-; branch, is that of the branch target, we actually point at the from-movem
-; for this case. There is no ambiguity here; if there was a fault in that
-; instruction (meaning a kernel oops), the faulted PC would be the address
-; after *that* movem.
-
-0:
- movem [$r11+],$r10
- subq 44,$r12
- bge 0b
- movem $r10,[$r13+]
-1:
- addq 44,$r12 ;; compensate for last loop underflowing n
-
- ;; Restore registers from stack
- movem [$sp+],$r10
-2:
- .section .fixup,\"ax\"
-
-; To provide a correct count in r10 of bytes that failed to be copied,
-; we jump back into the loop if the loop-branch was taken. There is no
-; performance penalty for sany use; the program will segfault soon enough.
-
-3:
- move.d [$sp],$r10
- addq 44,$r10
- move.d $r10,[$sp]
- jump 0b
-4:
- movem [$sp+],$r10
- addq 44,$r10
- addq 44,$r12
- jump 2b
-
- .previous
- .section __ex_table,\"a\"
- .dword 0b,3b
- .dword 1b,4b
+ \n\
+ ;; Save the registers we'll use in the movem process \n\
+ ;; on the stack. \n\
+ subq 11*4,$sp \n\
+ movem $r10,[$sp] \n\
+ \n\
+ ;; Now we've got this: \n\
+ ;; r11 - src \n\
+ ;; r13 - dst \n\
+ ;; r12 - n \n\
+ \n\
+ ;; Update n for the first loop \n\
+ subq 44,$r12 \n\
+ \n\
+; Since the noted PC of a faulting instruction in a delay-slot of a taken \n\
+; branch, is that of the branch target, we actually point at the from-movem \n\
+; for this case. There is no ambiguity here; if there was a fault in that \n\
+; instruction (meaning a kernel oops), the faulted PC would be the address \n\
+; after *that* movem. \n\
+ \n\
+0: \n\
+ movem [$r11+],$r10 \n\
+ subq 44,$r12 \n\
+ bge 0b \n\
+ movem $r10,[$r13+] \n\
+1: \n\
+ addq 44,$r12 ;; compensate for last loop underflowing n \n\
+ \n\
+ ;; Restore registers from stack \n\
+ movem [$sp+],$r10 \n\
+2: \n\
+ .section .fixup,\"ax\" \n\
+ \n\
+; To provide a correct count in r10 of bytes that failed to be copied, \n\
+; we jump back into the loop if the loop-branch was taken. There is no \n\
+; performance penalty for any use; the program will segfault soon enough.\n\
+ \n\
+3: \n\
+ move.d [$sp],$r10 \n\
+ addq 44,$r10 \n\
+ move.d $r10,[$sp] \n\
+ jump 0b \n\
+4: \n\
+ movem [$sp+],$r10 \n\
+ addq 44,$r10 \n\
+ addq 44,$r12 \n\
+ jump 2b \n\
+ \n\
+ .previous \n\
+ .section __ex_table,\"a\" \n\
+ .dword 0b,3b \n\
+ .dword 1b,4b \n\
.previous"
/* Outputs */ : "=r" (dst), "=r" (src), "=r" (n), "=r" (retn)
If you want to check that the allocation was right; then
check the equalities in the first comment. It should say
"r13=r13, r11=r11, r12=r12" */
- __asm__ volatile ("
+ __asm__ volatile ("\n\
.ifnc %0%1%2%3,$r13$r11$r12$r10 \n\
.err \n\
.endif \n\
-
- ;; Save the registers we'll use in the movem process
- ;; on the stack.
- subq 11*4,$sp
- movem $r10,[$sp]
-
- ;; Now we've got this:
- ;; r11 - src
- ;; r13 - dst
- ;; r12 - n
-
- ;; Update n for the first loop
- subq 44,$r12
-0:
- movem [$r11+],$r10
-1:
- subq 44,$r12
- bge 0b
- movem $r10,[$r13+]
-
- addq 44,$r12 ;; compensate for last loop underflowing n
-
- ;; Restore registers from stack
- movem [$sp+],$r10
-4:
- .section .fixup,\"ax\"
-
-;; Do not jump back into the loop if we fail. For some uses, we get a
-;; page fault somewhere on the line. Without checking for page limits,
-;; we don't know where, but we need to copy accurately and keep an
-;; accurate count; not just clear the whole line. To do that, we fall
-;; down in the code below, proceeding with smaller amounts. It should
-;; be kept in mind that we have to cater to code like what at one time
-;; was in fs/super.c:
-;; i = size - copy_from_user((void *)page, data, size);
-;; which would cause repeated faults while clearing the remainder of
-;; the SIZE bytes at PAGE after the first fault.
-;; A caveat here is that we must not fall through from a failing page
-;; to a valid page.
-
-3:
- movem [$sp+],$r10
- addq 44,$r12 ;; Get back count before faulting point.
- subq 44,$r11 ;; Get back pointer to faulting movem-line.
- jump 4b ;; Fall through, pretending the fault didn't happen.
-
- .previous
- .section __ex_table,\"a\"
- .dword 1b,3b
+ \n\
+ ;; Save the registers we'll use in the movem process \n\
+ ;; on the stack. \n\
+ subq 11*4,$sp \n\
+ movem $r10,[$sp] \n\
+ \n\
+ ;; Now we've got this: \n\
+ ;; r11 - src \n\
+ ;; r13 - dst \n\
+ ;; r12 - n \n\
+ \n\
+ ;; Update n for the first loop \n\
+ subq 44,$r12 \n\
+0: \n\
+ movem [$r11+],$r10 \n\
+1: \n\
+ subq 44,$r12 \n\
+ bge 0b \n\
+ movem $r10,[$r13+] \n\
+ \n\
+ addq 44,$r12 ;; compensate for last loop underflowing n \n\
+ \n\
+ ;; Restore registers from stack \n\
+ movem [$sp+],$r10 \n\
+4: \n\
+ .section .fixup,\"ax\" \n\
+ \n\
+;; Do not jump back into the loop if we fail. For some uses, we get a \n\
+;; page fault somewhere on the line. Without checking for page limits, \n\
+;; we don't know where, but we need to copy accurately and keep an \n\
+;; accurate count; not just clear the whole line. To do that, we fall \n\
+;; down in the code below, proceeding with smaller amounts. It should \n\
+;; be kept in mind that we have to cater to code like what at one time \n\
+;; was in fs/super.c: \n\
+;; i = size - copy_from_user((void *)page, data, size); \n\
+;; which would cause repeated faults while clearing the remainder of \n\
+;; the SIZE bytes at PAGE after the first fault. \n\
+;; A caveat here is that we must not fall through from a failing page \n\
+;; to a valid page. \n\
+ \n\
+3: \n\
+ movem [$sp+],$r10 \n\
+ addq 44,$r12 ;; Get back count before faulting point. \n\
+ subq 44,$r11 ;; Get back pointer to faulting movem-line. \n\
+ jump 4b ;; Fall through, pretending the fault didn't happen.\n\
+ \n\
+ .previous \n\
+ .section __ex_table,\"a\" \n\
+ .dword 1b,3b \n\
.previous"
/* Outputs */ : "=r" (dst), "=r" (src), "=r" (n), "=r" (retn)
If you want to check that the allocation was right; then
check the equalities in the first comment. It should say
something like "r13=r13, r11=r11, r12=r12". */
- __asm__ volatile ("
+ __asm__ volatile ("\n\
.ifnc %0%1%2,$r13$r12$r10 \n\
.err \n\
.endif \n\
-
- ;; Save the registers we'll clobber in the movem process
- ;; on the stack. Don't mention them to gcc, it will only be
- ;; upset.
- subq 11*4,$sp
- movem $r10,[$sp]
-
- clear.d $r0
- clear.d $r1
- clear.d $r2
- clear.d $r3
- clear.d $r4
- clear.d $r5
- clear.d $r6
- clear.d $r7
- clear.d $r8
- clear.d $r9
- clear.d $r10
- clear.d $r11
-
- ;; Now we've got this:
- ;; r13 - dst
- ;; r12 - n
-
- ;; Update n for the first loop
- subq 12*4,$r12
-0:
- subq 12*4,$r12
- bge 0b
- movem $r11,[$r13+]
-1:
- addq 12*4,$r12 ;; compensate for last loop underflowing n
-
- ;; Restore registers from stack
- movem [$sp+],$r10
-2:
- .section .fixup,\"ax\"
-3:
- move.d [$sp],$r10
- addq 12*4,$r10
- move.d $r10,[$sp]
- clear.d $r10
- jump 0b
-
-4:
- movem [$sp+],$r10
- addq 12*4,$r10
- addq 12*4,$r12
- jump 2b
-
- .previous
- .section __ex_table,\"a\"
- .dword 0b,3b
- .dword 1b,4b
+ \n\
+ ;; Save the registers we'll clobber in the movem process \n\
+ ;; on the stack. Don't mention them to gcc, it will only be \n\
+ ;; upset. \n\
+ subq 11*4,$sp \n\
+ movem $r10,[$sp] \n\
+ \n\
+ clear.d $r0 \n\
+ clear.d $r1 \n\
+ clear.d $r2 \n\
+ clear.d $r3 \n\
+ clear.d $r4 \n\
+ clear.d $r5 \n\
+ clear.d $r6 \n\
+ clear.d $r7 \n\
+ clear.d $r8 \n\
+ clear.d $r9 \n\
+ clear.d $r10 \n\
+ clear.d $r11 \n\
+ \n\
+ ;; Now we've got this: \n\
+ ;; r13 - dst \n\
+ ;; r12 - n \n\
+ \n\
+ ;; Update n for the first loop \n\
+ subq 12*4,$r12 \n\
+0: \n\
+ subq 12*4,$r12 \n\
+ bge 0b \n\
+ movem $r11,[$r13+] \n\
+1: \n\
+ addq 12*4,$r12 ;; compensate for last loop underflowing n\n\
+ \n\
+ ;; Restore registers from stack \n\
+ movem [$sp+],$r10 \n\
+2: \n\
+ .section .fixup,\"ax\" \n\
+3: \n\
+ move.d [$sp],$r10 \n\
+ addq 12*4,$r10 \n\
+ move.d $r10,[$sp] \n\
+ clear.d $r10 \n\
+ jump 0b \n\
+ \n\
+4: \n\
+ movem [$sp+],$r10 \n\
+ addq 12*4,$r10 \n\
+ addq 12*4,$r12 \n\
+ jump 2b \n\
+ \n\
+ .previous \n\
+ .section __ex_table,\"a\" \n\
+ .dword 0b,3b \n\
+ .dword 1b,4b \n\
.previous"
/* Outputs */ : "=r" (dst), "=r" (n), "=r" (retn)
select MTD
select MTD_CFI
select MTD_CFI_AMDSTD
- select MTD_OBSOLETE_CHIPS
- select MTD_AMDSTD
select MTD_CHAR
select MTD_BLOCK
select MTD_PARTITIONS
"%s: Probing a 0x%08lx bytes large window at 0x%08lx.\n",
map_cs->name, map_cs->size, map_cs->map_priv_1);
-#ifdef CONFIG_MTD_AMDSTD
- mtd_cs = do_map_probe("amd_flash", map_cs);
-#endif
#ifdef CONFIG_MTD_CFI
- if (!mtd_cs) {
mtd_cs = do_map_probe("cfi_probe", map_cs);
- }
+#endif
+#ifdef CONFIG_MTD_JEDECPROBE
+ if (!mtd_cs)
+ mtd_cs = do_map_probe("jedec_probe", map_cs);
#endif
return mtd_cs;
}
};
-#define NUMBER_OF_PORTS (sizeof(ports)/sizeof(sync_port))
+#define NUMBER_OF_PORTS ARRAY_SIZE(ports)
static const struct file_operations sync_serial_fops = {
.owner = THIS_MODULE,
--- /dev/null
+#include <linux/module.h>
+#include <asm/io.h>
+#include <asm/arch/cache.h>
+#include <asm/arch/hwregs/dma.h>
+
+/* This file is used to work around a cache bug, Guinness TR 106. */
+
+inline void flush_dma_descr(struct dma_descr_data *descr, int flush_buf)
+{
+ /* Flush descriptor to make sure we get correct in_eop and after. */
+ asm volatile ("ftagd [%0]" :: "r" (descr));
+ /* Flush buffer pointed out by descriptor. */
+ if (flush_buf)
+ cris_flush_cache_range(phys_to_virt((unsigned)descr->buf),
+ (unsigned)(descr->after - descr->buf));
+}
+EXPORT_SYMBOL(flush_dma_descr);
+
+void flush_dma_list(struct dma_descr_data *descr)
+{
+ while (1) {
+ flush_dma_descr(descr, 1);
+ if (descr->eol)
+ break;
+ descr = phys_to_virt((unsigned)descr->next);
+ }
+}
+EXPORT_SYMBOL(flush_dma_list);
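+
+/* Illustrative use (the driver-specific names are hypothetical): once a
+ * descriptor list has completed, flush it before the CPU inspects the
+ * in_eop/after fields, e.g. flush_dma_list(chan->first_descr). */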
+
+/* From cacheflush.S */
+EXPORT_SYMBOL(cris_flush_cache);
+/* From cacheflush.S */
+EXPORT_SYMBOL(cris_flush_cache_range);
--- /dev/null
+ .global cris_flush_cache_range
+cris_flush_cache_range:
+ move.d 1024, $r12
+ cmp.d $r11, $r12
+ bhi cris_flush_1KB
+ nop
+ add.d $r10, $r11
+ ftagd [$r10]
+cris_flush_last:
+ addq 32, $r10
+ cmp.d $r11, $r10
+ blt cris_flush_last
+ ftagd [$r10]
+ ret
+ nop
+cris_flush_1KB:
+ ftagd [$r10]
+ addq 32, $r10
+ ftagd [$r10]
+ addq 32, $r10
+ ftagd [$r10]
+ addq 32, $r10
+ ftagd [$r10]
+ addq 32, $r10
+ ftagd [$r10]
+ addq 32, $r10
+ ftagd [$r10]
+ addq 32, $r10
+ ftagd [$r10]
+ addq 32, $r10
+ ftagd [$r10]
+ addq 32, $r10
+ ftagd [$r10]
+ addq 32, $r10
+ ftagd [$r10]
+ addq 32, $r10
+ ftagd [$r10]
+ addq 32, $r10
+ ftagd [$r10]
+ addq 32, $r10
+ ftagd [$r10]
+ addq 32, $r10
+ ftagd [$r10]
+ addq 32, $r10
+ ftagd [$r10]
+ addq 32, $r10
+ ftagd [$r10]
+ addq 32, $r10
+ ftagd [$r10]
+ addq 32, $r10
+ ftagd [$r10]
+ addq 32, $r10
+ ftagd [$r10]
+ addq 32, $r10
+ ftagd [$r10]
+ addq 32, $r10
+ ftagd [$r10]
+ addq 32, $r10
+ ftagd [$r10]
+ addq 32, $r10
+ ftagd [$r10]
+ addq 32, $r10
+ ftagd [$r10]
+ addq 32, $r10
+ ftagd [$r10]
+ addq 32, $r10
+ ftagd [$r10]
+ addq 32, $r10
+ ftagd [$r10]
+ addq 32, $r10
+ ftagd [$r10]
+ addq 32, $r10
+ ftagd [$r10]
+ addq 32, $r10
+ ftagd [$r10]
+ addq 32, $r10
+ ftagd [$r10]
+ addq 32, $r10
+ ftagd [$r10]
+ addq 32, $r10
+ ba cris_flush_cache_range
+ sub.d $r12, $r11
+
+ .global cris_flush_cache
+cris_flush_cache:
+ moveq 0, $r10
+cris_flush_line:
+ move.d 16*1024, $r11
+ addq 16, $r10
+ cmp.d $r10, $r11
+ blt cris_flush_line
+ fidxd [$r10]
+ ret
+ nop
}
};
-#define NBR_OF_PORTS sizeof(crisv32_ioports)/sizeof(struct crisv32_ioport)
+#define NBR_OF_PORTS ARRAY_SIZE(crisv32_ioports)
struct crisv32_iopin crisv32_led1_green;
struct crisv32_iopin crisv32_led1_red;
{
int i;
int cpu = (int)v - 1;
- int entries;
unsigned long revision;
struct cpu_info *info;
- entries = sizeof cpinfo / sizeof(struct cpu_info);
- info = &cpinfo[entries - 1];
+ info = &cpinfo[ARRAY_SIZE(cpinfo) - 1];
#ifdef CONFIG_SMP
if (!cpu_online(cpu))
revision = rdvr();
- for (i = 0; i < entries; i++) {
+ for (i = 0; i < ARRAY_SIZE(cpinfo); i++) {
if (cpinfo[i].rev == revision) {
info = &cpinfo[i];
break;
CONFIG_MTD_RAM=y
# CONFIG_MTD_ROM is not set
# CONFIG_MTD_ABSENT is not set
-CONFIG_MTD_OBSOLETE_CHIPS=y
-CONFIG_MTD_AMDSTD=y
# CONFIG_MTD_SHARP is not set
# CONFIG_MTD_JEDEC is not set
# CONFIG_BLK_DEV_FD is not set
# CONFIG_BLK_DEV_COW_COMMON is not set
# CONFIG_BLK_DEV_LOOP is not set
+# CONFIG_BLK_DEV_CRYPTOLOOP is not set
# CONFIG_BLK_DEV_NBD is not set
# CONFIG_BLK_DEV_UB is not set
CONFIG_BLK_DEV_RAM=y
#
# ATA/ATAPI/MFM/RLL support
#
-CONFIG_IDE=y
-CONFIG_BLK_DEV_IDE=y
+# CONFIG_IDE is not set
+# CONFIG_PARIDE is not set
#
# Please see Documentation/ide.txt for help/info on IDE drives
#
# CONFIG_BLK_DEV_IDE_SATA is not set
-CONFIG_BLK_DEV_IDEDISK=y
# CONFIG_IDEDISK_MULTI_MODE is not set
-CONFIG_BLK_DEV_IDECD=y
# CONFIG_BLK_DEV_IDETAPE is not set
# CONFIG_BLK_DEV_IDEFLOPPY is not set
# CONFIG_IDE_TASK_IOCTL is not set
#
# CONFIG_IDE_GENERIC is not set
# CONFIG_IDE_ARM is not set
-CONFIG_BLK_DEV_IDEDMA=y
# CONFIG_IDEDMA_AUTO is not set
# CONFIG_BLK_DEV_HD is not set
# SCSI device support
#
# CONFIG_SCSI is not set
+# CONFIG_ISCSI_TCP is not set
#
# IEEE 1394 (FireWire) support
# CONFIG_NET_POLL_CONTROLLER is not set
# CONFIG_HAMRADIO is not set
# CONFIG_IRDA is not set
-CONFIG_BT=y
-CONFIG_BT_L2CAP=y
-# CONFIG_BT_SCO is not set
-CONFIG_BT_RFCOMM=y
-# CONFIG_BT_RFCOMM_TTY is not set
-CONFIG_BT_BNEP=y
-# CONFIG_BT_BNEP_MC_FILTER is not set
-# CONFIG_BT_BNEP_PROTO_FILTER is not set
-# CONFIG_BT_HIDP is not set
-
-#
-# Bluetooth device drivers
-#
-CONFIG_BT_HCIUSB=y
-# CONFIG_BT_HCIUSB_SCO is not set
-# CONFIG_BT_HCIUART is not set
-# CONFIG_BT_HCIBCM203X is not set
-# CONFIG_BT_HCIBPA10X is not set
-# CONFIG_BT_HCIBFUSB is not set
-# CONFIG_BT_HCIVHCI is not set
+# CONFIG_AF_RXRPC is not set
+# CONFIG_AF_RXRPC_DEBUG is not set
+# CONFIG_BT is not set
+# CONFIG_I2C is not set
+
CONFIG_NETDEVICES=y
# CONFIG_DUMMY is not set
# CONFIG_BONDING is not set
#
# Input device support
#
-CONFIG_INPUT=y
-
-#
-# Userland interfaces
-#
-CONFIG_INPUT_MOUSEDEV=y
-CONFIG_INPUT_MOUSEDEV_PSAUX=y
-CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
-CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
-# CONFIG_INPUT_JOYDEV is not set
-# CONFIG_INPUT_TSDEV is not set
-# CONFIG_INPUT_EVDEV is not set
-# CONFIG_INPUT_EVBUG is not set
+# CONFIG_INPUT is not set
#
# Input I/O drivers
#
-# CONFIG_GAMEPORT is not set
-CONFIG_SOUND_GAMEPORT=y
CONFIG_SERIO=y
# CONFIG_SERIO_I8042 is not set
# CONFIG_SERIO_SERPORT is not set
-# CONFIG_SERIO_CT82C710 is not set
-CONFIG_SERIO_LIBPS2=y
+# CONFIG_SERIO_LIBPS2 is not set
# CONFIG_SERIO_RAW is not set
+# CONFIG_GAMEPORT is not set
#
# Input Device Drivers
# CONFIG_MOUSE_SERIAL is not set
# CONFIG_MOUSE_VSXXXAA is not set
# CONFIG_INPUT_JOYSTICK is not set
+# CONFIG_INPUT_TABLET is not set
# CONFIG_INPUT_TOUCHSCREEN is not set
# CONFIG_INPUT_MISC is not set
#
# Non-8250 serial port support
#
+CONFIG_SERIAL_CORE=y
+CONFIG_SERIAL_CORE_CONSOLE=y
CONFIG_UNIX98_PTYS=y
CONFIG_LEGACY_PTYS=y
CONFIG_LEGACY_PTY_COUNT=256
# CONFIG_GEN_RTC is not set
# CONFIG_DTLK is not set
# CONFIG_R3964 is not set
+# CONFIG_RTC_LIB is not set
+# CONFIG_RTC_CLASS is not set
#
# Ftape, the floppy tape device driver
# CONFIG_NFSD is not set
CONFIG_LOCKD=y
CONFIG_LOCKD_V4=y
+CONFIG_NFS_COMMON=y
CONFIG_SUNRPC=y
+# CONFIG_SUNRPC_BIND34 is not set
# CONFIG_RPCSEC_GSS_KRB5 is not set
# CONFIG_RPCSEC_GSS_SPKM3 is not set
# CONFIG_SMB_FS is not set
#
# CONFIG_SOUND is not set
+#
+# Generic devices
+#
+# CONFIG_SND_MPU401_UART is not set
+# CONFIG_SND_DUMMY is not set
+# CONFIG_SND_VIRMIDI is not set
+# CONFIG_SND_MTPAV is not set
+# CONFIG_SND_SERIAL_U16550 is not set
+# CONFIG_SND_MPU401 is not set
+
#
# PCCARD (PCMCIA/CardBus) support
#
# CONFIG_PCCARD is not set
+# CONFIG_PARPORT_PC_PCMCIA is not set
+# CONFIG_NET_PCMCIA is not set
#
# PC-card bridges
# USB Input Devices
#
# CONFIG_USB_HID is not set
+# CONFIG_HID_SUPPORT is not set
#
# USB HID Boot Protocol drivers
#
# Hardware crypto devices
-#
+# CONFIG_CRYPTO_HW is not set
#
# Library routines
extern void __ashldi3(void);
extern void __ashrdi3(void);
extern void __lshrdi3(void);
+extern void __negdi2(void);
extern void iounmap(volatile void * __iomem);
/* Platform dependent support */
EXPORT_SYMBOL(get_cmos_time);
EXPORT_SYMBOL(loops_per_usec);
-/* String functions */
-EXPORT_SYMBOL(memcmp);
-EXPORT_SYMBOL(memmove);
-EXPORT_SYMBOL(strstr);
-EXPORT_SYMBOL(strcpy);
-EXPORT_SYMBOL(strchr);
-EXPORT_SYMBOL(strcmp);
-EXPORT_SYMBOL(strlen);
-EXPORT_SYMBOL(strcat);
-EXPORT_SYMBOL(strncat);
-EXPORT_SYMBOL(strncmp);
-EXPORT_SYMBOL(strncpy);
-
/* Math functions */
EXPORT_SYMBOL(__Udiv);
EXPORT_SYMBOL(__Umod);
EXPORT_SYMBOL(__ashldi3);
EXPORT_SYMBOL(__ashrdi3);
EXPORT_SYMBOL(__lshrdi3);
+EXPORT_SYMBOL(__negdi2);
/* Memory functions */
EXPORT_SYMBOL(__ioremap);
EXPORT_SYMBOL(del_fast_timer);
EXPORT_SYMBOL(schedule_usleep);
#endif
-
+EXPORT_SYMBOL(csum_partial);
*
* linux/arch/cris/kernel/irq.c
*
- * Copyright (c) 2000,2001 Axis Communications AB
+ * Copyright (c) 2000,2007 Axis Communications AB
*
* Authors: Bjorn Wesen (bjornw@axis.com)
*
asmlinkage void do_IRQ(int irq, struct pt_regs * regs)
{
unsigned long sp;
+ struct pt_regs *old_regs = set_irq_regs(regs);
irq_enter();
sp = rdsp();
if (unlikely((sp & (PAGE_SIZE - 1)) < (PAGE_SIZE/8))) {
printk("do_IRQ: stack overflow: %lX\n", sp);
show_stack(NULL, (unsigned long *)sp);
}
- __do_IRQ(irq, regs);
+ __do_IRQ(irq);
irq_exit();
+ set_irq_regs(old_regs);
}
void weird_irq(void)
*/
void (*pm_idle)(void);
+extern void default_idle(void);
+
+void (*pm_power_off)(void);
+EXPORT_SYMBOL(pm_power_off);
+
/*
* The idle thread. There's no useful work to be
* done, so just try to conserve power and have a
/* notification of userspace execution resumption
* - triggered by current->work.notify_resume
*/
-extern int do_signal(int canrestart, sigset_t *oldset, struct pt_regs *regs);
+extern int do_signal(int canrestart, struct pt_regs *regs);
-void do_notify_resume(int canrestart, sigset_t *oldset, struct pt_regs *regs,
+void do_notify_resume(int canrestart, struct pt_regs *regs,
__u32 thread_info_flags )
{
/* deal with pending signal delivery */
if (thread_info_flags & _TIF_SIGPENDING)
- do_signal(canrestart,oldset,regs);
+ do_signal(canrestart,regs);
}
#include <linux/sched.h>
#include <linux/syscalls.h>
#include <linux/mm.h>
+#include <linux/fs.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/sem.h>
mon = CMOS_READ(RTC_MONTH);
year = CMOS_READ(RTC_YEAR);
- printk(KERN_DEBUG
- "rtc: sec 0x%x min 0x%x hour 0x%x day 0x%x mon 0x%x year 0x%x\n",
- sec, min, hour, day, mon, year);
-
BCD_TO_BIN(sec);
BCD_TO_BIN(min);
BCD_TO_BIN(hour);
cris_do_profile(struct pt_regs* regs)
{
-#if CONFIG_SYSTEM_PROFILER
+#ifdef CONFIG_SYSTEM_PROFILER
cris_profile_sample(regs);
#endif
-#if CONFIG_PROFILING
- profile_tick(CPU_PROFILING, regs);
+#ifdef CONFIG_PROFILING
+ profile_tick(CPU_PROFILING);
#endif
}
if (md->num_pages == 0) /* should not happen */
continue;
- flags = IORESOURCE_MEM;
+ flags = IORESOURCE_MEM | IORESOURCE_BUSY;
switch (md->type) {
case EFI_MEMORY_MAPPED_IO:
case EFI_ACPI_MEMORY_NVS:
name = "ACPI Non-volatile Storage";
- flags |= IORESOURCE_BUSY;
break;
case EFI_UNUSABLE_MEMORY:
name = "reserved";
- flags |= IORESOURCE_BUSY | IORESOURCE_DISABLED;
+ flags |= IORESOURCE_DISABLED;
break;
case EFI_RESERVED_TYPE:
case EFI_ACPI_RECLAIM_MEMORY:
default:
name = "reserved";
- flags |= IORESOURCE_BUSY;
break;
}
break;
default:
+ spin_unlock_irq(&current->sighand->siglock);
return -EINVAL;
}
recalc_sigpending();
intassign1 |= (uint16_t)assign << 9;
break;
default:
+ spin_unlock_irq(&desc->lock);
return -EINVAL;
}
intassign3 |= (uint16_t)assign << 12;
break;
default:
+ spin_unlock_irq(&desc->lock);
return -EINVAL;
}
$(Q)mkdir -p $(objtree)/include/asm-um
$(Q)ln -fsn $(srctree)/include/asm-$(HEADER_ARCH) include/asm-um/arch
else
- $(Q)cd $(TOPDIR)/include/asm-um && ln -sf ../asm-$(HEADER_ARCH) arch
+ $(Q)cd $(TOPDIR)/include/asm-um && ln -fsn ../asm-$(SUBARCH) arch
endif
$(objtree)/$(ARCH_DIR)/include:
ifneq ($(KBUILD_SRC),)
$(Q)ln -fsn $(srctree)/$(ARCH_DIR)/include/sysdep-$(SUBARCH) $(ARCH_DIR)/include/sysdep
else
- $(Q)cd $(ARCH_DIR)/include && ln -sf sysdep-$(SUBARCH) sysdep
+ $(Q)cd $(ARCH_DIR)/include && ln -fsn sysdep-$(SUBARCH) sysdep
endif
$(ARCH_DIR)/os:
ifneq ($(KBUILD_SRC),)
$(Q)ln -fsn $(srctree)/$(ARCH_DIR)/os-$(OS) $(ARCH_DIR)/os
else
- $(Q)cd $(ARCH_DIR) && ln -sf os-$(OS) os
+ $(Q)cd $(ARCH_DIR) && ln -fsn os-$(OS) os
endif
# Generated files
.remove = net_remove,
};
+#ifdef CONFIG_INET
static int uml_inetaddr_event(struct notifier_block *this, unsigned long event,
void *ptr)
{
.notifier_call = uml_inetaddr_event,
};
-static int uml_net_init(void)
+static void inet_register(void)
{
struct list_head *ele;
struct uml_net_private *lp;
struct in_device *ip;
struct in_ifaddr *in;
- mconsole_register_dev(&net_mc);
register_inetaddr_notifier(&uml_inetaddr_notifier);
/* Devices may have been opened already, so the uml_inetaddr_notifier
}
}
spin_unlock(&opened_lock);
+}
+#else
+static inline void inet_register(void)
+{
+}
+#endif
+static int uml_net_init(void)
+{
+ mconsole_register_dev(&net_mc);
+ inet_register();
return 0;
}
/*
- * Copyright (C) 2000 Jeff Dike (jdike@karaya.com)
+ * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
* Licensed under the GPL
*/
#ifndef __USER_H__
#define __USER_H__
+#include "uml-config.h"
+
/*
* The usual definition - copied here because the kernel provides its own,
* fancier, type-safe, definition. Using that one would require
extern void panic(const char *fmt, ...)
__attribute__ ((format (printf, 1, 2)));
+
+#ifdef UML_CONFIG_PRINTK
extern int printk(const char *fmt, ...)
__attribute__ ((format (printf, 1, 2)));
+#else
+static inline int printk(const char *fmt, ...)
+{
+ return 0;
+}
+#endif
+
extern void schedule(void);
extern int in_aton(char *str);
extern int open_gdb_chan(void);
{
int err;
- err = request_irq(irq, handler, irqflags, devname, dev_id);
- if (err)
- return err;
-
- if (fd != -1)
+ if (fd != -1) {
err = activate_fd(irq, fd, type, dev_id);
- return err;
+ if (err)
+ return err;
+ }
+
+ return request_irq(irq, handler, irqflags, devname, dev_id);
}
+
EXPORT_SYMBOL(um_request_irq);
EXPORT_SYMBOL(reactivate_fd);
#include <sys/mman.h>
#include <sys/time.h>
#include <asm/unistd.h>
-#include <asm/page.h>
#include "as-layout.h"
#include "ptrace_user.h"
#include "skas.h"
n = recvmsg(fd, &msg, 0);
if(n < 0)
return -errno;
-
- else if(n != sizeof(iov.iov_len))
+ else if(n != iov.iov_len)
*helper_pid_out = -1;
cmsg = CMSG_FIRSTHDR(&msg);
NULL
};
+static cpumask_t mce_device_initialized = CPU_MASK_NONE;
+
/* Per cpu sysdev init. All of the cpus still share the same ctl bank */
static __cpuinit int mce_create_device(unsigned int cpu)
{
if (err)
goto error;
}
+ cpu_set(cpu, mce_device_initialized);
return 0;
error:
{
int i;
+ if (!cpu_isset(cpu, mce_device_initialized))
+ return;
+
for (i = 0; mce_attributes[i]; i++)
sysdev_remove_file(&per_cpu(device_mce,cpu),
mce_attributes[i]);
sysdev_unregister(&per_cpu(device_mce,cpu));
+ cpu_clear(cpu, mce_device_initialized);
}
/* Get notified when a cpu comes on/off. Be hotplug friendly. */
mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
{
unsigned int cpu = (unsigned long)hcpu;
- int err = 0;
switch (action) {
- case CPU_UP_PREPARE:
- case CPU_UP_PREPARE_FROZEN:
- err = mce_create_device(cpu);
+ case CPU_ONLINE:
+ case CPU_ONLINE_FROZEN:
+ mce_create_device(cpu);
break;
- case CPU_UP_CANCELED:
- case CPU_UP_CANCELED_FROZEN:
case CPU_DEAD:
case CPU_DEAD_FROZEN:
mce_remove_device(cpu);
break;
}
- return err ? NOTIFY_BAD : NOTIFY_OK;
+ return NOTIFY_OK;
}
static struct notifier_block mce_cpu_notifier = {
#include <linux/module.h>
#include <linux/sched.h>
+#include <linux/preempt.h>
#include <linux/delay.h>
#include <asm/processor.h>
{
unsigned long bclock, now;
+ preempt_disable(); /* TSC's are per-cpu */
rdtscl(bclock);
do {
rep_nop();
rdtscl(now);
} while ((now-bclock) < loops);
+ preempt_enable();
}
/*
#include <linux/module.h>
#include <linux/sched.h>
+#include <linux/preempt.h>
#include <linux/delay.h>
+
#include <asm/delay.h>
#include <asm/msr.h>
void __delay(unsigned long loops)
{
unsigned bclock, now;
-
+
+ preempt_disable(); /* TSCs are per-CPU */
rdtscl(bclock);
- do
- {
+ do {
rep_nop();
rdtscl(now);
}
- while((now-bclock) < loops);
+ while ((now-bclock) < loops);
+ preempt_enable();
}
EXPORT_SYMBOL(__delay);
config ACPI_PROCFS
bool "Deprecated /proc/acpi files"
depends on PROC_FS
+ default y
---help---
For backwards compatibility, this option allows
deprecated /proc/acpi/ files to exist, even when
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
+#ifdef CONFIG_ACPI_PROCFS
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
+#endif
#include <linux/power_supply.h>
#include <acpi/acpi_bus.h>
#include <acpi/acpi_drivers.h>
MODULE_DESCRIPTION("ACPI AC Adapter Driver");
MODULE_LICENSE("GPL");
+#ifdef CONFIG_ACPI_PROCFS
extern struct proc_dir_entry *acpi_lock_ac_dir(void);
extern void *acpi_unlock_ac_dir(struct proc_dir_entry *acpi_ac_dir);
+static int acpi_ac_open_fs(struct inode *inode, struct file *file);
+#endif
static int acpi_ac_add(struct acpi_device *device);
static int acpi_ac_remove(struct acpi_device *device, int type);
-static int acpi_ac_open_fs(struct inode *inode, struct file *file);
+static int acpi_ac_resume(struct acpi_device *device);
const static struct acpi_device_id ac_device_ids[] = {
{"ACPI0003", 0},
.ops = {
.add = acpi_ac_add,
.remove = acpi_ac_remove,
+ .resume = acpi_ac_resume,
},
};
#define to_acpi_ac(x) container_of(x, struct acpi_ac, charger);
+#ifdef CONFIG_ACPI_PROCFS
static const struct file_operations acpi_ac_fops = {
.open = acpi_ac_open_fs,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
+#endif
+
static int get_ac_property(struct power_supply *psy,
enum power_supply_property psp,
union power_supply_propval *val)
return 0;
}
+#ifdef CONFIG_ACPI_PROCFS
/* --------------------------------------------------------------------------
FS Interface (/proc)
-------------------------------------------------------------------------- */
return 0;
}
+#endif
/* --------------------------------------------------------------------------
Driver Model
if (result)
goto end;
+#ifdef CONFIG_ACPI_PROCFS
result = acpi_ac_add_fs(device);
+#endif
if (result)
goto end;
ac->charger.name = acpi_device_bid(device);
end:
if (result) {
+#ifdef CONFIG_ACPI_PROCFS
acpi_ac_remove_fs(device);
+#endif
kfree(ac);
}
return result;
}
+static int acpi_ac_resume(struct acpi_device *device)
+{
+ struct acpi_ac *ac;
+ unsigned old_state;
+ if (!device || !acpi_driver_data(device))
+ return -EINVAL;
+ ac = acpi_driver_data(device);
+ old_state = ac->state;
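+ /* re-read the AC state; notify userspace only if it changed across suspend */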
+ if (acpi_ac_get_state(ac))
+ return 0;
+ if (old_state != ac->state)
+ kobject_uevent(&ac->charger.dev->kobj, KOBJ_CHANGE);
+ return 0;
+}
+
static int acpi_ac_remove(struct acpi_device *device, int type)
{
acpi_status status = AE_OK;
ACPI_ALL_NOTIFY, acpi_ac_notify);
if (ac->charger.dev)
power_supply_unregister(&ac->charger);
+#ifdef CONFIG_ACPI_PROCFS
acpi_ac_remove_fs(device);
+#endif
kfree(ac);
if (acpi_disabled)
return -ENODEV;
+#ifdef CONFIG_ACPI_PROCFS
acpi_ac_dir = acpi_lock_ac_dir();
if (!acpi_ac_dir)
return -ENODEV;
+#endif
result = acpi_bus_register_driver(&acpi_ac_driver);
if (result < 0) {
+#ifdef CONFIG_ACPI_PROCFS
acpi_unlock_ac_dir(acpi_ac_dir);
+#endif
return -ENODEV;
}
acpi_bus_unregister_driver(&acpi_ac_driver);
+#ifdef CONFIG_ACPI_PROCFS
acpi_unlock_ac_dir(acpi_ac_dir);
+#endif
return;
}
NULL,
&toshiba_backlight_data);
if (IS_ERR(toshiba_backlight_device)) {
+ int ret = PTR_ERR(toshiba_backlight_device);
+
printk(KERN_ERR "Could not register toshiba backlight device\n");
toshiba_backlight_device = NULL;
toshiba_acpi_exit();
+ return ret;
}
toshiba_backlight_device->props.max_brightness = HCI_LCD_BRIGHTNESS_LEVELS - 1;
return r;
}
-#define DBMSG(msg) ((verbose>1)?(msg):NULL)
-
static void pf_lock(struct pf_unit *pf, int func)
{
char lo_cmd[12] = { ATAPI_LOCK, pf->lun << 5, 0, 0, func, 0, 0, 0, 0, 0, 0, 0 };
- pf_atapi(pf, lo_cmd, 0, pf_scratch, func ? "unlock" : "lock");
+ pf_atapi(pf, lo_cmd, 0, pf_scratch, func ? "lock" : "unlock");
}
static void pf_eject(struct pf_unit *pf)
{ ATAPI_MODE_SENSE, pf->lun << 5, 0, 0, 0, 0, 0, 0, 8, 0, 0, 0 };
char buf[8];
- pf_atapi(pf, ms_cmd, 8, buf, DBMSG("mode sense"));
+ pf_atapi(pf, ms_cmd, 8, buf, "mode sense");
pf->media_status = PF_RW;
if (buf[3] & 0x80)
pf->media_status = PF_RO;
char buf[8];
int bs;
- if (pf_atapi(pf, rc_cmd, 8, buf, DBMSG("get capacity"))) {
+ if (pf_atapi(pf, rc_cmd, 8, buf, "get capacity")) {
pf->media_status = PF_NM;
return;
}
pf_buf += 512;
pf_block++;
if (!pf_run)
- return 0;
- if (!pf_count)
return 1;
- spin_lock_irqsave(&pf_spin_lock, saved_flags);
- pf_end_request(1);
- spin_unlock_irqrestore(&pf_spin_lock, saved_flags);
- return 1;
+ if (!pf_count) {
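+ /* current request is done: complete it and fetch the next from the queue */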
+ spin_lock_irqsave(&pf_spin_lock, saved_flags);
+ pf_end_request(1);
+ pf_req = elv_next_request(pf_queue);
+ spin_unlock_irqrestore(&pf_spin_lock, saved_flags);
+ if (!pf_req)
+ return 1;
+ pf_count = pf_req->current_nr_sectors;
+ pf_buf = pf_req->buffer;
+ }
+ return 0;
}
static inline void next_request(int success)
return 0;
}
+/*
+ * releasepage is called by pagevec_strip/try_to_release_page if
+ * buffer_heads_over_limit is true. Without a releasepage function,
+ * try_to_free_buffers is called instead. That can unset the dirty
+ * bit of our ram disk pages, which would then eventually be freed,
+ * even if the page is still in use.
+ */
+static int ramdisk_releasepage(struct page *page, gfp_t dummy)
+{
+ return 0;
+}
+
static const struct address_space_operations ramdisk_aops = {
.readpage = ramdisk_readpage,
.prepare_write = ramdisk_prepare_write,
.writepage = ramdisk_writepage,
.set_page_dirty = ramdisk_set_page_dirty,
.writepages = ramdisk_writepages,
+ .releasepage = ramdisk_releasepage,
};
static int rd_blkdev_pagecache_IO(int rw, struct bio_vec *vec, sector_t sector,
/* #define ATR_CSUM */
#ifdef PCMCIA_DEBUG
-#define reader_to_dev(x) (&handle_to_dev(x->p_dev->handle))
+#define reader_to_dev(x) (&handle_to_dev(x->p_dev))
static int pc_debug = PCMCIA_DEBUG;
module_param(pc_debug, int, 0600);
#define DEBUGP(n, rdr, x, args...) do { \
#ifdef PCMCIA_DEBUG
-#define reader_to_dev(x) (&handle_to_dev(x->p_dev->handle))
+#define reader_to_dev(x) (&handle_to_dev(x->p_dev))
static int pc_debug = PCMCIA_DEBUG;
module_param(pc_debug, int, 0600);
#define DEBUGP(n, rdr, x, args...) do { \
};
#endif
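+/* remember the size we claimed so release always matches the request */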
+static resource_size_t rtc_size;
+
+static struct resource * __init rtc_request_region(resource_size_t size)
+{
+ struct resource *r;
+
+ if (RTC_IOMAPPED)
+ r = request_region(RTC_PORT(0), size, "rtc");
+ else
+ r = request_mem_region(RTC_PORT(0), size, "rtc");
+
+ if (r)
+ rtc_size = size;
+
+ return r;
+}
+
+static void rtc_release_region(void)
+{
+ if (RTC_IOMAPPED)
+ release_region(RTC_PORT(0), rtc_size);
+ else
+ release_mem_region(RTC_PORT(0), rtc_size);
+}
+
static int __init rtc_init(void)
{
#ifdef CONFIG_PROC_FS
}
no_irq:
#else
- if (RTC_IOMAPPED)
- r = request_region(RTC_PORT(0), RTC_IO_EXTENT, "rtc");
- else
- r = request_mem_region(RTC_PORT(0), RTC_IO_EXTENT, "rtc");
+ r = rtc_request_region(RTC_IO_EXTENT);
+
+ /*
+ * If we've already requested a smaller range (for example, because
+ * PNPBIOS or ACPI told us how the device is configured), the request
+ * above might fail because it's too big.
+ *
+ * If so, request just the range we actually use.
+ */
+ if (!r)
+ r = rtc_request_region(RTC_IO_EXTENT_USED);
if (!r) {
#ifdef RTC_IRQ
rtc_has_irq = 0;
/* Yeah right, seeing as irq 8 doesn't even hit the bus. */
rtc_has_irq = 0;
printk(KERN_ERR "rtc: IRQ %d is not free.\n", RTC_IRQ);
- if (RTC_IOMAPPED)
- release_region(RTC_PORT(0), RTC_IO_EXTENT);
- else
- release_mem_region(RTC_PORT(0), RTC_IO_EXTENT);
+ rtc_release_region();
return -EIO;
}
hpet_rtc_timer_init();
free_irq(RTC_IRQ, NULL);
rtc_has_irq = 0;
#endif
- release_region(RTC_PORT(0), RTC_IO_EXTENT);
+ rtc_release_region();
return -ENODEV;
}
if (rtc_has_irq)
free_irq (rtc_irq, &rtc_port);
#else
- if (RTC_IOMAPPED)
- release_region(RTC_PORT(0), RTC_IO_EXTENT);
- else
- release_mem_region(RTC_PORT(0), RTC_IO_EXTENT);
+ rtc_release_region();
#ifdef RTC_IRQ
if (rtc_has_irq)
free_irq (RTC_IRQ, NULL);
/* we are done once this client rejects
* an available resource
*/
- if (ack == DMA_ACK) {
+ if (ack == DMA_ACK)
dma_chan_get(chan);
- kref_get(&device->refcount);
- } else if (ack == DMA_NAK)
+ else if (ack == DMA_NAK)
return;
}
}
/* client was holding resources for this channel so
* free it
*/
- if (ack == DMA_ACK) {
+ if (ack == DMA_ACK)
dma_chan_put(chan);
- kref_put(&chan->device->refcount,
- dma_async_device_cleanup);
- }
}
mutex_unlock(&dma_list_mutex);
ack = client->event_callback(client, chan,
DMA_RESOURCE_REMOVED);
- if (ack == DMA_ACK) {
+ if (ack == DMA_ACK)
dma_chan_put(chan);
- kref_put(&chan->device->refcount,
- dma_async_device_cleanup);
- }
}
list_del(&client->global_node);
goto err_out;
}
+ /* One for the channel, one for the class device */
+ kref_get(&device->refcount);
kref_get(&device->refcount);
kref_init(&chan->refcount);
chan->slow_ref = 0;
MODULE_AUTHOR("Intel Corporation");
static struct pci_device_id ioat_pci_tbl[] = {
+ /* I/OAT v1 platforms */
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_CNB) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_SCNB) },
{ PCI_DEVICE(PCI_VENDOR_ID_UNISYS, PCI_DEVICE_ID_UNISYS_DMA_DIRECTOR) },
+
+ /* I/OAT v2 platforms */
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB) },
{ 0, }
};
if (device->dma && ioat_dca_enabled)
device->dca = ioat_dca_init(pdev, iobase);
break;
+ case IOAT_VER_2_0:
+ device->dma = ioat_dma_probe(pdev, iobase);
+ if (device->dma && ioat_dca_enabled)
+ device->dca = ioat2_dca_init(pdev, iobase);
+ break;
default:
err = -ENODEV;
break;
}
+ if (!device->dma)
+ err = -ENODEV;
return err;
}
return dca;
}
+
+static int ioat2_dca_add_requester(struct dca_provider *dca, struct device *dev)
+{
+ struct ioat_dca_priv *ioatdca = dca_priv(dca);
+ struct pci_dev *pdev;
+ int i;
+ u16 id;
+ u16 global_req_table;
+
+ /* This implementation only supports PCI-Express */
+ if (dev->bus != &pci_bus_type)
+ return -ENODEV;
+ pdev = to_pci_dev(dev);
+ id = dcaid_from_pcidev(pdev);
+
+ if (ioatdca->requester_count == ioatdca->max_requesters)
+ return -ENODEV;
+
+ for (i = 0; i < ioatdca->max_requesters; i++) {
+ if (ioatdca->req_slots[i].pdev == NULL) {
+ /* found an empty slot */
+ ioatdca->requester_count++;
+ ioatdca->req_slots[i].pdev = pdev;
+ ioatdca->req_slots[i].rid = id;
+ global_req_table =
+ readw(ioatdca->dca_base + IOAT_DCA_GREQID_OFFSET);
+ writel(id | IOAT_DCA_GREQID_VALID,
+ ioatdca->iobase + global_req_table + (i * 4));
+ return i;
+ }
+ }
+ /* Error, ioatdca->requester_count is out of whack */
+ return -EFAULT;
+}
+
+static int ioat2_dca_remove_requester(struct dca_provider *dca,
+ struct device *dev)
+{
+ struct ioat_dca_priv *ioatdca = dca_priv(dca);
+ struct pci_dev *pdev;
+ int i;
+ u16 global_req_table;
+
+ /* This implementation only supports PCI-Express */
+ if (dev->bus != &pci_bus_type)
+ return -ENODEV;
+ pdev = to_pci_dev(dev);
+
+ for (i = 0; i < ioatdca->max_requesters; i++) {
+ if (ioatdca->req_slots[i].pdev == pdev) {
+ global_req_table =
+ readw(ioatdca->dca_base + IOAT_DCA_GREQID_OFFSET);
+ writel(0, ioatdca->iobase + global_req_table + (i * 4));
+ ioatdca->req_slots[i].pdev = NULL;
+ ioatdca->req_slots[i].rid = 0;
+ ioatdca->requester_count--;
+ return i;
+ }
+ }
+ return -ENODEV;
+}
+
+static u8 ioat2_dca_get_tag(struct dca_provider *dca, int cpu)
+{
+ u8 tag;
+
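+ /* v2 tags are the one's complement of the v1 tag, truncated to 5 bits */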
+ tag = ioat_dca_get_tag(dca, cpu);
+ tag = (~tag) & 0x1F;
+ return tag;
+}
+
+static struct dca_ops ioat2_dca_ops = {
+ .add_requester = ioat2_dca_add_requester,
+ .remove_requester = ioat2_dca_remove_requester,
+ .get_tag = ioat2_dca_get_tag,
+};
+
+static int ioat2_dca_count_dca_slots(void *iobase, u16 dca_offset)
+{
+ int slots = 0;
+ u32 req;
+ u16 global_req_table;
+
+ global_req_table = readw(iobase + dca_offset + IOAT_DCA_GREQID_OFFSET);
+ if (global_req_table == 0)
+ return 0;
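+ /* count entries until the LASTID bit marks the final slot */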
+ do {
+ req = readl(iobase + global_req_table + (slots * sizeof(u32)));
+ slots++;
+ } while ((req & IOAT_DCA_GREQID_LASTID) == 0);
+
+ return slots;
+}
+
+struct dca_provider *ioat2_dca_init(struct pci_dev *pdev, void __iomem *iobase)
+{
+ struct dca_provider *dca;
+ struct ioat_dca_priv *ioatdca;
+ int slots;
+ int i;
+ int err;
+ u32 tag_map;
+ u16 dca_offset;
+ u16 csi_fsb_control;
+ u16 pcie_control;
+ u8 bit;
+
+ if (!system_has_dca_enabled(pdev))
+ return NULL;
+
+ dca_offset = readw(iobase + IOAT_DCAOFFSET_OFFSET);
+ if (dca_offset == 0)
+ return NULL;
+
+ slots = ioat2_dca_count_dca_slots(iobase, dca_offset);
+ if (slots == 0)
+ return NULL;
+
+ dca = alloc_dca_provider(&ioat2_dca_ops,
+ sizeof(*ioatdca)
+ + (sizeof(struct ioat_dca_slot) * slots));
+ if (!dca)
+ return NULL;
+
+ ioatdca = dca_priv(dca);
+ ioatdca->iobase = iobase;
+ ioatdca->dca_base = iobase + dca_offset;
+ ioatdca->max_requesters = slots;
+
+ /* some BIOSes might not know to turn these on */
+ csi_fsb_control = readw(ioatdca->dca_base + IOAT_FSB_CAP_ENABLE_OFFSET);
+ if ((csi_fsb_control & IOAT_FSB_CAP_ENABLE_PREFETCH) == 0) {
+ csi_fsb_control |= IOAT_FSB_CAP_ENABLE_PREFETCH;
+ writew(csi_fsb_control,
+ ioatdca->dca_base + IOAT_FSB_CAP_ENABLE_OFFSET);
+ }
+ pcie_control = readw(ioatdca->dca_base + IOAT_PCI_CAP_ENABLE_OFFSET);
+ if ((pcie_control & IOAT_PCI_CAP_ENABLE_MEMWR) == 0) {
+ pcie_control |= IOAT_PCI_CAP_ENABLE_MEMWR;
+ writew(pcie_control,
+ ioatdca->dca_base + IOAT_PCI_CAP_ENABLE_OFFSET);
+ }
+
+ /* TODO version, compatibility and configuration checks */
+
+ /* copy out the APIC to DCA tag map */
+ tag_map = readl(ioatdca->dca_base + IOAT_APICID_TAG_MAP_OFFSET);
+ for (i = 0; i < 5; i++) {
+ bit = (tag_map >> (4 * i)) & 0x0f;
+ if (bit < 8)
+ ioatdca->tag_map[i] = bit | DCA_TAG_MAP_VALID;
+ else
+ ioatdca->tag_map[i] = 0;
+ }
+
+ err = register_dca_provider(dca, &pdev->dev);
+ if (err) {
+ free_dca_provider(dca);
+ return NULL;
+ }
+
+ return dca;
+}
#include "ioatdma_registers.h"
#include "ioatdma_hw.h"
-#define INITIAL_IOAT_DESC_COUNT 128
-
#define to_ioat_chan(chan) container_of(chan, struct ioat_dma_chan, common)
#define to_ioatdma_device(dev) container_of(dev, struct ioatdma_device, common)
#define to_ioat_desc(lh) container_of(lh, struct ioat_desc_sw, node)
#define tx_to_ioat_desc(tx) container_of(tx, struct ioat_desc_sw, async_tx)
+static int ioat_pending_level = 4;
+module_param(ioat_pending_level, int, 0644);
+MODULE_PARM_DESC(ioat_pending_level,
+ "high-water mark for pushing ioat descriptors (default: 4)");
+
/* internal functions */
static void ioat_dma_start_null_desc(struct ioat_dma_chan *ioat_chan);
static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan);
+
+static struct ioat_desc_sw *
+ioat1_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan);
static struct ioat_desc_sw *
-ioat_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan);
+ioat2_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan);
static inline struct ioat_dma_chan *ioat_lookup_chan_by_index(
struct ioatdma_device *device,
ioat_chan->device = device;
ioat_chan->reg_base = device->reg_base + (0x80 * (i + 1));
ioat_chan->xfercap = xfercap;
+ ioat_chan->desccount = 0;
+ if (ioat_chan->device->version != IOAT_VER_1_2) {
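+ /* enable completion writes; target DCA at any CPU for now */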
+ writel(IOAT_DCACTRL_CMPL_WRITE_ENABLE
+ | IOAT_DMA_DCA_ANY_CPU,
+ ioat_chan->reg_base + IOAT_DCACTRL_OFFSET);
+ }
spin_lock_init(&ioat_chan->cleanup_lock);
spin_lock_init(&ioat_chan->desc_lock);
INIT_LIST_HEAD(&ioat_chan->free_desc);
tx_to_ioat_desc(tx)->dst = addr;
}
-static dma_cookie_t ioat_tx_submit(struct dma_async_tx_descriptor *tx)
+static inline void __ioat1_dma_memcpy_issue_pending(
+ struct ioat_dma_chan *ioat_chan);
+static inline void __ioat2_dma_memcpy_issue_pending(
+ struct ioat_dma_chan *ioat_chan);
+
+static dma_cookie_t ioat1_tx_submit(struct dma_async_tx_descriptor *tx)
{
struct ioat_dma_chan *ioat_chan = to_ioat_chan(tx->chan);
struct ioat_desc_sw *first = tx_to_ioat_desc(tx);
struct ioat_desc_sw *prev, *new;
struct ioat_dma_descriptor *hw;
- int append = 0;
dma_cookie_t cookie;
LIST_HEAD(new_chain);
u32 copy;
list_add_tail(&new->node, &new_chain);
desc_count++;
prev = new;
- } while (len && (new = ioat_dma_get_next_descriptor(ioat_chan)));
+ } while (len && (new = ioat1_dma_get_next_descriptor(ioat_chan)));
hw->ctl = IOAT_DMA_DESCRIPTOR_CTL_CP_STS;
if (new->async_tx.callback) {
first->async_tx.phys;
__list_splice(&new_chain, ioat_chan->used_desc.prev);
+ ioat_chan->dmacount += desc_count;
ioat_chan->pending += desc_count;
- if (ioat_chan->pending >= 4) {
- append = 1;
- ioat_chan->pending = 0;
- }
+ if (ioat_chan->pending >= ioat_pending_level)
+ __ioat1_dma_memcpy_issue_pending(ioat_chan);
spin_unlock_bh(&ioat_chan->desc_lock);
- if (append)
- writeb(IOAT_CHANCMD_APPEND,
- ioat_chan->reg_base + IOAT_CHANCMD_OFFSET);
+ return cookie;
+}
+
+static dma_cookie_t ioat2_tx_submit(struct dma_async_tx_descriptor *tx)
+{
+ struct ioat_dma_chan *ioat_chan = to_ioat_chan(tx->chan);
+ struct ioat_desc_sw *first = tx_to_ioat_desc(tx);
+ struct ioat_desc_sw *new;
+ struct ioat_dma_descriptor *hw;
+ dma_cookie_t cookie;
+ u32 copy;
+ size_t len;
+ dma_addr_t src, dst;
+ int orig_ack;
+ unsigned int desc_count = 0;
+
+ /* src, dest and len are stored in the initial descriptor */
+ len = first->len;
+ src = first->src;
+ dst = first->dst;
+ orig_ack = first->async_tx.ack;
+ new = first;
+
+ /* ioat_chan->desc_lock is still held in the version 2 path */
+
+ do {
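+ /* split the transfer into xfercap-sized hardware descriptors */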
+ copy = min((u32) len, ioat_chan->xfercap);
+
+ new->async_tx.ack = 1;
+
+ hw = new->hw;
+ hw->size = copy;
+ hw->ctl = 0;
+ hw->src_addr = src;
+ hw->dst_addr = dst;
+
+ len -= copy;
+ dst += copy;
+ src += copy;
+ desc_count++;
+ } while (len && (new = ioat2_dma_get_next_descriptor(ioat_chan)));
+
+ hw->ctl = IOAT_DMA_DESCRIPTOR_CTL_CP_STS;
+ if (new->async_tx.callback) {
+ hw->ctl |= IOAT_DMA_DESCRIPTOR_CTL_INT_GN;
+ if (first != new) {
+ /* move the callback to the last desc */
+ new->async_tx.callback = first->async_tx.callback;
+ new->async_tx.callback_param
+ = first->async_tx.callback_param;
+ first->async_tx.callback = NULL;
+ first->async_tx.callback_param = NULL;
+ }
+ }
+
+ new->tx_cnt = desc_count;
+ new->async_tx.ack = orig_ack; /* client is in control of this ack */
+
+ /* store the original values for use in later cleanup */
+ if (new != first) {
+ new->src = first->src;
+ new->dst = first->dst;
+ new->len = first->len;
+ }
+
+ /* cookie incr and addition to used_list must be atomic */
+ cookie = ioat_chan->common.cookie;
+ cookie++;
+ if (cookie < 0)
+ cookie = 1;
+ ioat_chan->common.cookie = new->async_tx.cookie = cookie;
+
+ ioat_chan->dmacount += desc_count;
+ ioat_chan->pending += desc_count;
+ if (ioat_chan->pending >= ioat_pending_level)
+ __ioat2_dma_memcpy_issue_pending(ioat_chan);
+ spin_unlock_bh(&ioat_chan->desc_lock);
return cookie;
}
+/**
+ * ioat_dma_alloc_descriptor - allocate and return a sw and hw descriptor pair
+ * @ioat_chan: the channel supplying the memory pool for the descriptors
+ * @flags: allocation flags
+ */
static struct ioat_desc_sw *ioat_dma_alloc_descriptor(
struct ioat_dma_chan *ioat_chan,
gfp_t flags)
dma_async_tx_descriptor_init(&desc_sw->async_tx, &ioat_chan->common);
desc_sw->async_tx.tx_set_src = ioat_set_src;
desc_sw->async_tx.tx_set_dest = ioat_set_dest;
- desc_sw->async_tx.tx_submit = ioat_tx_submit;
+ switch (ioat_chan->device->version) {
+ case IOAT_VER_1_2:
+ desc_sw->async_tx.tx_submit = ioat1_tx_submit;
+ break;
+ case IOAT_VER_2_0:
+ desc_sw->async_tx.tx_submit = ioat2_tx_submit;
+ break;
+ }
INIT_LIST_HEAD(&desc_sw->async_tx.tx_list);
+
desc_sw->hw = desc;
desc_sw->async_tx.phys = phys;
return desc_sw;
}
-/* returns the actual number of allocated descriptors */
+static int ioat_initial_desc_count = 256;
+module_param(ioat_initial_desc_count, int, 0644);
+MODULE_PARM_DESC(ioat_initial_desc_count,
+ "initial descriptors per channel (default: 256)");
+
+/**
+ * ioat2_dma_massage_chan_desc - link the descriptors into a circle
+ * @ioat_chan: the channel to be massaged
+ */
+static void ioat2_dma_massage_chan_desc(struct ioat_dma_chan *ioat_chan)
+{
+ struct ioat_desc_sw *desc, *_desc;
+
+ /* setup used_desc */
+ ioat_chan->used_desc.next = ioat_chan->free_desc.next;
+ ioat_chan->used_desc.prev = NULL;
+
+ /* pull free_desc out of the circle so that every node is a hw
+ * descriptor, but leave it pointing to the list
+ */
+ ioat_chan->free_desc.prev->next = ioat_chan->free_desc.next;
+ ioat_chan->free_desc.next->prev = ioat_chan->free_desc.prev;
+
+ /* circle link the hw descriptors */
+ desc = to_ioat_desc(ioat_chan->free_desc.next);
+ desc->hw->next = to_ioat_desc(desc->node.next)->async_tx.phys;
+ list_for_each_entry_safe(desc, _desc, ioat_chan->free_desc.next, node) {
+ desc->hw->next = to_ioat_desc(desc->node.next)->async_tx.phys;
+ }
+}
+
+/**
+ * ioat_dma_alloc_chan_resources - returns the number of allocated descriptors
+ * @chan: the channel to be filled out
+ */
static int ioat_dma_alloc_chan_resources(struct dma_chan *chan)
{
struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
/* have we already been set up? */
if (!list_empty(&ioat_chan->free_desc))
- return INITIAL_IOAT_DESC_COUNT;
+ return ioat_chan->desccount;
/* Setup register to interrupt and write completion status on error */
chanctrl = IOAT_CHANCTRL_ERR_INT_EN |
}
/* Allocate descriptors */
- for (i = 0; i < INITIAL_IOAT_DESC_COUNT; i++) {
+ for (i = 0; i < ioat_initial_desc_count; i++) {
desc = ioat_dma_alloc_descriptor(ioat_chan, GFP_KERNEL);
if (!desc) {
dev_err(&ioat_chan->device->pdev->dev,
list_add_tail(&desc->node, &tmp_list);
}
spin_lock_bh(&ioat_chan->desc_lock);
+ ioat_chan->desccount = i;
list_splice(&tmp_list, &ioat_chan->free_desc);
+ if (ioat_chan->device->version != IOAT_VER_1_2)
+ ioat2_dma_massage_chan_desc(ioat_chan);
spin_unlock_bh(&ioat_chan->desc_lock);
/* allocate a completion writeback area */
ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH);
tasklet_enable(&ioat_chan->cleanup_task);
- ioat_dma_start_null_desc(ioat_chan);
- return i;
+ ioat_dma_start_null_desc(ioat_chan); /* give chain to dma device */
+ return ioat_chan->desccount;
}
+/**
+ * ioat_dma_free_chan_resources - release all the descriptors
+ * @chan: the channel to be cleaned
+ */
static void ioat_dma_free_chan_resources(struct dma_chan *chan)
{
struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
/* Delay 100ms after reset to allow internal DMA logic to quiesce
* before removing DMA descriptor resources.
*/
- writeb(IOAT_CHANCMD_RESET, ioat_chan->reg_base + IOAT_CHANCMD_OFFSET);
+ writeb(IOAT_CHANCMD_RESET,
+ ioat_chan->reg_base
+ + IOAT_CHANCMD_OFFSET(ioat_chan->device->version));
mdelay(100);
spin_lock_bh(&ioat_chan->desc_lock);
- list_for_each_entry_safe(desc, _desc, &ioat_chan->used_desc, node) {
- in_use_descs++;
- list_del(&desc->node);
- pci_pool_free(ioatdma_device->dma_pool, desc->hw,
- desc->async_tx.phys);
- kfree(desc);
- }
- list_for_each_entry_safe(desc, _desc, &ioat_chan->free_desc, node) {
- list_del(&desc->node);
+ switch (ioat_chan->device->version) {
+ case IOAT_VER_1_2:
+ list_for_each_entry_safe(desc, _desc,
+ &ioat_chan->used_desc, node) {
+ in_use_descs++;
+ list_del(&desc->node);
+ pci_pool_free(ioatdma_device->dma_pool, desc->hw,
+ desc->async_tx.phys);
+ kfree(desc);
+ }
+ list_for_each_entry_safe(desc, _desc,
+ &ioat_chan->free_desc, node) {
+ list_del(&desc->node);
+ pci_pool_free(ioatdma_device->dma_pool, desc->hw,
+ desc->async_tx.phys);
+ kfree(desc);
+ }
+ break;
+ case IOAT_VER_2_0:
+ list_for_each_entry_safe(desc, _desc,
+ ioat_chan->free_desc.next, node) {
+ list_del(&desc->node);
+ pci_pool_free(ioatdma_device->dma_pool, desc->hw,
+ desc->async_tx.phys);
+ kfree(desc);
+ }
+ desc = to_ioat_desc(ioat_chan->free_desc.next);
pci_pool_free(ioatdma_device->dma_pool, desc->hw,
desc->async_tx.phys);
kfree(desc);
+ INIT_LIST_HEAD(&ioat_chan->free_desc);
+ INIT_LIST_HEAD(&ioat_chan->used_desc);
+ break;
}
spin_unlock_bh(&ioat_chan->desc_lock);
ioat_chan->last_completion = ioat_chan->completion_addr = 0;
ioat_chan->pending = 0;
+ ioat_chan->dmacount = 0;
}
/**
* has run out.
*/
static struct ioat_desc_sw *
-ioat_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan)
+ioat1_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan)
{
struct ioat_desc_sw *new = NULL;
return new;
}
-static struct dma_async_tx_descriptor *ioat_dma_prep_memcpy(
+static struct ioat_desc_sw *
+ioat2_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan)
+{
+ struct ioat_desc_sw *new = NULL;
+
+ /*
+ * used.prev points to where to start processing
+ * used.next points to next free descriptor
+ * if used.prev == NULL, there are none waiting to be processed
+ * if used.next == used.prev.prev, there is only one free descriptor,
+ * and we need to use it as a noop descriptor before
+ * linking in a new set of descriptors, since the device
+ * has probably already read the pointer to it
+ */
+ if (ioat_chan->used_desc.prev &&
+ ioat_chan->used_desc.next == ioat_chan->used_desc.prev->prev) {
+
+ struct ioat_desc_sw *desc = NULL;
+ struct ioat_desc_sw *noop_desc = NULL;
+ int i;
+
+ /* set up the noop descriptor */
+ noop_desc = to_ioat_desc(ioat_chan->used_desc.next);
+ noop_desc->hw->size = 0;
+ noop_desc->hw->ctl = IOAT_DMA_DESCRIPTOR_NUL;
+ noop_desc->hw->src_addr = 0;
+ noop_desc->hw->dst_addr = 0;
+
+ ioat_chan->used_desc.next = ioat_chan->used_desc.next->next;
+ ioat_chan->pending++;
+ ioat_chan->dmacount++;
+
+ /* get a few more descriptors */
+ for (i = 16; i; i--) {
+ desc = ioat_dma_alloc_descriptor(ioat_chan, GFP_ATOMIC);
+ BUG_ON(!desc);
+ list_add_tail(&desc->node, ioat_chan->used_desc.next);
+
+ desc->hw->next
+ = to_ioat_desc(desc->node.next)->async_tx.phys;
+ to_ioat_desc(desc->node.prev)->hw->next
+ = desc->async_tx.phys;
+ ioat_chan->desccount++;
+ }
+
+ ioat_chan->used_desc.next = noop_desc->node.next;
+ }
+ new = to_ioat_desc(ioat_chan->used_desc.next);
+ prefetch(new);
+ ioat_chan->used_desc.next = new->node.next;
+
+ if (ioat_chan->used_desc.prev == NULL)
+ ioat_chan->used_desc.prev = &new->node;
+
+ prefetch(new->hw);
+ return new;
+}
+
+static struct ioat_desc_sw *ioat_dma_get_next_descriptor(
+ struct ioat_dma_chan *ioat_chan)
+{
+ if (!ioat_chan)
+ return NULL;
+
+ switch (ioat_chan->device->version) {
+ case IOAT_VER_1_2:
+ return ioat1_dma_get_next_descriptor(ioat_chan);
+ break;
+ case IOAT_VER_2_0:
+ return ioat2_dma_get_next_descriptor(ioat_chan);
+ break;
+ }
+ return NULL;
+}
+
+static struct dma_async_tx_descriptor *ioat1_dma_prep_memcpy(
struct dma_chan *chan,
size_t len,
int int_en)
return new ? &new->async_tx : NULL;
}
+static struct dma_async_tx_descriptor *ioat2_dma_prep_memcpy(
+ struct dma_chan *chan,
+ size_t len,
+ int int_en)
+{
+ struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
+ struct ioat_desc_sw *new;
+
+ spin_lock_bh(&ioat_chan->desc_lock);
+ new = ioat2_dma_get_next_descriptor(ioat_chan);
+ new->len = len;
+
+ /* leave ioat_chan->desc_lock held in the version 2 path */
+ return new ? &new->async_tx : NULL;
+}
+
+
/**
* ioat_dma_memcpy_issue_pending - push potentially unrecognized appended
* descriptors to hw
* @chan: DMA channel handle
*/
-static void ioat_dma_memcpy_issue_pending(struct dma_chan *chan)
+static inline void __ioat1_dma_memcpy_issue_pending(
+ struct ioat_dma_chan *ioat_chan)
+{
+ ioat_chan->pending = 0;
+ writeb(IOAT_CHANCMD_APPEND, ioat_chan->reg_base + IOAT1_CHANCMD_OFFSET);
+}
+
+static void ioat1_dma_memcpy_issue_pending(struct dma_chan *chan)
{
struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
if (ioat_chan->pending != 0) {
- ioat_chan->pending = 0;
- writeb(IOAT_CHANCMD_APPEND,
- ioat_chan->reg_base + IOAT_CHANCMD_OFFSET);
+ spin_lock_bh(&ioat_chan->desc_lock);
+ __ioat1_dma_memcpy_issue_pending(ioat_chan);
+ spin_unlock_bh(&ioat_chan->desc_lock);
+ }
+}
+
+static inline void __ioat2_dma_memcpy_issue_pending(
+ struct ioat_dma_chan *ioat_chan)
+{
+ ioat_chan->pending = 0;
+ writew(ioat_chan->dmacount,
+ ioat_chan->reg_base + IOAT_CHAN_DMACOUNT_OFFSET);
+}
+
+static void ioat2_dma_memcpy_issue_pending(struct dma_chan *chan)
+{
+ struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
+
+ if (ioat_chan->pending != 0) {
+ spin_lock_bh(&ioat_chan->desc_lock);
+ __ioat2_dma_memcpy_issue_pending(ioat_chan);
+ spin_unlock_bh(&ioat_chan->desc_lock);
}
}
chan->reg_base + IOAT_CHANCTRL_OFFSET);
}
+/**
+ * ioat_dma_memcpy_cleanup - clean up finished descriptors
+ * @chan: ioat channel to be cleaned up
+ */
static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan)
{
unsigned long phys_complete;
struct ioat_desc_sw *desc, *_desc;
dma_cookie_t cookie = 0;
+ unsigned long desc_phys;
+ struct ioat_desc_sw *latest_desc;
prefetch(ioat_chan->completion_virt);
cookie = 0;
spin_lock_bh(&ioat_chan->desc_lock);
- list_for_each_entry_safe(desc, _desc, &ioat_chan->used_desc, node) {
-
- /*
- * Incoming DMA requests may use multiple descriptors, due to
- * exceeding xfercap, perhaps. If so, only the last one will
- * have a cookie, and require unmapping.
- */
- if (desc->async_tx.cookie) {
- cookie = desc->async_tx.cookie;
+ switch (ioat_chan->device->version) {
+ case IOAT_VER_1_2:
+ list_for_each_entry_safe(desc, _desc,
+ &ioat_chan->used_desc, node) {
/*
- * yes we are unmapping both _page and _single alloc'd
- * regions with unmap_page. Is this *really* that bad?
+ * Incoming DMA requests may use multiple descriptors,
+ * due to exceeding xfercap, perhaps. If so, only the
+ * last one will have a cookie, and require unmapping.
*/
- pci_unmap_page(ioat_chan->device->pdev,
- pci_unmap_addr(desc, dst),
- pci_unmap_len(desc, len),
- PCI_DMA_FROMDEVICE);
- pci_unmap_page(ioat_chan->device->pdev,
- pci_unmap_addr(desc, src),
- pci_unmap_len(desc, len),
- PCI_DMA_TODEVICE);
- if (desc->async_tx.callback) {
- desc->async_tx.callback(
- desc->async_tx.callback_param);
- desc->async_tx.callback = NULL;
+ if (desc->async_tx.cookie) {
+ cookie = desc->async_tx.cookie;
+
+ /*
+ * yes we are unmapping both _page and _single
+ * alloc'd regions with unmap_page. Is this
+ * *really* that bad?
+ */
+ pci_unmap_page(ioat_chan->device->pdev,
+ pci_unmap_addr(desc, dst),
+ pci_unmap_len(desc, len),
+ PCI_DMA_FROMDEVICE);
+ pci_unmap_page(ioat_chan->device->pdev,
+ pci_unmap_addr(desc, src),
+ pci_unmap_len(desc, len),
+ PCI_DMA_TODEVICE);
+
+ if (desc->async_tx.callback) {
+ desc->async_tx.callback(desc->async_tx.callback_param);
+ desc->async_tx.callback = NULL;
+ }
}
- }
- if (desc->async_tx.phys != phys_complete) {
- /*
- * a completed entry, but not the last, so cleanup
- * if the client is done with the descriptor
- */
- if (desc->async_tx.ack) {
- list_del(&desc->node);
- list_add_tail(&desc->node,
- &ioat_chan->free_desc);
- } else
+ if (desc->async_tx.phys != phys_complete) {
+ /*
+ * a completed entry, but not the last, so clean
+ * up if the client is done with the descriptor
+ */
+ if (desc->async_tx.ack) {
+ list_del(&desc->node);
+ list_add_tail(&desc->node,
+ &ioat_chan->free_desc);
+ } else
+ desc->async_tx.cookie = 0;
+ } else {
+ /*
+ * last used desc. Do not remove, so we can
+ * append from it, but don't look at it next
+ * time, either
+ */
desc->async_tx.cookie = 0;
- } else {
- /*
- * last used desc. Do not remove, so we can append from
- * it, but don't look at it next time, either
- */
- desc->async_tx.cookie = 0;
- /* TODO check status bits? */
+ /* TODO check status bits? */
+ break;
+ }
+ }
+ break;
+ case IOAT_VER_2_0:
+ /* has some other thread already cleaned up? */
+ if (ioat_chan->used_desc.prev == NULL)
break;
+
+ /* work backwards to find latest finished desc */
+ desc = to_ioat_desc(ioat_chan->used_desc.next);
+ latest_desc = NULL;
+ do {
+ desc = to_ioat_desc(desc->node.prev);
+ desc_phys = (unsigned long)desc->async_tx.phys
+ & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR;
+ if (desc_phys == phys_complete) {
+ latest_desc = desc;
+ break;
+ }
+ } while (&desc->node != ioat_chan->used_desc.prev);
+
+ if (latest_desc != NULL) {
+
+ /* work forwards to clear finished descriptors */
+ for (desc = to_ioat_desc(ioat_chan->used_desc.prev);
+ &desc->node != latest_desc->node.next &&
+ &desc->node != ioat_chan->used_desc.next;
+ desc = to_ioat_desc(desc->node.next)) {
+ if (desc->async_tx.cookie) {
+ cookie = desc->async_tx.cookie;
+ desc->async_tx.cookie = 0;
+
+ pci_unmap_page(ioat_chan->device->pdev,
+ pci_unmap_addr(desc, dst),
+ pci_unmap_len(desc, len),
+ PCI_DMA_FROMDEVICE);
+ pci_unmap_page(ioat_chan->device->pdev,
+ pci_unmap_addr(desc, src),
+ pci_unmap_len(desc, len),
+ PCI_DMA_TODEVICE);
+
+ if (desc->async_tx.callback) {
+ desc->async_tx.callback(desc->async_tx.callback_param);
+ desc->async_tx.callback = NULL;
+ }
+ }
+ }
+
+ /* move used.prev up beyond those that are finished */
+ if (&desc->node == ioat_chan->used_desc.next)
+ ioat_chan->used_desc.prev = NULL;
+ else
+ ioat_chan->used_desc.prev = &desc->node;
}
+ break;
}
spin_unlock_bh(&ioat_chan->desc_lock);
return dma_async_is_complete(cookie, last_complete, last_used);
}
-/* PCI API */
-
static void ioat_dma_start_null_desc(struct ioat_dma_chan *ioat_chan)
{
struct ioat_desc_sw *desc;
desc->hw->ctl = IOAT_DMA_DESCRIPTOR_NUL
| IOAT_DMA_DESCRIPTOR_CTL_INT_GN
| IOAT_DMA_DESCRIPTOR_CTL_CP_STS;
- desc->hw->next = 0;
desc->hw->size = 0;
desc->hw->src_addr = 0;
desc->hw->dst_addr = 0;
desc->async_tx.ack = 1;
-
- list_add_tail(&desc->node, &ioat_chan->used_desc);
+ switch (ioat_chan->device->version) {
+ case IOAT_VER_1_2:
+ desc->hw->next = 0;
+ list_add_tail(&desc->node, &ioat_chan->used_desc);
+
+ writel(((u64) desc->async_tx.phys) & 0x00000000FFFFFFFF,
+ ioat_chan->reg_base + IOAT1_CHAINADDR_OFFSET_LOW);
+ writel(((u64) desc->async_tx.phys) >> 32,
+ ioat_chan->reg_base + IOAT1_CHAINADDR_OFFSET_HIGH);
+
+ writeb(IOAT_CHANCMD_START, ioat_chan->reg_base
+ + IOAT_CHANCMD_OFFSET(ioat_chan->device->version));
+ break;
+ case IOAT_VER_2_0:
+ writel(((u64) desc->async_tx.phys) & 0x00000000FFFFFFFF,
+ ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_LOW);
+ writel(((u64) desc->async_tx.phys) >> 32,
+ ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_HIGH);
+
+ ioat_chan->dmacount++;
+ __ioat2_dma_memcpy_issue_pending(ioat_chan);
+ break;
+ }
spin_unlock_bh(&ioat_chan->desc_lock);
-
- writel(((u64) desc->async_tx.phys) & 0x00000000FFFFFFFF,
- ioat_chan->reg_base + IOAT_CHAINADDR_OFFSET_LOW);
- writel(((u64) desc->async_tx.phys) >> 32,
- ioat_chan->reg_base + IOAT_CHAINADDR_OFFSET_HIGH);
-
- writeb(IOAT_CHANCMD_START, ioat_chan->reg_base + IOAT_CHANCMD_OFFSET);
}
/*
dma_chan = container_of(device->common.channels.next,
struct dma_chan,
device_node);
- if (ioat_dma_alloc_chan_resources(dma_chan) < 1) {
+ if (device->common.device_alloc_chan_resources(dma_chan) < 1) {
dev_err(&device->pdev->dev,
"selftest cannot allocate chan resource\n");
err = -ENODEV;
goto out;
}
- tx = ioat_dma_prep_memcpy(dma_chan, IOAT_TEST_SIZE, 0);
+ tx = device->common.device_prep_dma_memcpy(dma_chan, IOAT_TEST_SIZE, 0);
if (!tx) {
dev_err(&device->pdev->dev,
"Self-test prep failed, disabling\n");
async_tx_ack(tx);
addr = dma_map_single(dma_chan->device->dev, src, IOAT_TEST_SIZE,
- DMA_TO_DEVICE);
- ioat_set_src(addr, tx, 0);
+ DMA_TO_DEVICE);
+ tx->tx_set_src(addr, tx, 0);
addr = dma_map_single(dma_chan->device->dev, dest, IOAT_TEST_SIZE,
- DMA_FROM_DEVICE);
- ioat_set_dest(addr, tx, 0);
+ DMA_FROM_DEVICE);
+ tx->tx_set_dest(addr, tx, 0);
tx->callback = ioat_dma_test_callback;
tx->callback_param = (void *)0x8086;
- cookie = ioat_tx_submit(tx);
+ cookie = tx->tx_submit(tx);
if (cookie < 0) {
dev_err(&device->pdev->dev,
"Self-test setup failed, disabling\n");
err = -ENODEV;
goto free_resources;
}
- ioat_dma_memcpy_issue_pending(dma_chan);
+ device->common.device_issue_pending(dma_chan);
msleep(1);
- if (ioat_dma_is_complete(dma_chan, cookie, NULL, NULL) != DMA_SUCCESS) {
+ if (device->common.device_is_tx_complete(dma_chan, cookie, NULL, NULL)
+ != DMA_SUCCESS) {
dev_err(&device->pdev->dev,
"Self-test copy timed out, disabling\n");
err = -ENODEV;
}
free_resources:
- ioat_dma_free_chan_resources(dma_chan);
+ device->common.device_free_chan_resources(dma_chan);
out:
kfree(src);
kfree(dest);
INIT_LIST_HEAD(&device->common.channels);
ioat_dma_enumerate_channels(device);
- dma_cap_set(DMA_MEMCPY, device->common.cap_mask);
device->common.device_alloc_chan_resources =
ioat_dma_alloc_chan_resources;
device->common.device_free_chan_resources =
ioat_dma_free_chan_resources;
- device->common.device_prep_dma_memcpy = ioat_dma_prep_memcpy;
+ device->common.dev = &pdev->dev;
+
+ dma_cap_set(DMA_MEMCPY, device->common.cap_mask);
device->common.device_is_tx_complete = ioat_dma_is_complete;
- device->common.device_issue_pending = ioat_dma_memcpy_issue_pending;
device->common.device_dependency_added = ioat_dma_dependency_added;
- device->common.dev = &pdev->dev;
+ switch (device->version) {
+ case IOAT_VER_1_2:
+ device->common.device_prep_dma_memcpy = ioat1_dma_prep_memcpy;
+ device->common.device_issue_pending =
+ ioat1_dma_memcpy_issue_pending;
+ break;
+ case IOAT_VER_2_0:
+ device->common.device_prep_dma_memcpy = ioat2_dma_prep_memcpy;
+ device->common.device_issue_pending =
+ ioat2_dma_memcpy_issue_pending;
+ break;
+ }
+
dev_err(&device->pdev->dev,
"Intel(R) I/OAT DMA Engine found,"
" %d channels, device version 0x%02x, driver version %s\n",
/*
- * Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved.
+ * Copyright(c) 2004 - 2007 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
#include <linux/cache.h>
#include <linux/pci_ids.h>
-#define IOAT_DMA_VERSION "1.26"
+#define IOAT_DMA_VERSION "2.04"
enum ioat_interrupt {
none = 0,
};
#define IOAT_LOW_COMPLETION_MASK 0xffffffc0
+#define IOAT_DMA_DCA_ANY_CPU ~0
+
/**
* struct ioatdma_device - internal representation of a IOAT device
* @dma_pool: for allocating DMA descriptors
* @common: embedded struct dma_device
* @version: version of ioatdma device
+ * @irq_mode: which style irq to use
+ * @msix_entries: irq handlers
+ * @idx: per channel data
*/
struct ioatdma_device {
/**
* struct ioat_dma_chan - internal representation of a DMA channel
- * @device:
- * @reg_base:
- * @sw_in_use:
- * @completion:
- * @completion_low:
- * @completion_high:
- * @completed_cookie: last cookie seen completed on cleanup
- * @cookie: value of last cookie given to client
- * @last_completion:
- * @xfercap:
- * @desc_lock:
- * @free_desc:
- * @used_desc:
- * @resource:
- * @device_node:
*/
-
struct ioat_dma_chan {
void __iomem *reg_base;
struct list_head used_desc;
int pending;
+ int dmacount;
+ int desccount;
struct ioatdma_device *device;
struct dma_chan common;
struct ioatdma_device *ioat_dma_probe(struct pci_dev *pdev,
void __iomem *iobase);
void ioat_dma_remove(struct ioatdma_device *device);
-struct dca_provider *ioat_dca_init(struct pci_dev *pdev,
- void __iomem *iobase);
+struct dca_provider *ioat_dca_init(struct pci_dev *pdev, void __iomem *iobase);
+struct dca_provider *ioat2_dca_init(struct pci_dev *pdev, void __iomem *iobase);
#else
#define ioat_dma_probe(pdev, iobase) NULL
#define ioat_dma_remove(device) do { } while (0)
#define ioat_dca_init(pdev, iobase) NULL
+#define ioat2_dca_init(pdev, iobase) NULL
#endif
#endif /* IOATDMA_H */
/*
- * Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved.
+ * Copyright(c) 2004 - 2007 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
#define _IOAT_HW_H_
/* PCI Configuration Space Values */
-#define IOAT_PCI_VID 0x8086
-#define IOAT_PCI_DID 0x1A38
-#define IOAT_PCI_RID 0x00
-#define IOAT_PCI_SVID 0x8086
-#define IOAT_PCI_SID 0x8086
-#define IOAT_VER_1_2 0x12 /* Version 1.2 */
+#define IOAT_PCI_VID 0x8086
+
+/* CB device ID's */
+#define IOAT_PCI_DID_5000 0x1A38
+#define IOAT_PCI_DID_CNB 0x360B
+#define IOAT_PCI_DID_SCNB 0x65FF
+#define IOAT_PCI_DID_SNB 0x402F
+
+#define IOAT_PCI_RID 0x00
+#define IOAT_PCI_SVID 0x8086
+#define IOAT_PCI_SID 0x8086
+#define IOAT_VER_1_2 0x12 /* Version 1.2 */
+#define IOAT_VER_2_0 0x20 /* Version 2.0 */
struct ioat_dma_descriptor {
uint32_t size;
#define IOAT_DMA_DESCRIPTOR_CTL_CP_STS 0x00000008
#define IOAT_DMA_DESCRIPTOR_CTL_FRAME 0x00000010
#define IOAT_DMA_DESCRIPTOR_NUL 0x00000020
-#define IOAT_DMA_DESCRIPTOR_OPCODE 0xFF000000
+#define IOAT_DMA_DESCRIPTOR_CTL_SP_BRK 0x00000040
+#define IOAT_DMA_DESCRIPTOR_CTL_DP_BRK 0x00000080
+#define IOAT_DMA_DESCRIPTOR_CTL_BNDL 0x00000100
+#define IOAT_DMA_DESCRIPTOR_CTL_DCA 0x00000200
+#define IOAT_DMA_DESCRIPTOR_CTL_BUFHINT 0x00000400
+
+#define IOAT_DMA_DESCRIPTOR_CTL_OPCODE_CONTEXT 0xFF000000
+#define IOAT_DMA_DESCRIPTOR_CTL_OPCODE_DMA 0x00000000
+
+#define IOAT_DMA_DESCRIPTOR_CTL_CONTEXT_DCA 0x00000001
+#define IOAT_DMA_DESCRIPTOR_CTL_OPCODE_MASK 0xFF000000
#endif
#define IOAT_INTRCTRL_MASTER_INT_EN 0x01 /* Master Interrupt Enable */
#define IOAT_INTRCTRL_INT_STATUS 0x02 /* ATTNSTATUS -or- Channel Int */
#define IOAT_INTRCTRL_INT 0x04 /* INT_STATUS -and- MASTER_INT_EN */
-#define IOAT_INTRCTRL_MSIX_VECTOR_CONTROL 0x08 /* Enable all MSI-X vectors */
+#define IOAT_INTRCTRL_MSIX_VECTOR_CONTROL 0x08 /* Enable all MSI-X vectors */
#define IOAT_ATTNSTATUS_OFFSET 0x04 /* Each bit is a channel */
#define IOAT_VER_OFFSET 0x08 /* 8-bit */
#define IOAT_VER_MAJOR_MASK 0xF0
#define IOAT_VER_MINOR_MASK 0x0F
-#define GET_IOAT_VER_MAJOR(x) ((x) & IOAT_VER_MAJOR_MASK)
+#define GET_IOAT_VER_MAJOR(x) (((x) & IOAT_VER_MAJOR_MASK) >> 4)
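+/* e.g. a raw version byte of 0x12 reads back as major 1, minor 2 */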
#define GET_IOAT_VER_MINOR(x) ((x) & IOAT_VER_MINOR_MASK)
#define IOAT_PERPORTOFFSET_OFFSET 0x0A /* 16-bit */
#define IOAT_INTRDELAY_OFFSET 0x0C /* 16-bit */
#define IOAT_INTRDELAY_INT_DELAY_MASK 0x3FFF /* Interrupt Delay Time */
-#define IOAT_INTRDELAY_COALESE_SUPPORT 0x8000 /* Interrupt Coalesing Supported */
+#define IOAT_INTRDELAY_COALESE_SUPPORT 0x8000 /* Interrupt Coalescing Supported */
#define IOAT_DEVICE_STATUS_OFFSET 0x0E /* 16-bit */
#define IOAT_DEVICE_STATUS_DEGRADED_MODE 0x0001
-
#define IOAT_CHANNEL_MMIO_SIZE 0x80 /* Each Channel MMIO space is this size */
/* DMA Channel Registers */
#define IOAT_CHANCTRL_ERR_COMPLETION_EN 0x0004
#define IOAT_CHANCTRL_INT_DISABLE 0x0001
-#define IOAT_DMA_COMP_OFFSET 0x02 /* 16-bit DMA channel compatability */
-#define IOAT_DMA_COMP_V1 0x0001 /* Compatability with DMA version 1 */
-
-#define IOAT_CHANSTS_OFFSET 0x04 /* 64-bit Channel Status Register */
-#define IOAT_CHANSTS_OFFSET_LOW 0x04
-#define IOAT_CHANSTS_OFFSET_HIGH 0x08
-#define IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR 0xFFFFFFFFFFFFFFC0UL
+#define IOAT_DMA_COMP_OFFSET 0x02 /* 16-bit DMA channel compatibility */
+#define IOAT_DMA_COMP_V1 0x0001 /* Compatibility with DMA version 1 */
+#define IOAT_DMA_COMP_V2 0x0002 /* Compatibility with DMA version 2 */
+
+
+#define IOAT1_CHANSTS_OFFSET 0x04 /* 64-bit Channel Status Register */
+#define IOAT2_CHANSTS_OFFSET 0x08 /* 64-bit Channel Status Register */
+#define IOAT_CHANSTS_OFFSET(ver) ((ver) < IOAT_VER_2_0 \
+ ? IOAT1_CHANSTS_OFFSET : IOAT2_CHANSTS_OFFSET)
+#define IOAT1_CHANSTS_OFFSET_LOW 0x04
+#define IOAT2_CHANSTS_OFFSET_LOW 0x08
+#define IOAT_CHANSTS_OFFSET_LOW(ver) ((ver) < IOAT_VER_2_0 \
+ ? IOAT1_CHANSTS_OFFSET_LOW : IOAT2_CHANSTS_OFFSET_LOW)
+#define IOAT1_CHANSTS_OFFSET_HIGH 0x08
+#define IOAT2_CHANSTS_OFFSET_HIGH 0x0C
+#define IOAT_CHANSTS_OFFSET_HIGH(ver) ((ver) < IOAT_VER_2_0 \
+ ? IOAT1_CHANSTS_OFFSET_HIGH : IOAT2_CHANSTS_OFFSET_HIGH)
+#define IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR ~0x3F
#define IOAT_CHANSTS_SOFT_ERR 0x0000000000000010
+#define IOAT_CHANSTS_UNAFFILIATED_ERR 0x0000000000000008
#define IOAT_CHANSTS_DMA_TRANSFER_STATUS 0x0000000000000007
#define IOAT_CHANSTS_DMA_TRANSFER_STATUS_ACTIVE 0x0
#define IOAT_CHANSTS_DMA_TRANSFER_STATUS_DONE 0x1
#define IOAT_CHANSTS_DMA_TRANSFER_STATUS_SUSPENDED 0x2
#define IOAT_CHANSTS_DMA_TRANSFER_STATUS_HALTED 0x3
-#define IOAT_CHAINADDR_OFFSET 0x0C /* 64-bit Descriptor Chain Address Register */
-#define IOAT_CHAINADDR_OFFSET_LOW 0x0C
-#define IOAT_CHAINADDR_OFFSET_HIGH 0x10
-#define IOAT_CHANCMD_OFFSET 0x14 /* 8-bit DMA Channel Command Register */
+
+#define IOAT_CHAN_DMACOUNT_OFFSET 0x06 /* 16-bit DMA Count register */
+
+#define IOAT_DCACTRL_OFFSET 0x30 /* 32 bit Direct Cache Access Control Register */
+#define IOAT_DCACTRL_CMPL_WRITE_ENABLE 0x10000
+#define IOAT_DCACTRL_TARGET_CPU_MASK 0xFFFF /* APIC ID */
+
+/* CB DCA Memory Space Registers */
+#define IOAT_DCAOFFSET_OFFSET 0x14
+/* CB_BAR + IOAT_DCAOFFSET value */
+#define IOAT_DCA_VER_OFFSET 0x00
+#define IOAT_DCA_VER_MAJOR_MASK 0xF0
+#define IOAT_DCA_VER_MINOR_MASK 0x0F
+
+#define IOAT_DCA_COMP_OFFSET 0x02
+#define IOAT_DCA_COMP_V1 0x1
+
+#define IOAT_FSB_CAPABILITY_OFFSET 0x04
+#define IOAT_FSB_CAPABILITY_PREFETCH 0x1
+
+#define IOAT_PCI_CAPABILITY_OFFSET 0x06
+#define IOAT_PCI_CAPABILITY_MEMWR 0x1
+
+#define IOAT_FSB_CAP_ENABLE_OFFSET 0x08
+#define IOAT_FSB_CAP_ENABLE_PREFETCH 0x1
+
+#define IOAT_PCI_CAP_ENABLE_OFFSET 0x0A
+#define IOAT_PCI_CAP_ENABLE_MEMWR 0x1
+
+#define IOAT_APICID_TAG_MAP_OFFSET 0x0C
+#define IOAT_APICID_TAG_MAP_TAG0 0x0000000F
+#define IOAT_APICID_TAG_MAP_TAG0_SHIFT 0
+#define IOAT_APICID_TAG_MAP_TAG1 0x000000F0
+#define IOAT_APICID_TAG_MAP_TAG1_SHIFT 4
+#define IOAT_APICID_TAG_MAP_TAG2 0x00000F00
+#define IOAT_APICID_TAG_MAP_TAG2_SHIFT 8
+#define IOAT_APICID_TAG_MAP_TAG3 0x0000F000
+#define IOAT_APICID_TAG_MAP_TAG3_SHIFT 12
+#define IOAT_APICID_TAG_MAP_TAG4 0x000F0000
+#define IOAT_APICID_TAG_MAP_TAG4_SHIFT 16
+#define IOAT_APICID_TAG_CB2_VALID 0x8080808080
+
+#define IOAT_DCA_GREQID_OFFSET 0x10
+#define IOAT_DCA_GREQID_SIZE 0x04
+#define IOAT_DCA_GREQID_MASK 0xFFFF
+#define IOAT_DCA_GREQID_IGNOREFUN 0x10000000
+#define IOAT_DCA_GREQID_VALID 0x20000000
+#define IOAT_DCA_GREQID_LASTID 0x80000000
+
+#define IOAT1_CHAINADDR_OFFSET 0x0C /* 64-bit Descriptor Chain Address Register */
+#define IOAT2_CHAINADDR_OFFSET 0x10 /* 64-bit Descriptor Chain Address Register */
+#define IOAT_CHAINADDR_OFFSET(ver) ((ver) < IOAT_VER_2_0 \
+ ? IOAT1_CHAINADDR_OFFSET : IOAT2_CHAINADDR_OFFSET)
+#define IOAT1_CHAINADDR_OFFSET_LOW 0x0C
+#define IOAT2_CHAINADDR_OFFSET_LOW 0x10
+#define IOAT_CHAINADDR_OFFSET_LOW(ver) ((ver) < IOAT_VER_2_0 \
+ ? IOAT1_CHAINADDR_OFFSET_LOW : IOAT2_CHAINADDR_OFFSET_LOW)
+#define IOAT1_CHAINADDR_OFFSET_HIGH 0x10
+#define IOAT2_CHAINADDR_OFFSET_HIGH 0x14
+#define IOAT_CHAINADDR_OFFSET_HIGH(ver) ((ver) < IOAT_VER_2_0 \
+ ? IOAT1_CHAINADDR_OFFSET_HIGH : IOAT2_CHAINADDR_OFFSET_HIGH)
+
+#define IOAT1_CHANCMD_OFFSET 0x14 /* 8-bit DMA Channel Command Register */
+#define IOAT2_CHANCMD_OFFSET 0x04 /* 8-bit DMA Channel Command Register */
+#define IOAT_CHANCMD_OFFSET(ver) ((ver) < IOAT_VER_2_0 \
+ ? IOAT1_CHANCMD_OFFSET : IOAT2_CHANCMD_OFFSET)
#define IOAT_CHANCMD_RESET 0x20
#define IOAT_CHANCMD_RESUME 0x10
#define IOAT_CHANCMD_ABORT 0x08
#define IOAT_CHANERR_COMPLETION_ADDR_ERR 0x1000
#define IOAT_CHANERR_INT_CONFIGURATION_ERR 0x2000
#define IOAT_CHANERR_SOFT_ERR 0x4000
+#define IOAT_CHANERR_UNAFFILIATED_ERR 0x8000
#define IOAT_CHANERR_MASK_OFFSET 0x2C /* 32-bit Channel Error Register */
*
*/
static struct pci_driver i5000_driver = {
- .name = __stringify(KBUILD_BASENAME),
+ .name = KBUILD_BASENAME,
.probe = i5000_init_one,
.remove = __devexit_p(i5000_remove_one),
.id_table = i5000_pci_tbl,
CD-ROM drive, you can say N to all other CD-ROM options, but be sure
to say Y or M to "ISO 9660 CD-ROM file system support".
- Note that older versions of LILO (LInux LOader) cannot properly deal
- with IDE/ATAPI CD-ROMs, so install LILO 16 or higher, available from
- <http://lilo.go.dyndns.org/>.
-
To compile this driver as a module, choose M here: the
module will be called ide-cd.
hold = ATA_DMA2_HOLD;
break;
default:
- BUG();
- break;
+ return;
}
if (speed >= XFER_UDMA_0)
return do_rw_taskfile(drive, args);
} else if (rq->cmd_type == REQ_TYPE_ATA_TASK) {
u8 *args = rq->buffer;
- u8 sel;
if (!args)
goto done;
hwif->OUTB(args[3], IDE_SECTOR_REG);
hwif->OUTB(args[4], IDE_LCYL_REG);
hwif->OUTB(args[5], IDE_HCYL_REG);
- sel = (args[6] & ~0x10);
- if (drive->select.b.unit)
- sel |= 0x10;
- hwif->OUTB(sel, IDE_SELECT_REG);
+ hwif->OUTB((args[6] & 0xEF)|drive->select.all, IDE_SELECT_REG);
ide_cmd(drive, args[0], args[2], &drive_cmd_intr);
return ide_started;
} else if (rq->cmd_type == REQ_TYPE_ATA_CMD) {
if (drive->addressing == 1) {
__u64 sectors = 0;
u32 low = 0, high = 0;
+ hwif->OUTB(drive->ctl&~0x80, IDE_CONTROL_REG);
low = ide_read_24(drive);
hwif->OUTB(drive->ctl|0x80, IDE_CONTROL_REG);
high = ide_read_24(drive);
/*
- * linux/drivers/ide/pci/cmd64x.c Version 1.50 May 10, 2007
+ * linux/drivers/ide/pci/cmd64x.c Version 1.51 Nov 8, 2007
*
* cmd64x.c: Enable interrupts at initialization time on Ultra/PCI machines.
* Due to massive hardware bugs, UltraDMA is only supported
u8 mrdmode = inb(hwif->dma_master + 0x01);
/* clear the interrupt bit */
- outb(mrdmode | irq_mask, hwif->dma_master + 0x01);
+ outb((mrdmode & ~(MRDMODE_INTR_CH0 | MRDMODE_INTR_CH1)) | irq_mask,
+ hwif->dma_master + 0x01);
return err;
}
case XFER_MW_DMA_1: timings = 0x00012121; break;
case XFER_MW_DMA_2: timings = 0x00002020; break;
default:
- BUG();
- break;
+ return;
}
basereg = CS5530_BASEREG(drive->hwif);
reg = inl(basereg + 4); /* get drive0 config register */
static int __devinit it821x_init_one(struct pci_dev *dev, const struct pci_device_id *id)
{
- ide_setup_pci_device(dev, &it821x_chipsets[id->driver_data]);
- return 0;
+ return ide_setup_pci_device(dev, &it821x_chipsets[id->driver_data]);
}
static const struct pci_device_id it821x_pci_tbl[] = {
static int __devinit jmicron_init_one(struct pci_dev *dev, const struct pci_device_id *id)
{
- ide_setup_pci_device(dev, &jmicron_chipset);
- return 0;
+ return ide_setup_pci_device(dev, &jmicron_chipset);
}
/* All JMB PATA controllers have and will continue to have the same
}
break;
default:
- BUG();
- break;
+ return;
}
if (unit == 0) { /* are we configuring drive0? */
sis_program_timings(drive, speed);
break;
default:
- BUG();
break;
}
}
hwif->drives[0].autotune = IDE_TUNE_AUTO;
hwif->drives[1].autotune = IDE_TUNE_AUTO;
hwif->host_flags = IDE_HFLAG_SET_PIO_MODE_KEEP_DMA |
+ IDE_HFLAG_PIO_NO_DOWNGRADE |
IDE_HFLAG_POST_SET_MODE;
hwif->pio_mask = ATA_PIO4;
hwif->set_pio_mode = pmac_ide_set_pio_mode;
* May be copied or modified under the terms of the GNU General Public License
*/
-/*
- * This module provides support for automatic detection and
- * configuration of all PCI IDE interfaces present in a system.
- */
-
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
free_regs:
free_page(lg->regs_page);
release_guest:
- memset(lg, 0, sizeof(*lg));
+ kfree(lg);
unlock:
mutex_unlock(&lguest_lock);
return err;
}
static struct dma_async_tx_descriptor *
-ops_run_biodrain(struct stripe_head *sh, struct dma_async_tx_descriptor *tx)
+ops_run_biodrain(struct stripe_head *sh, struct dma_async_tx_descriptor *tx,
+ unsigned long pending)
{
int disks = sh->disks;
int pd_idx = sh->pd_idx, i;
/* check if prexor is active which means only process blocks
* that are part of a read-modify-write (Wantprexor)
*/
- int prexor = test_bit(STRIPE_OP_PREXOR, &sh->ops.pending);
+ int prexor = test_bit(STRIPE_OP_PREXOR, &pending);
pr_debug("%s: stripe %llu\n", __FUNCTION__,
(unsigned long long)sh->sector);
}
static void
-ops_run_postxor(struct stripe_head *sh, struct dma_async_tx_descriptor *tx)
+ops_run_postxor(struct stripe_head *sh, struct dma_async_tx_descriptor *tx,
+ unsigned long pending)
{
/* kernel stack size limits the total number of disks */
int disks = sh->disks;
int count = 0, pd_idx = sh->pd_idx, i;
struct page *xor_dest;
- int prexor = test_bit(STRIPE_OP_PREXOR, &sh->ops.pending);
+ int prexor = test_bit(STRIPE_OP_PREXOR, &pending);
unsigned long flags;
dma_async_tx_callback callback;
}
/* check whether this postxor is part of a write */
- callback = test_bit(STRIPE_OP_BIODRAIN, &sh->ops.pending) ?
+ callback = test_bit(STRIPE_OP_BIODRAIN, &pending) ?
ops_complete_write : ops_complete_postxor;
/* 1/ if we prexor'd then the dest is reused as a source
tx = ops_run_prexor(sh, tx);
if (test_bit(STRIPE_OP_BIODRAIN, &pending)) {
- tx = ops_run_biodrain(sh, tx);
+ tx = ops_run_biodrain(sh, tx, pending);
overlap_clear++;
}
if (test_bit(STRIPE_OP_POSTXOR, &pending))
- ops_run_postxor(sh, tx);
+ ops_run_postxor(sh, tx, pending);
if (test_bit(STRIPE_OP_CHECK, &pending))
ops_run_check(sh);
idd->idd_pdev->bus->number == pdev->bus->number &&
3 == PCI_SLOT(pdev->devfn))
found = 1;
- pci_dev_put(pdev);
} while (pdev && !found);
- if (NULL != pdev)
+ if (NULL != pdev) {
+ pci_dev_put(pdev);
return IOC4_VARIANT_IO9;
+ }
/* IO10: Look for a Vitesse VSC 7174 at the same bus and slot 3. */
pdev = NULL;
idd->idd_pdev->bus->number == pdev->bus->number &&
3 == PCI_SLOT(pdev->devfn))
found = 1;
- pci_dev_put(pdev);
} while (pdev && !found);
- if (NULL != pdev)
+ if (NULL != pdev) {
+ pci_dev_put(pdev);
return IOC4_VARIANT_IO10;
+ }
/* PCI-RT: No SCSI/SATA controller will be present */
return IOC4_VARIANT_PCI_RT;
#include <asm/system.h>
#include <asm/ethernet.h>
#include <asm/cache.h>
+#include <asm/arch/io_interface_mux.h>
//#define ETHDEBUG
#define D(x)
* by this lock as well.
*/
spinlock_t lock;
+
+ spinlock_t led_lock; /* Protect LED state */
+ spinlock_t transceiver_lock; /* Protect transceiver state. */
};
typedef struct etrax_eth_descr
void (*check_duplex)(struct net_device* dev);
};
-struct transceiver_ops* transceiver;
-
/* Duplex settings */
enum duplex
{
/* Dma descriptors etc. */
-#define MAX_MEDIA_DATA_SIZE 1518
+#define MAX_MEDIA_DATA_SIZE 1522
#define MIN_PACKET_LEN 46
#define ETHER_HEAD_LEN 14
/*Intel LXT972A specific*/
#define MDIO_INT_STATUS_REG_2 0x0011
-#define MDIO_INT_FULL_DUPLEX_IND ( 1 << 9 )
-#define MDIO_INT_SPEED ( 1 << 14 )
+#define MDIO_INT_FULL_DUPLEX_IND (1 << 9)
+#define MDIO_INT_SPEED (1 << 14)
/* Network flash constants */
#define NET_FLASH_TIME (HZ/50) /* 20 ms */
#define NO_NETWORK_ACTIVITY 0
#define NETWORK_ACTIVITY 1
-#define NBR_OF_RX_DESC 64
-#define NBR_OF_TX_DESC 256
+#define NBR_OF_RX_DESC 32
+#define NBR_OF_TX_DESC 16
/* Large packets are sent directly to upper layers while small packets are */
/* copied (to reduce memory waste). The following constant decides the breakpoint */
static etrax_eth_descr *myNextRxDesc; /* Points to the next descriptor to
to be processed */
static etrax_eth_descr *myLastRxDesc; /* The last processed descriptor */
-static etrax_eth_descr *myPrevRxDesc; /* The descriptor right before myNextRxDesc */
static etrax_eth_descr RxDescList[NBR_OF_RX_DESC] __attribute__ ((aligned(32)));
static etrax_eth_descr TxDescList[NBR_OF_TX_DESC] __attribute__ ((aligned(32)));
static unsigned int network_rec_config_shadow = 0;
-static unsigned int mdio_phy_addr; /* Transciever address */
static unsigned int network_tr_ctrl_shadow = 0;
static void e100_tx_timeout(struct net_device *dev);
static struct net_device_stats *e100_get_stats(struct net_device *dev);
static void set_multicast_list(struct net_device *dev);
-static void e100_hardware_send_packet(char *buf, int length);
+static void e100_hardware_send_packet(struct net_local* np, char *buf, int length);
static void update_rx_stats(struct net_device_stats *);
static void update_tx_stats(struct net_device_stats *);
static int e100_probe_transceiver(struct net_device* dev);
static void e100_set_network_leds(int active);
static const struct ethtool_ops e100_ethtool_ops;
-
+#if defined(CONFIG_ETRAX_NO_PHY)
+static void dummy_check_speed(struct net_device* dev);
+static void dummy_check_duplex(struct net_device* dev);
+#else
static void broadcom_check_speed(struct net_device* dev);
static void broadcom_check_duplex(struct net_device* dev);
static void tdk_check_speed(struct net_device* dev);
static void intel_check_duplex(struct net_device* dev);
static void generic_check_speed(struct net_device* dev);
static void generic_check_duplex(struct net_device* dev);
+#endif
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void e100_netpoll(struct net_device* dev);
+#endif
+
+static int autoneg_normal = 1;
struct transceiver_ops transceivers[] =
{
+#if defined(CONFIG_ETRAX_NO_PHY)
+ {0x0000, dummy_check_speed, dummy_check_duplex} /* Dummy */
+#else
{0x1018, broadcom_check_speed, broadcom_check_duplex}, /* Broadcom */
{0xC039, tdk_check_speed, tdk_check_duplex}, /* TDK 2120 */
{0x039C, tdk_check_speed, tdk_check_duplex}, /* TDK 2120C */
{0x04de, intel_check_speed, intel_check_duplex}, /* Intel LXT972A*/
{0x0000, generic_check_speed, generic_check_duplex} /* Generic, must be last */
+#endif
};
+struct transceiver_ops* transceiver = &transceivers[0];
+
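/* The transmit is considered done when DMA channel 0's command register reads zero. */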
#define tx_done(dev) (*R_DMA_CH0_CMD == 0)
/*
int i, err;
printk(KERN_INFO
- "ETRAX 100LX 10/100MBit ethernet v2.0 (c) 2000-2003 Axis Communications AB\n");
+ "ETRAX 100LX 10/100MBit ethernet v2.0 (c) 1998-2007 Axis Communications AB\n");
- dev = alloc_etherdev(sizeof(struct net_local));
- np = dev->priv;
+ if (cris_request_io_interface(if_eth, cardname)) {
+ printk(KERN_CRIT "etrax_ethernet_init failed to get IO interface\n");
+ return -EBUSY;
+ }
+ dev = alloc_etherdev(sizeof(struct net_local));
if (!dev)
return -ENOMEM;
+ np = netdev_priv(dev);
+
+ /* we do our own locking */
+ dev->features |= NETIF_F_LLTX;
+
dev->base_addr = (unsigned int)R_NETWORK_SA_0; /* just to have something to show */
/* now setup our etrax specific stuff */
dev->do_ioctl = e100_ioctl;
dev->set_config = e100_set_config;
dev->tx_timeout = e100_tx_timeout;
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ dev->poll_controller = e100_netpoll;
+#endif
+
+ spin_lock_init(&np->lock);
+ spin_lock_init(&np->led_lock);
+ spin_lock_init(&np->transceiver_lock);
/* Initialise the list of Etrax DMA-descriptors */
/* Initialise receive descriptors */
for (i = 0; i < NBR_OF_RX_DESC; i++) {
- /* Allocate two extra cachelines to make sure that buffer used by DMA
- * does not share cacheline with any other data (to avoid cache bug)
+ /* Allocate two extra cachelines to make sure that buffer used
+ * by DMA does not share cacheline with any other data (to
+ * avoid cache bug)
*/
RxDescList[i].skb = dev_alloc_skb(MAX_MEDIA_DATA_SIZE + 2 * L1_CACHE_BYTES);
if (!RxDescList[i].skb)
myNextRxDesc = &RxDescList[0];
myLastRxDesc = &RxDescList[NBR_OF_RX_DESC - 1];
- myPrevRxDesc = &RxDescList[NBR_OF_RX_DESC - 1];
myFirstTxDesc = &TxDescList[0];
myNextTxDesc = &TxDescList[0];
myLastTxDesc = &TxDescList[NBR_OF_TX_DESC - 1];
current_speed = 10;
current_speed_selection = 0; /* Auto */
speed_timer.expires = jiffies + NET_LINK_UP_CHECK_INTERVAL;
- duplex_timer.data = (unsigned long)dev;
+ speed_timer.data = (unsigned long)dev;
speed_timer.function = e100_check_speed;
clear_led_timer.function = e100_clear_network_leds;
+ clear_led_timer.data = (unsigned long)dev;
full_duplex = 0;
current_duplex = autoneg;
duplex_timer.function = e100_check_duplex;
/* Initialize mii interface */
- np->mii_if.phy_id = mdio_phy_addr;
np->mii_if.phy_id_mask = 0x1f;
np->mii_if.reg_num_mask = 0x1f;
np->mii_if.dev = dev;
/* unwanted addresses are matched */
*R_NETWORK_GA_0 = 0x00000000;
*R_NETWORK_GA_1 = 0x00000000;
+
+ /* Initialize the next time the LEDs may flash */
+ led_next_time = jiffies;
return 0;
}
static int
e100_set_mac_address(struct net_device *dev, void *p)
{
- struct net_local *np = (struct net_local *)dev->priv;
+ struct net_local *np = netdev_priv(dev);
struct sockaddr *addr = p;
- int i;
+ DECLARE_MAC_BUF(mac);
spin_lock(&np->lock); /* preemption protection */
goto grace_exit2;
}
+ /*
+ * Always allocate the DMA channels after the IRQ,
+ * and clean up on failure.
+ */
+
+ if (cris_request_dma(NETWORK_TX_DMA_NBR,
+ cardname,
+ DMA_VERBOSE_ON_ERROR,
+ dma_eth)) {
+ goto grace_exit3;
+ }
+
+ if (cris_request_dma(NETWORK_RX_DMA_NBR,
+ cardname,
+ DMA_VERBOSE_ON_ERROR,
+ dma_eth)) {
+ goto grace_exit4;
+ }
+
/* give the HW an idea of what MAC address we want */
*R_NETWORK_SA_0 = dev->dev_addr[0] | (dev->dev_addr[1] << 8) |
*R_NETWORK_REC_CONFIG = 0xd; /* broadcast rec, individ. rec, ma0 enabled */
#else
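+ /* The receive config register cannot be read back, so all changes
+ * are built up in its _shadow copy before being written out. */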
+ SETS(network_rec_config_shadow, R_NETWORK_REC_CONFIG, max_size, size1522);
SETS(network_rec_config_shadow, R_NETWORK_REC_CONFIG, broadcast, receive);
SETS(network_rec_config_shadow, R_NETWORK_REC_CONFIG, ma0, enable);
SETF(network_rec_config_shadow, R_NETWORK_REC_CONFIG, duplex, full_duplex);
SETS(network_tr_ctrl_shadow, R_NETWORK_TR_CTRL, crc, enable);
*R_NETWORK_TR_CTRL = network_tr_ctrl_shadow;
- save_flags(flags);
- cli();
+ local_irq_save(flags);
/* enable the irq's for ethernet DMA */
*R_DMA_CH0_FIRST = 0;
*R_DMA_CH0_DESCR = virt_to_phys(myLastTxDesc);
+ netif_start_queue(dev);
- restore_flags(flags);
+ local_irq_restore(flags);
/* Probe for transceiver */
if (e100_probe_transceiver(dev))
- goto grace_exit3;
+ goto grace_exit5;
/* Start duplex/speed timers */
add_timer(&speed_timer);
/* We are now ready to accept transmit requests from
* the queueing layer of the networking stack.
*/
- netif_start_queue(dev);
+ netif_carrier_on(dev);
return 0;
+grace_exit5:
+ cris_free_dma(NETWORK_RX_DMA_NBR, cardname);
+grace_exit4:
+ cris_free_dma(NETWORK_TX_DMA_NBR, cardname);
grace_exit3:
free_irq(NETWORK_STATUS_IRQ_NBR, (void *)dev);
grace_exit2:
return -EAGAIN;
}
-
+#if defined(CONFIG_ETRAX_NO_PHY)
+static void
+dummy_check_speed(struct net_device* dev)
+{
+ current_speed = 100;
+}
+#else
static void
generic_check_speed(struct net_device* dev)
{
unsigned long data;
- data = e100_get_mdio_reg(dev, mdio_phy_addr, MII_ADVERTISE);
+ struct net_local *np = netdev_priv(dev);
+
+ data = e100_get_mdio_reg(dev, np->mii_if.phy_id, MII_ADVERTISE);
if ((data & ADVERTISE_100FULL) ||
(data & ADVERTISE_100HALF))
current_speed = 100;
tdk_check_speed(struct net_device* dev)
{
unsigned long data;
- data = e100_get_mdio_reg(dev, mdio_phy_addr, MDIO_TDK_DIAGNOSTIC_REG);
+ struct net_local *np = netdev_priv(dev);
+
+ data = e100_get_mdio_reg(dev, np->mii_if.phy_id,
+ MDIO_TDK_DIAGNOSTIC_REG);
current_speed = (data & MDIO_TDK_DIAGNOSTIC_RATE ? 100 : 10);
}
broadcom_check_speed(struct net_device* dev)
{
unsigned long data;
- data = e100_get_mdio_reg(dev, mdio_phy_addr, MDIO_AUX_CTRL_STATUS_REG);
+ struct net_local *np = netdev_priv(dev);
+
+ data = e100_get_mdio_reg(dev, np->mii_if.phy_id,
+ MDIO_AUX_CTRL_STATUS_REG);
current_speed = (data & MDIO_BC_SPEED ? 100 : 10);
}
intel_check_speed(struct net_device* dev)
{
unsigned long data;
- data = e100_get_mdio_reg(dev, mdio_phy_addr, MDIO_INT_STATUS_REG_2);
+ struct net_local *np = netdev_priv(dev);
+
+ data = e100_get_mdio_reg(dev, np->mii_if.phy_id,
+ MDIO_INT_STATUS_REG_2);
current_speed = (data & MDIO_INT_SPEED ? 100 : 10);
}
-
+#endif
static void
e100_check_speed(unsigned long priv)
{
struct net_device* dev = (struct net_device*)priv;
+ struct net_local *np = netdev_priv(dev);
static int led_initiated = 0;
unsigned long data;
int old_speed = current_speed;
- data = e100_get_mdio_reg(dev, mdio_phy_addr, MII_BMSR);
+ spin_lock(&np->transceiver_lock);
+
+ data = e100_get_mdio_reg(dev, np->mii_if.phy_id, MII_BMSR);
if (!(data & BMSR_LSTATUS)) {
current_speed = 0;
} else {
transceiver->check_speed(dev);
}
+ spin_lock(&np->led_lock);
if ((old_speed != current_speed) || !led_initiated) {
led_initiated = 1;
e100_set_network_leds(NO_NETWORK_ACTIVITY);
+ if (current_speed)
+ netif_carrier_on(dev);
+ else
+ netif_carrier_off(dev);
}
+ spin_unlock(&np->led_lock);
/* Reinitialize the timer. */
speed_timer.expires = jiffies + NET_LINK_UP_CHECK_INTERVAL;
add_timer(&speed_timer);
+
+ spin_unlock(&np->transceiver_lock);
}
static void
e100_negotiate(struct net_device* dev)
{
- unsigned short data = e100_get_mdio_reg(dev, mdio_phy_addr, MII_ADVERTISE);
+ struct net_local *np = netdev_priv(dev);
+ unsigned short data = e100_get_mdio_reg(dev, np->mii_if.phy_id,
+ MII_ADVERTISE);
/* Discard old speed and duplex settings */
data &= ~(ADVERTISE_100HALF | ADVERTISE_100FULL |
ADVERTISE_10HALF | ADVERTISE_10FULL);
switch (current_speed_selection) {
- case 10 :
+ case 10:
if (current_duplex == full)
data |= ADVERTISE_10FULL;
else if (current_duplex == half)
data |= ADVERTISE_10HALF | ADVERTISE_10FULL;
break;
- case 100 :
+ case 100:
if (current_duplex == full)
data |= ADVERTISE_100FULL;
else if (current_duplex == half)
data |= ADVERTISE_100HALF | ADVERTISE_100FULL;
break;
- case 0 : /* Auto */
+ case 0: /* Auto */
if (current_duplex == full)
data |= ADVERTISE_100FULL | ADVERTISE_10FULL;
else if (current_duplex == half)
data |= ADVERTISE_100HALF | ADVERTISE_10HALF;
else
data |= ADVERTISE_10HALF | ADVERTISE_10FULL |
ADVERTISE_100HALF | ADVERTISE_100FULL;
break;
- default : /* assume autoneg speed and duplex */
+ default: /* assume autoneg speed and duplex */
data |= ADVERTISE_10HALF | ADVERTISE_10FULL |
ADVERTISE_100HALF | ADVERTISE_100FULL;
+ break;
}
- e100_set_mdio_reg(dev, mdio_phy_addr, MII_ADVERTISE, data);
+ e100_set_mdio_reg(dev, np->mii_if.phy_id, MII_ADVERTISE, data);
/* Renegotiate with link partner */
- data = e100_get_mdio_reg(dev, mdio_phy_addr, MII_BMCR);
+ if (autoneg_normal) {
+ data = e100_get_mdio_reg(dev, np->mii_if.phy_id, MII_BMCR);
data |= BMCR_ANENABLE | BMCR_ANRESTART;
-
- e100_set_mdio_reg(dev, mdio_phy_addr, MII_BMCR, data);
+ }
+ e100_set_mdio_reg(dev, np->mii_if.phy_id, MII_BMCR, data);
}
static void
e100_set_speed(struct net_device* dev, unsigned long speed)
{
+ struct net_local *np = netdev_priv(dev);
+
+ spin_lock(&np->transceiver_lock);
if (speed != current_speed_selection) {
current_speed_selection = speed;
e100_negotiate(dev);
}
+ spin_unlock(&np->transceiver_lock);
}
static void
e100_check_duplex(unsigned long priv)
{
struct net_device *dev = (struct net_device *)priv;
- struct net_local *np = (struct net_local *)dev->priv;
- int old_duplex = full_duplex;
+ struct net_local *np = netdev_priv(dev);
+ int old_duplex;
+
+ spin_lock(&np->transceiver_lock);
+ old_duplex = full_duplex;
transceiver->check_duplex(dev);
if (old_duplex != full_duplex) {
/* Duplex changed */
duplex_timer.expires = jiffies + NET_DUPLEX_CHECK_INTERVAL;
add_timer(&duplex_timer);
np->mii_if.full_duplex = full_duplex;
+ spin_unlock(&np->transceiver_lock);
}
-
+#if defined(CONFIG_ETRAX_NO_PHY)
+static void
+dummy_check_duplex(struct net_device* dev)
+{
+ full_duplex = 1;
+}
+#else
static void
generic_check_duplex(struct net_device* dev)
{
unsigned long data;
- data = e100_get_mdio_reg(dev, mdio_phy_addr, MII_ADVERTISE);
+ struct net_local *np = netdev_priv(dev);
+
+ data = e100_get_mdio_reg(dev, np->mii_if.phy_id, MII_ADVERTISE);
if ((data & ADVERTISE_10FULL) ||
(data & ADVERTISE_100FULL))
full_duplex = 1;
tdk_check_duplex(struct net_device* dev)
{
unsigned long data;
- data = e100_get_mdio_reg(dev, mdio_phy_addr, MDIO_TDK_DIAGNOSTIC_REG);
+ struct net_local *np = netdev_priv(dev);
+
+ data = e100_get_mdio_reg(dev, np->mii_if.phy_id,
+ MDIO_TDK_DIAGNOSTIC_REG);
full_duplex = (data & MDIO_TDK_DIAGNOSTIC_DPLX) ? 1 : 0;
}
broadcom_check_duplex(struct net_device* dev)
{
unsigned long data;
- data = e100_get_mdio_reg(dev, mdio_phy_addr, MDIO_AUX_CTRL_STATUS_REG);
+ struct net_local *np = netdev_priv(dev);
+
+ data = e100_get_mdio_reg(dev, np->mii_if.phy_id,
+ MDIO_AUX_CTRL_STATUS_REG);
full_duplex = (data & MDIO_BC_FULL_DUPLEX_IND) ? 1 : 0;
}
intel_check_duplex(struct net_device* dev)
{
unsigned long data;
- data = e100_get_mdio_reg(dev, mdio_phy_addr, MDIO_INT_STATUS_REG_2);
+ struct net_local *np = netdev_priv(dev);
+
+ data = e100_get_mdio_reg(dev, np->mii_if.phy_id,
+ MDIO_INT_STATUS_REG_2);
full_duplex = (data & MDIO_INT_FULL_DUPLEX_IND) ? 1 : 0;
}
-
+#endif
static void
e100_set_duplex(struct net_device* dev, enum duplex new_duplex)
{
+ struct net_local *np = netdev_priv(dev);
+
+ spin_lock(&np->transceiver_lock);
if (new_duplex != current_duplex) {
current_duplex = new_duplex;
e100_negotiate(dev);
}
+ spin_unlock(&np->transceiver_lock);
}
static int
e100_probe_transceiver(struct net_device* dev)
{
+ int ret = 0;
+
+#if !defined(CONFIG_ETRAX_NO_PHY)
unsigned int phyid_high;
unsigned int phyid_low;
unsigned int oui;
struct transceiver_ops* ops = NULL;
+ struct net_local *np = netdev_priv(dev);
+
+ spin_lock(&np->transceiver_lock);
/* Probe MDIO physical address */
- for (mdio_phy_addr = 0; mdio_phy_addr <= 31; mdio_phy_addr++) {
- if (e100_get_mdio_reg(dev, mdio_phy_addr, MII_BMSR) != 0xffff)
+ for (np->mii_if.phy_id = 0; np->mii_if.phy_id <= 31;
+ np->mii_if.phy_id++) {
+ if (e100_get_mdio_reg(dev,
+ np->mii_if.phy_id, MII_BMSR) != 0xffff)
break;
}
- if (mdio_phy_addr == 32)
- return -ENODEV;
+ if (np->mii_if.phy_id == 32) {
+ ret = -ENODEV;
+ goto out;
+ }
/* Get manufacturer */
- phyid_high = e100_get_mdio_reg(dev, mdio_phy_addr, MII_PHYSID1);
- phyid_low = e100_get_mdio_reg(dev, mdio_phy_addr, MII_PHYSID2);
+ phyid_high = e100_get_mdio_reg(dev, np->mii_if.phy_id, MII_PHYSID1);
+ phyid_low = e100_get_mdio_reg(dev, np->mii_if.phy_id, MII_PHYSID2);
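/* Concatenate the OUI bits that are split across the two PHY ID registers. */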
oui = (phyid_high << 6) | (phyid_low >> 10);
for (ops = &transceivers[0]; ops->oui; ops++) {
if (ops->oui == oui)
break;
}
transceiver = ops;
-
- return 0;
+out:
+ spin_unlock(&np->transceiver_lock);
+#endif
+ return ret;
}
static int
static void
e100_reset_transceiver(struct net_device* dev)
{
+ struct net_local *np = netdev_priv(dev);
unsigned short cmd;
unsigned short data;
int bitCounter;
- data = e100_get_mdio_reg(dev, mdio_phy_addr, MII_BMCR);
+ data = e100_get_mdio_reg(dev, np->mii_if.phy_id, MII_BMCR);
- cmd = (MDIO_START << 14) | (MDIO_WRITE << 12) | (mdio_phy_addr << 7) | (MII_BMCR << 2);
+ cmd = (MDIO_START << 14) | (MDIO_WRITE << 12) | (np->mii_if.phy_id << 7) | (MII_BMCR << 2);
e100_send_mdio_cmd(cmd, 1);
static void
e100_tx_timeout(struct net_device *dev)
{
- struct net_local *np = (struct net_local *)dev->priv;
+ struct net_local *np = netdev_priv(dev);
unsigned long flags;
spin_lock_irqsave(&np->lock, flags);
e100_reset_transceiver(dev);
/* and get rid of the packets that never got an interrupt */
- while (myFirstTxDesc != myNextTxDesc)
- {
+ while (myFirstTxDesc != myNextTxDesc) {
dev_kfree_skb(myFirstTxDesc->skb);
myFirstTxDesc->skb = 0;
myFirstTxDesc = phys_to_virt(myFirstTxDesc->descr.next);
static int
e100_send_packet(struct sk_buff *skb, struct net_device *dev)
{
- struct net_local *np = (struct net_local *)dev->priv;
+ struct net_local *np = netdev_priv(dev);
unsigned char *buf = skb->data;
unsigned long flags;
dev->trans_start = jiffies;
- e100_hardware_send_packet(buf, skb->len);
+ e100_hardware_send_packet(np, buf, skb->len);
myNextTxDesc = phys_to_virt(myNextTxDesc->descr.next);
e100rxtx_interrupt(int irq, void *dev_id)
{
struct net_device *dev = (struct net_device *)dev_id;
- struct net_local *np = (struct net_local *)dev->priv;
- unsigned long irqbits = *R_IRQ_MASK2_RD;
+ struct net_local *np = netdev_priv(dev);
+ unsigned long irqbits;
- /* Disable RX/TX IRQs to avoid reentrancy */
- *R_IRQ_MASK2_CLR =
- IO_STATE(R_IRQ_MASK2_CLR, dma0_eop, clr) |
- IO_STATE(R_IRQ_MASK2_CLR, dma1_eop, clr);
+ /*
+ * Note that both rx and tx interrupts are blocked at this point,
+ * regardless of which got us here.
+ */
+
+ irqbits = *R_IRQ_MASK2_RD;
/* Handle received packets */
if (irqbits & IO_STATE(R_IRQ_MASK2_RD, dma1_eop, active)) {
* allocate a new buffer to put a packet in.
*/
e100_rx(dev);
- ((struct net_local *)dev->priv)->stats.rx_packets++;
+ np->stats.rx_packets++;
/* restart/continue on the channel, for safety */
*R_DMA_CH1_CMD = IO_STATE(R_DMA_CH1_CMD, cmd, restart);
/* clear dma channel 1 eop/descr irq bits */
}
/* Report any packets that have been sent */
- while (myFirstTxDesc != phys_to_virt(*R_DMA_CH0_FIRST) &&
- myFirstTxDesc != myNextTxDesc)
- {
+ while (virt_to_phys(myFirstTxDesc) != *R_DMA_CH0_FIRST &&
+ (netif_queue_stopped(dev) || myFirstTxDesc != myNextTxDesc)) {
np->stats.tx_bytes += myFirstTxDesc->skb->len;
np->stats.tx_packets++;
dev_kfree_skb_irq(myFirstTxDesc->skb);
myFirstTxDesc->skb = 0;
myFirstTxDesc = phys_to_virt(myFirstTxDesc->descr.next);
+ /* Wake up queue. */
+ netif_wake_queue(dev);
}
if (irqbits & IO_STATE(R_IRQ_MASK2_RD, dma0_eop, active)) {
- /* acknowledge the eop interrupt and wake up queue */
+ /* acknowledge the eop interrupt. */
*R_DMA_CH0_CLR_INTR = IO_STATE(R_DMA_CH0_CLR_INTR, clr_eop, do);
- netif_wake_queue(dev);
}
- /* Enable RX/TX IRQs again */
- *R_IRQ_MASK2_SET =
- IO_STATE(R_IRQ_MASK2_SET, dma0_eop, set) |
- IO_STATE(R_IRQ_MASK2_SET, dma1_eop, set);
-
return IRQ_HANDLED;
}
e100nw_interrupt(int irq, void *dev_id)
{
struct net_device *dev = (struct net_device *)dev_id;
- struct net_local *np = (struct net_local *)dev->priv;
+ struct net_local *np = netdev_priv(dev);
unsigned long irqbits = *R_IRQ_MASK0_RD;
/* check for underrun irq */
SETS(network_tr_ctrl_shadow, R_NETWORK_TR_CTRL, clr_error, clr);
*R_NETWORK_TR_CTRL = network_tr_ctrl_shadow;
SETS(network_tr_ctrl_shadow, R_NETWORK_TR_CTRL, clr_error, nop);
- *R_NETWORK_TR_CTRL = IO_STATE(R_NETWORK_TR_CTRL, clr_error, clr);
np->stats.tx_errors++;
D(printk("ethernet excessive collisions!\n"));
}
{
struct sk_buff *skb;
int length = 0;
- struct net_local *np = (struct net_local *)dev->priv;
+ struct net_local *np = netdev_priv(dev);
unsigned char *skb_data_ptr;
#ifdef ETHDEBUG
int i;
#endif
-
+ etrax_eth_descr *prevRxDesc; /* The descriptor right before myNextRxDesc */
+ spin_lock(&np->led_lock);
if (!led_active && time_after(jiffies, led_next_time)) {
/* light the network leds depending on the current speed. */
e100_set_network_leds(NETWORK_ACTIVITY);
led_active = 1;
mod_timer(&clear_led_timer, jiffies + HZ/10);
}
+ spin_unlock(&np->led_lock);
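/* hw_len includes the 4-byte FCS; strip it from the length passed upstream. */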
length = myNextRxDesc->descr.hw_len - 4;
- ((struct net_local *)dev->priv)->stats.rx_bytes += length;
+ np->stats.rx_bytes += length;
#ifdef ETHDEBUG
printk("Got a packet of length %d:\n", length);
if (!skb) {
np->stats.rx_errors++;
printk(KERN_NOTICE "%s: Memory squeeze, dropping packet.\n", dev->name);
- return;
+ goto update_nextrxdesc;
}
skb_put(skb, length - ETHER_HEAD_LEN); /* allocate room for the packet body */
else {
/* Large packet, send directly to upper layers and allocate new
* memory (aligned to cache line boundary to avoid bug).
- * Before sending the skb to upper layers we must make sure that
- * skb->data points to the aligned start of the packet.
+ * Before sending the skb to upper layers we must make sure
+ * that skb->data points to the aligned start of the packet.
*/
int align;
struct sk_buff *new_skb = dev_alloc_skb(MAX_MEDIA_DATA_SIZE + 2 * L1_CACHE_BYTES);
if (!new_skb) {
np->stats.rx_errors++;
printk(KERN_NOTICE "%s: Memory squeeze, dropping packet.\n", dev->name);
- return;
+ goto update_nextrxdesc;
}
skb = myNextRxDesc->skb;
align = (int)phys_to_virt(myNextRxDesc->descr.buf) - (int)skb->data;
/* Send the packet to the upper layers */
netif_rx(skb);
+ update_nextrxdesc:
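+ /* Drop paths jump here: the descriptor must still be recycled below. */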
/* Prepare for next packet */
myNextRxDesc->descr.status = 0;
- myPrevRxDesc = myNextRxDesc;
+ prevRxDesc = myNextRxDesc;
myNextRxDesc = phys_to_virt(myNextRxDesc->descr.next);
rx_queue_len++;
/* Check if descriptors should be returned */
if (rx_queue_len == RX_QUEUE_THRESHOLD) {
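/* Return the batch to the DMA by moving the end-of-list marker forward. */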
flush_etrax_cache();
- myPrevRxDesc->descr.ctrl |= d_eol;
+ prevRxDesc->descr.ctrl |= d_eol;
myLastRxDesc->descr.ctrl &= ~d_eol;
- myLastRxDesc = myPrevRxDesc;
+ myLastRxDesc = prevRxDesc;
rx_queue_len = 0;
}
}
static int
e100_close(struct net_device *dev)
{
- struct net_local *np = (struct net_local *)dev->priv;
+ struct net_local *np = netdev_priv(dev);
printk(KERN_INFO "Closing %s.\n", dev->name);
free_irq(NETWORK_DMA_TX_IRQ_NBR, (void *)dev);
free_irq(NETWORK_STATUS_IRQ_NBR, (void *)dev);
+ cris_free_dma(NETWORK_TX_DMA_NBR, cardname);
+ cris_free_dma(NETWORK_RX_DMA_NBR, cardname);
+
/* Update the statistics here. */
update_rx_stats(&np->stats);
{
struct mii_ioctl_data *data = if_mii(ifr);
struct net_local *np = netdev_priv(dev);
+ int rc = 0;
+ int old_autoneg;
spin_lock(&np->lock); /* Preempt protection */
switch (cmd) {
- case SIOCGMIIPHY: /* Get PHY address */
- data->phy_id = mdio_phy_addr;
- break;
- case SIOCGMIIREG: /* Read MII register */
- data->val_out = e100_get_mdio_reg(dev, mdio_phy_addr, data->reg_num);
- break;
- case SIOCSMIIREG: /* Write MII register */
- e100_set_mdio_reg(dev, mdio_phy_addr, data->reg_num, data->val_in);
- break;
/* The ioctls below should be considered obsolete but are */
/* still present for compatibility with old scripts/apps */
case SET_ETH_SPEED_10: /* 10 Mbps */
case SET_ETH_SPEED_100: /* 100 Mbps */
e100_set_speed(dev, 100);
break;
- case SET_ETH_SPEED_AUTO: /* Auto negotiate speed */
+ case SET_ETH_SPEED_AUTO: /* Auto-negotiate speed */
e100_set_speed(dev, 0);
break;
- case SET_ETH_DUPLEX_HALF: /* Half duplex. */
+ case SET_ETH_DUPLEX_HALF: /* Half duplex */
e100_set_duplex(dev, half);
break;
- case SET_ETH_DUPLEX_FULL: /* Full duplex. */
+ case SET_ETH_DUPLEX_FULL: /* Full duplex */
e100_set_duplex(dev, full);
break;
- case SET_ETH_DUPLEX_AUTO: /* Autonegotiate duplex*/
+ case SET_ETH_DUPLEX_AUTO: /* Auto-negotiate duplex */
e100_set_duplex(dev, autoneg);
break;
+ case SET_ETH_AUTONEG:
+ old_autoneg = autoneg_normal;
+ autoneg_normal = *(int*)data;
+ if (autoneg_normal != old_autoneg)
+ e100_negotiate(dev);
+ break;
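/* Everything else, including the standard MII ioctls, is handled by generic_mii_ioctl(). */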
default:
- return -EINVAL;
+ rc = generic_mii_ioctl(&np->mii_if, if_mii(ifr),
+ cmd, NULL);
+ break;
}
spin_unlock(&np->lock);
- return 0;
+ return rc;
}
-static int e100_set_settings(struct net_device *dev,
- struct ethtool_cmd *ecmd)
+static int e100_get_settings(struct net_device *dev,
+ struct ethtool_cmd *cmd)
{
- ecmd->supported = SUPPORTED_Autoneg | SUPPORTED_TP | SUPPORTED_MII |
- SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
- SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full;
- ecmd->port = PORT_TP;
- ecmd->transceiver = XCVR_EXTERNAL;
- ecmd->phy_address = mdio_phy_addr;
- ecmd->speed = current_speed;
- ecmd->duplex = full_duplex ? DUPLEX_FULL : DUPLEX_HALF;
- ecmd->advertising = ADVERTISED_TP;
+ struct net_local *np = netdev_priv(dev);