Merge commit 'v2.6.28-rc7'; branch 'x86/dumpstack' into tracing/ftrace
author	Ingo Molnar <mingo@elte.hu>
	Wed, 3 Dec 2008 07:54:47 +0000 (08:54 +0100)
committer	Ingo Molnar <mingo@elte.hu>
	Wed, 3 Dec 2008 07:55:34 +0000 (08:55 +0100)
Merge x86/dumpstack into tracing/ftrace because upcoming ftrace changes
depend on cleanups already in x86/dumpstack.

Also merge in the latest upstream -rc, v2.6.28-rc7.

arch/powerpc/kernel/entry_64.S
arch/x86/kernel/Makefile
arch/x86/mm/fault.c
include/linux/sched.h
kernel/profile.c
kernel/sched.c
kernel/sysctl.c

diff --cc arch/powerpc/kernel/entry_64.S
Simple merge
diff --cc arch/x86/kernel/Makefile
index d274425fb0766d1edfbf0fa8db95b2c1fd21195e,b62a7667828eb77574128a8ca82b1f54cee28238,db3216a9d2b987d320eb3db7ed102c6f5ab0c312..a3049da61985f777bbbcc0c2eedb9b3820b4d63b
@@@@ -11,15 -11,9 -11,8 +11,15 @@@@ ifdef CONFIG_FUNCTION_TRACE
   CFLAGS_REMOVE_tsc.o = -pg
   CFLAGS_REMOVE_rtc.o = -pg
   CFLAGS_REMOVE_paravirt-spinlocks.o = -pg
  +CFLAGS_REMOVE_ftrace.o = -pg
  +endif
  +
 ++ifdef CONFIG_FUNCTION_GRAPH_TRACER
 ++# Don't trace __switch_to(), but leave it traceable by the function tracer
 ++CFLAGS_REMOVE_process_32.o = -pg
 ++CFLAGS_REMOVE_process_64.o = -pg
 + endif
 + 
   #
   # vsyscalls (which work on the user stack) should have
   # no stack-protector checks:
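
Under CONFIG_FUNCTION_GRAPH_TRACER, the hunk above strips gcc's -pg flag
from process_32.o/process_64.o, so __switch_to() is not instrumented at
all in that configuration; with only the plain function tracer built,
the files keep -pg and stay traceable. A minimal sketch in plain C of
what -pg instrumentation amounts to (mcount() and the function names
below are illustrative, not kernel source):

/* With -pg, gcc emits a call to a profiling hook at every function
 * entry; a tracer whose own code is built with -pg would recurse into
 * itself, hence the CFLAGS_REMOVE_*.o = -pg lines above. */
extern void mcount(void);               /* the hook gcc emits calls to */

void some_function(void)
{
        /* with -pg, gcc effectively prepends: mcount(); */
}

/* Per-function opt-out; the kernel wraps this attribute as `notrace`.
 * CFLAGS_REMOVE_foo.o = -pg opts out a whole object file instead. */
static void __attribute__((no_instrument_function)) not_traced(void)
{
}
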
diff --cc arch/x86/mm/fault.c
Simple merge
diff --cc include/linux/sched.h
index 7ad48f2a275875e6af182ac00131a3069047f066,55e30d11447790dd433d0e874285ced3a5499328,5c38db536e07f8b7b0d6e5b0dc4f31a9e3cb07c1..2d0a93c3122837ea55d8a51dd4113313a0181629
@@@@ -1356,26 -1347,15 -1345,7 +1360,26 @@@@ struct task_struct 
   #ifdef CONFIG_LATENCYTOP
        int latency_record_count;
        struct latency_record latency_record[LT_SAVECOUNT];
  +#endif
  +     /*
  +      * time slack values; these are used to round up poll() and
  +      * select() etc timeout values. These are in nanoseconds.
  +      */
  +     unsigned long timer_slack_ns;
  +     unsigned long default_timer_slack_ns;
  +
  +     struct list_head        *scm_work_list;
 ++#ifdef CONFIG_FUNCTION_GRAPH_TRACER
 ++     /* Index of the current stored return address in ret_stack */
 ++     int curr_ret_stack;
 ++     /* Stack of return addresses for return function tracing */
 ++     struct ftrace_ret_stack *ret_stack;
 ++     /*
 ++      * Number of functions that haven't been traced
 ++      * because of depth overrun.
 ++      */
 ++     atomic_t trace_overrun;
 + #endif
   };
   
   /*
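
The new task_struct fields above are the function graph tracer's
per-task return stack: on every traced function entry the real return
address is saved so that the exit can be hooked as well. A hedged
sketch of the push path; struct ftrace_ret_stack matches the fields
referenced above, but push_return_trace() and FTRACE_RETFUNC_DEPTH are
illustrative names here, not a documented API:

#define FTRACE_RETFUNC_DEPTH 50         /* illustrative depth limit */

/* One saved call, enough to splice a hook into the function's return. */
struct ftrace_ret_stack {
        unsigned long ret;              /* original return address */
        unsigned long func;             /* address of the traced callee */
        unsigned long long calltime;    /* to compute duration on exit */
};

/* On traced function entry: remember the real return address; if the
 * per-task stack is full, count the miss in trace_overrun and bail. */
static int push_return_trace(struct task_struct *t, unsigned long ret,
                             unsigned long func, unsigned long long time)
{
        int index = t->curr_ret_stack + 1;

        if (index >= FTRACE_RETFUNC_DEPTH) {
                atomic_inc(&t->trace_overrun);
                return -EBUSY;
        }
        t->ret_stack[index].ret = ret;
        t->ret_stack[index].func = func;
        t->ret_stack[index].calltime = time;
        t->curr_ret_stack = index;
        return 0;
}
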
diff --cc kernel/profile.c
index 7f93a5042d3b6011cbc54c0e540e6d178f7e293e,dc41827fbfeea474c809ebc3adb6e54edcedb861,a9e422df6bf63e28648432efa7f0503aa09ffe8f..60adefb59b5e24f4f3d916df1caab686f91bc9f4
@@@@ -544,7 -544,7 -544,7 +544,7 @@@@ static const struct file_operations pro
   };
   
   #ifdef CONFIG_SMP
 - static inline void profile_nop(void *unused)
  -static void __init profile_nop(void *unused)
 ++static void profile_nop(void *unused)
   {
   }
   
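
For context on the conflict above: one parent made profile_nop()
`static inline`, the other marked it `__init`; the resolution keeps a
plain static function. Its address is passed to a runtime SMP
cross-call, so `__init` in particular would be wrong: init text is
freed after boot. A minimal sketch of the call pattern, assuming the
usual on_each_cpu() dummy-IPI usage (the exact call site is not shown
in this diff, and kick_all_cpus() is an illustrative caller):

#include <linux/smp.h>

/* Dummy IPI handler: the point is only to interrupt every CPU. */
static void profile_nop(void *unused)
{
}

static void kick_all_cpus(void)
{
        /* Runs long after boot. Were profile_nop __init, its text
         * would already be freed and this IPI would jump into
         * released memory. */
        on_each_cpu(profile_nop, NULL, 1);
}
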
diff --cc kernel/sched.c
index 52490bf6b884ab2ce08d0235572c241704903e57,b7480fb5c3dc21a7bf6513a978cf0ed2e8c19a8f,d906f72b42d23ae1d8c2355d9b605e5fd0761eaa..7729c4bbc8baec10e47529142b788c43b71fa501
@@@@ -1459,11 -1453,12 -1439,9 +1459,12 @@@@ static int task_hot(struct task_struct 
   static unsigned long cpu_avg_load_per_task(int cpu)
   {
        struct rq *rq = cpu_rq(cpu);
+ +     unsigned long nr_running = ACCESS_ONCE(rq->nr_running);
   
- -     if (rq->nr_running)
- -             rq->avg_load_per_task = rq->load.weight / rq->nr_running;
+ +     if (nr_running)
+ +             rq->avg_load_per_task = rq->load.weight / nr_running;
  +     else
  +             rq->avg_load_per_task = 0;
   
        return rq->avg_load_per_task;
   }
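
The hunk above reads rq->nr_running exactly once and also resets the
cached average to 0 when the runqueue is empty. Without the snapshot,
the compiler may reload the field between the zero check and the
division, and a concurrent update to zero would then hit a
divide-by-zero despite the check. A standalone sketch of the pattern;
ACCESS_ONCE() is the kernel's actual definition, the surrounding names
are illustrative:

/* Force a single read through a volatile-qualified lvalue. */
#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))

struct rq_like {                        /* stand-in for struct rq */
        unsigned long nr_running;       /* written by other CPUs */
        unsigned long weight;
};

unsigned long avg_load_per_task(struct rq_like *rq)
{
        unsigned long nr_running = ACCESS_ONCE(rq->nr_running);

        /* Check and divide now use the same snapshot, so a concurrent
         * drop to zero cannot slip in between them. */
        if (nr_running)
                return rq->weight / nr_running;
        return 0;
}
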
diff --cc kernel/sysctl.c
Simple merge