Merge branches 'core/debug', 'core/futexes', 'core/locking', 'core/rcu', 'core/signal...
author Ingo Molnar <mingo@elte.hu>
Mon, 24 Nov 2008 16:44:55 +0000 (17:44 +0100)
committer Ingo Molnar <mingo@elte.hu>
Mon, 24 Nov 2008 16:44:55 +0000 (17:44 +0100)
arch/x86/include/asm/uaccess_64.h
include/linux/kernel.h
kernel/exit.c
kernel/futex.c
kernel/lockdep.c
kernel/notifier.c
kernel/sched.c
kernel/softlockup.c
lib/Kconfig.debug

diff --cc arch/x86/include/asm/uaccess_64.h
index f8cfd00db450f2e0f948ce7128a2d44867aebf59,515d4dce96b598bc6e9d07dba21332a44924948c,515d4dce96b598bc6e9d07dba21332a44924948c,543ba883cc66200ff0e2206aec36b434b63d3695,664f15280f14354dc057e1d97954db6baab4b959,664f15280f14354dc057e1d97954db6baab4b959,f8cfd00db450f2e0f948ce7128a2d44867aebf59,c96c1f5d07a2c88e4f5ca547ea0f7154ac68583c..84210c479fca83524c6cef4c6bc069bcff76e272
@@@@@@@@@ -1,5 -1,5 -1,5 -1,5 -1,5 -1,5 -1,5 -1,5 +1,5 @@@@@@@@@
 --     #ifndef __X86_64_UACCESS_H
 --     #define __X86_64_UACCESS_H
       -#ifndef ASM_X86__UACCESS_64_H
       -#define ASM_X86__UACCESS_64_H
 ++    +#ifndef _ASM_X86_UACCESS_64_H
 ++    +#define _ASM_X86_UACCESS_64_H
        
        /*
         * User space memory access functions
@@@@@@@@@ -199,4 -198,4 -198,4 -205,4 -199,4 -199,4 -199,4 -199,4 +205,4 @@@@@@@@@ static inline int __copy_from_user_inat
        unsigned long
        copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
        
 --     #endif /* __X86_64_UACCESS_H */
       -#endif /* ASM_X86__UACCESS_64_H */
 ++    +#endif /* _ASM_X86_UACCESS_64_H */
diff --cc include/linux/kernel.h
index dc7e0d0a6474448aba71b4c32d2045afc44e240e,3f30557be2a3f2e209a2abd60c21ddef9086fc96,2651f805ba6d771b9ec1f26078609aebdb198853,69a9bfdf9c86d9de03919277d108fe242a4d1e4f,fba141d3ca0783303c661f39fb2c503ba418dc56,fba141d3ca0783303c661f39fb2c503ba418dc56,dc7e0d0a6474448aba71b4c32d2045afc44e240e,94d17ff64c5a3b48c0f6716d29b9832d6a896551..269df5a17b30af1b7349c131da05abec8aa95046
@@@@@@@@@ -318,36 -290,28 -288,28 -327,32 -318,32 -318,32 -318,36 -294,32 +329,36 @@@@@@@@@ static inline char *pack_hex_byte(char 
                return buf;
        }
        
 ----- -#define pr_emerg(fmt, arg...) \
 ----- -        printk(KERN_EMERG fmt, ##arg)
 ----- -#define pr_alert(fmt, arg...) \
 ----- -        printk(KERN_ALERT fmt, ##arg)
 ----- -#define pr_crit(fmt, arg...) \
 ----- -        printk(KERN_CRIT fmt, ##arg)
 ----- -#define pr_err(fmt, arg...) \
 ----- -        printk(KERN_ERR fmt, ##arg)
 ----- -#define pr_warning(fmt, arg...) \
 ----- -        printk(KERN_WARNING fmt, ##arg)
 ----- -#define pr_notice(fmt, arg...) \
 ----- -        printk(KERN_NOTICE fmt, ##arg)
 ----- -#define pr_info(fmt, arg...) \
 ----- -        printk(KERN_INFO fmt, ##arg)
 --     
 --     #ifdef DEBUG
 +++++ +#ifndef pr_fmt
 +++++ +#define pr_fmt(fmt) fmt
 +++++ +#endif
 +++++ +
 +++++ +#define pr_emerg(fmt, ...) \
 +++++ +        printk(KERN_EMERG pr_fmt(fmt), ##__VA_ARGS__)
 +++++ +#define pr_alert(fmt, ...) \
 +++++ +        printk(KERN_ALERT pr_fmt(fmt), ##__VA_ARGS__)
 +++++ +#define pr_crit(fmt, ...) \
 +++++ +        printk(KERN_CRIT pr_fmt(fmt), ##__VA_ARGS__)
 +++++ +#define pr_err(fmt, ...) \
 +++++ +        printk(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__)
 +++++ +#define pr_warning(fmt, ...) \
 +++++ +        printk(KERN_WARNING pr_fmt(fmt), ##__VA_ARGS__)
 +++++ +#define pr_notice(fmt, ...) \
 +++++ +        printk(KERN_NOTICE pr_fmt(fmt), ##__VA_ARGS__)
 +++++ +#define pr_info(fmt, ...) \
 +++++ +        printk(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__)
 ++     
        /* If you are writing a driver, please use dev_dbg instead */
 --     #define pr_debug(fmt, arg...) \
 --             printk(KERN_DEBUG fmt, ##arg)
 ++     #if defined(CONFIG_DYNAMIC_PRINTK_DEBUG)
 ++     #define pr_debug(fmt, ...) do { \
   --- -        dynamic_pr_debug(fmt, ##__VA_ARGS__); \
 +++++ +        dynamic_pr_debug(pr_fmt(fmt), ##__VA_ARGS__); \
 ++             } while (0)
 ++     #elif defined(DEBUG)
   --- -#define pr_debug(fmt, arg...) \
   --- -        printk(KERN_DEBUG fmt, ##arg)
 +++++ +#define pr_debug(fmt, ...) \
 +++++ +        printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__)
        #else
 ----- -#define pr_debug(fmt, arg...) \
 ----- -        ({ if (0) printk(KERN_DEBUG fmt, ##arg); 0; })
 +++++ +#define pr_debug(fmt, ...) \
 +++++ +        ({ if (0) printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__); 0; })
        #endif
        
        /*
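
[Note: the hunk above gives every pr_*() macro a pr_fmt() hook so a source
file can prepend a common prefix to all of its messages. A minimal usage
sketch, not part of this merge; the "mydrv" prefix and function are
illustrative:

/* Define pr_fmt() before including kernel.h, otherwise the default
 * identity definition (#define pr_fmt(fmt) fmt) takes effect first. */
#define pr_fmt(fmt) "mydrv: " fmt
#include <linux/kernel.h>

static int mydrv_init(void)
{
        /* expands to printk(KERN_INFO "mydrv: " "loaded\n") */
        pr_info("loaded\n");
        return 0;
}
]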
diff --cc kernel/exit.c
index 2d8be7ebb0f73499f894a1828fd827f0217290f1,16395644a98ff8c060b2f5fd776fe7abadd61c6a,85a83c831856c193570e40a3b7d3e03ef8862d1c,ae2b92be5faec1efa73beefb63304a22d030fc16,80137a5d9467811ba4dab35c6e95790002a5f12a,b9c4d8bb72e5aedb49b9aab8ff96dd7ff8380f9e,2d8be7ebb0f73499f894a1828fd827f0217290f1,80137a5d9467811ba4dab35c6e95790002a5f12a..30fcdf16737a2bb013a78b62bc6b7f1eb6eb165b
@@@@@@@@@ -1316,23 -1309,20 -1317,20 -1325,23 -1320,23 -1325,23 -1316,23 -1320,23 +1316,23 @@@@@@@@@ static int wait_task_zombie(struct task
                         * need to protect the access to p->parent->signal fields,
                         * as other threads in the parent group can be right
                         * here reaping other children at the same time.
 ++                      *
 ++                      * We use thread_group_cputime() to get times for the thread
 ++                      * group, which consolidates times for all threads in the
 ++                      * group including the group leader.
                         */
+++++ ++                thread_group_cputime(p, &cputime);
                        spin_lock_irq(&p->parent->sighand->siglock);
                        psig = p->parent->signal;
                        sig = p->signal;
-  -- --                thread_group_cputime(p, &cputime);
                        psig->cutime =
                                cputime_add(psig->cutime,
 --                             cputime_add(p->utime,
 --                             cputime_add(sig->utime,
 --                                         sig->cutime)));
 ++                             cputime_add(cputime.utime,
 ++                                         sig->cutime));
                        psig->cstime =
                                cputime_add(psig->cstime,
 --                             cputime_add(p->stime,
 --                             cputime_add(sig->stime,
 --                                         sig->cstime)));
 ++                             cputime_add(cputime.stime,
 ++                                         sig->cstime));
                        psig->cgtime =
                                cputime_add(psig->cgtime,
                                cputime_add(p->gtime,
diff --cc kernel/futex.c
index 8af10027514bb1cc9cb2702051330e52bf43a533,7d1136e97c142d198b897dab1846acd99f1f655f,62cbd648e28a663fc5e165adf177d227bed38a33,8af10027514bb1cc9cb2702051330e52bf43a533,8af10027514bb1cc9cb2702051330e52bf43a533,8af10027514bb1cc9cb2702051330e52bf43a533,8af10027514bb1cc9cb2702051330e52bf43a533,7d1136e97c142d198b897dab1846acd99f1f655f..e10c5c8786a614619c943f5102189fdb428c5ac3
                 *
                 * NOTE: When userspace waits on a MAP_SHARED mapping, even if
                 * it's a read-only handle, it's expected that futexes attach to
-- -----         * the object not the particular process.  Therefore we use
-- -----         * VM_MAYSHARE here, not VM_SHARED which is restricted to shared
-- -----         * mappings of _writable_ handles.
++ +++++         * the object not the particular process.
                 */
-- -----        if (likely(!(vma->vm_flags & VM_MAYSHARE))) {
-- -----                key->both.offset |= FUT_OFF_MMSHARED; /* reference taken on mm */
++ +++++        if (PageAnon(page)) {
++ +++++                key->both.offset |= FUT_OFF_MMSHARED; /* ref taken on mm */
                        key->private.mm = mm;
                        key->private.address = address;
 -     -                return 0;
 -     -        }
 -     -
 -     -        /*
 -     -         * Linear file mappings are also simple.
 -     -         */
 -     -        key->shared.inode = vma->vm_file->f_path.dentry->d_inode;
 -     -        key->both.offset |= FUT_OFF_INODE; /* inode-based key. */
 -     -        if (likely(!(vma->vm_flags & VM_NONLINEAR))) {
 -     -                key->shared.pgoff = (((address - vma->vm_start) >> PAGE_SHIFT)
 -     -                                     + vma->vm_pgoff);
-- -----                return 0;
++ +++++        } else {
++ +++++                key->both.offset |= FUT_OFF_INODE; /* inode-based key */
++ +++++                key->shared.inode = page->mapping->host;
++ +++++                key->shared.pgoff = page->index;
                }
        
-- -----        /*
-  ----          * Linear file mappings are also simple.
 -     -         * We could walk the page table to read the non-linear
 -     -         * pte, and get the page index without fetching the page
 -     -         * from swap.  But that's a lot of code to duplicate here
 -     -         * for a rare case, so we simply fetch the page.
-- -----         */
-  ----         key->shared.inode = vma->vm_file->f_path.dentry->d_inode;
-  ----         key->both.offset |= FUT_OFF_INODE; /* inode-based key. */
-  ----         if (likely(!(vma->vm_flags & VM_NONLINEAR))) {
-  ----                 key->shared.pgoff = (((address - vma->vm_start) >> PAGE_SHIFT)
-  ----                                      + vma->vm_pgoff);
 -     -        err = get_user_pages(current, mm, address, 1, 0, 0, &page, NULL);
 -     -        if (err >= 0) {
 -     -                key->shared.pgoff =
 -     -                        page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
 -     -                put_page(page);
-- -----                return 0;
-- -----        }
 -     -        return err;
 -     -}
++ +++++        get_futex_key_refs(key);
        
-  ----         /*
-  ----          * We could walk the page table to read the non-linear
-  ----          * pte, and get the page index without fetching the page
-  ----          * from swap.  But that's a lot of code to duplicate here
-  ----          * for a rare case, so we simply fetch the page.
-  ----          */
-  ----         err = get_user_pages(current, mm, address, 1, 0, 0, &page, NULL);
-  ----         if (err >= 0) {
-  ----                 key->shared.pgoff =
-  ----                         page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
-  ----                 put_page(page);
-  ----                 return 0;
-  ----         }
-  ----         return err;
-  ---- }
-  ---- 
-- -----/*
-- ----- * Take a reference to the resource addressed by a key.
-- ----- * Can be called while holding spinlocks.
-- ----- *
-- ----- */
-- -----static void get_futex_key_refs(union futex_key *key)
-- -----{
-- -----        if (key->both.ptr == NULL)
-- -----                return;
-- -----        switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
-- -----                case FUT_OFF_INODE:
-- -----                        atomic_inc(&key->shared.inode->i_count);
-- -----                        break;
-- -----                case FUT_OFF_MMSHARED:
-- -----                        atomic_inc(&key->private.mm->mm_count);
-- -----                        break;
-- -----        }
++ +++++        unlock_page(page);
++ +++++        put_page(page);
++ +++++        return 0;
        }
        
-- -----/*
-- ----- * Drop a reference to the resource addressed by a key.
-- ----- * The hash bucket spinlock must not be held.
-- ----- */
-- -----static void drop_futex_key_refs(union futex_key *key)
++ +++++static inline
++ +++++void put_futex_key(int fshared, union futex_key *key)
        {
-- -----        if (!key->both.ptr)
-- -----                return;
-- -----        switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
-- -----                case FUT_OFF_INODE:
-- -----                        iput(key->shared.inode);
-- -----                        break;
-- -----                case FUT_OFF_MMSHARED:
-- -----                        mmdrop(key->private.mm);
-- -----                        break;
-- -----        }
++ +++++        drop_futex_key_refs(key);
        }
        
        static u32 cmpxchg_futex_value_locked(u32 __user *uaddr, u32 uval, u32 newval)
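
[Note: for orientation, the rewritten futex key code above fills a
union futex_key: anonymous pages get a private, mm-based key
(FUT_OFF_MMSHARED), file-backed pages an inode-based one (FUT_OFF_INODE)
taken from page->mapping->host and page->index. Abridged sketch of the
union as it appears in linux/futex.h of this period, layout simplified:

/* Abridged sketch of union futex_key; the low bits of "offset" carry
 * the FUT_OFF_INODE / FUT_OFF_MMSHARED discriminator flags. */
union futex_key {
        struct {
                unsigned long pgoff;
                struct inode *inode;    /* file-backed: keyed by inode */
                int offset;
        } shared;
        struct {
                unsigned long address;
                struct mm_struct *mm;   /* anonymous: keyed by mm      */
                int offset;
        } private;
        struct {
                unsigned long word;
                void *ptr;
                int offset;
        } both;
};
]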
diff --cc kernel/lockdep.c
Simple merge
diff --cc kernel/notifier.c
Simple merge
diff --cc kernel/sched.c
index 9b1e79371c207b37c1617d3f7c0460709a3cc39b,cc1f81b50b82dddb19658dc4d10dd419087a59fc,13dd2db9fb2dc185a4a95a86aab9f89b4850ccaf,2a106b6b78b09006f75274defb2057b6e7a428e7,e8819bc6f462c18761c11861b4808fd28223b431,b388c9b243e94c71e15df02692ae3fb3658482da,9b1e79371c207b37c1617d3f7c0460709a3cc39b,d906f72b42d23ae1d8c2355d9b605e5fd0761eaa..558e5f284269bfd59a23008ad4906b51269047b2
                parent = parent->parent;
                if (parent)
                        goto up;
 ++     out_unlock:
                rcu_read_unlock();
 ++     
 ++             return ret;
 ++     }
 ++     
 ++     static int tg_nop(struct task_group *tg, void *data)
 ++     {
 ++             return 0;
 ++     }
 ++     #endif
 ++     
 ++     #ifdef CONFIG_SMP
 ++     static unsigned long source_load(int cpu, int type);
 ++     static unsigned long target_load(int cpu, int type);
 ++     static int task_hot(struct task_struct *p, u64 now, struct sched_domain *sd);
 ++     
 ++     static unsigned long cpu_avg_load_per_task(int cpu)
 ++     {
 ++             struct rq *rq = cpu_rq(cpu);
 ++     
 ++             if (rq->nr_running)
 ++                     rq->avg_load_per_task = rq->load.weight / rq->nr_running;
 ++++  +        else
 ++++  +                rq->avg_load_per_task = 0;
 ++     
 ++             return rq->avg_load_per_task;
        }
        
 ++     #ifdef CONFIG_FAIR_GROUP_SCHED
 ++     
        static void __set_se_shares(struct sched_entity *se, unsigned long shares);
        
        /*
@@@@@@@@@ -1547,10 -1507,14 -1507,14 -1545,10 -1537,10 -1547,10 -1547,10 -1527,16 +1547,10 @@@@@@@@@ static int tg_shares_up(struct task_gro
                if (!rq_weight)
                        rq_weight = cpus_weight(sd->span) * NICE_0_LOAD;
        
 --    -        for_each_cpu_mask(i, sd->span) {
 --    -                struct rq *rq = cpu_rq(i);
 --    -                unsigned long flags;
       -
       -                spin_lock_irqsave(&rq->lock, flags);
       -                __update_group_shares_cpu(tg, i, shares, rq_weight);
       -                spin_unlock_irqrestore(&rq->lock, flags);
       -        }
 ++    +        for_each_cpu_mask(i, sd->span)
 ++    +                update_group_shares_cpu(tg, i, shares, rq_weight);
        
 --                     spin_lock_irqsave(&rq->lock, flags);
 --                     __update_group_shares_cpu(tg, i, shares, rq_weight);
 --                     spin_unlock_irqrestore(&rq->lock, flags);
 --             }
 ++             return 0;
        }
        
        /*
@@@@@@@@@ -9025,25 -8905,16 -8905,19 -9021,25 -9008,25 -9023,25 -9025,25 -9008,25 +9024,25 @@@@@@@@@ long sched_group_rt_period(struct task_
        
        static int sched_rt_global_constraints(void)
        {
 --             struct task_group *tg = &root_task_group;
 --             u64 rt_runtime, rt_period;
 ++             u64 runtime, period;
                int ret = 0;
        
 -              rt_period = ktime_to_ns(tg->rt_bandwidth.rt_period);
 -              rt_runtime = tg->rt_bandwidth.rt_runtime;
 +              if (sysctl_sched_rt_period <= 0)
 +                      return -EINVAL;
 +      
  -             rt_period = ktime_to_ns(tg->rt_bandwidth.rt_period);
  -             rt_runtime = tg->rt_bandwidth.rt_runtime;
 ++             runtime = global_rt_runtime();
 ++             period = global_rt_period();
 ++     
 ++             /*
 ++              * Sanity check on the sysctl variables.
 ++              */
 ++             if (runtime > period && runtime != RUNTIME_INF)
 ++                     return -EINVAL;
        
                mutex_lock(&rt_constraints_mutex);
 --             if (!__rt_schedulable(tg, rt_period, rt_runtime))
 --                     ret = -EINVAL;
 ++             read_lock(&tasklist_lock);
 ++             ret = __rt_schedulable(NULL, 0, 0);
 ++             read_unlock(&tasklist_lock);
                mutex_unlock(&rt_constraints_mutex);
        
                return ret;
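
[Note: the sched_rt_global_constraints() hunk above adds a sanity check
on the RT bandwidth sysctls: the global RT runtime may not exceed the RT
period unless it is RUNTIME_INF ("no throttling"). A standalone sketch of
that check with hypothetical values; the real code goes through the
global_rt_runtime()/global_rt_period() helpers:

/* Standalone demo of the runtime-vs-period sanity check. Values mirror
 * the defaults (950ms RT runtime per 1s period, in nanoseconds). */
#include <stdio.h>

#define RUNTIME_INF ((unsigned long long)~0ULL)

static int rt_global_constraints_ok(unsigned long long runtime,
                                    unsigned long long period)
{
        if (period == 0)
                return 0;       /* -EINVAL in the kernel */
        if (runtime > period && runtime != RUNTIME_INF)
                return 0;       /* can't run longer than the period */
        return 1;
}

int main(void)
{
        printf("%d\n", rt_global_constraints_ok(950000000ULL, 1000000000ULL));  /* 1 */
        printf("%d\n", rt_global_constraints_ok(2000000000ULL, 1000000000ULL)); /* 0 */
        return 0;
}
]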
diff --cc kernel/softlockup.c
Simple merge
diff --cc lib/Kconfig.debug
Simple merge