Merge branches 'core/debug', 'core/futexes', 'core/locking', 'core/rcu', 'core/signal...
author Ingo Molnar <mingo@elte.hu>
Mon, 24 Nov 2008 16:44:55 +0000 (17:44 +0100)
committer Ingo Molnar <mingo@elte.hu>
Mon, 24 Nov 2008 16:44:55 +0000 (17:44 +0100)
arch/x86/include/asm/uaccess_64.h
include/linux/kernel.h
kernel/exit.c
kernel/futex.c
kernel/lockdep.c
kernel/notifier.c
kernel/sched.c
kernel/softlockup.c
lib/Kconfig.debug

diff --cc arch/x86/include/asm/uaccess_64.h
index f8cfd00,0000000,0000000,543ba88,664f152,664f152,f8cfd00,0000000..84210c4
mode 100644,000000,000000,100644,100644,100644,100644,000000..100644
--- /dev/null
--- /dev/null
--- /dev/null
@@@@@@@@@ -1,202 -1,0 -1,0 -1,208 -1,202 -1,202 -1,202 -1,0 +1,208 @@@@@@@@@
 ++    +#ifndef _ASM_X86_UACCESS_64_H
 ++    +#define _ASM_X86_UACCESS_64_H
 ++    +
 ++    +/*
 ++    + * User space memory access functions
 ++    + */
 ++    +#include <linux/compiler.h>
 ++    +#include <linux/errno.h>
 ++    +#include <linux/prefetch.h>
 ++    +#include <linux/lockdep.h>
 ++    +#include <asm/page.h>
 ++    +
 ++    +/*
 ++    + * Copy To/From Userspace
 ++    + */
 ++    +
 ++    +/* Handles exceptions in both to and from, but doesn't do access_ok */
 ++    +__must_check unsigned long
 ++    +copy_user_generic(void *to, const void *from, unsigned len);
 ++    +
 ++    +__must_check unsigned long
 ++    +copy_to_user(void __user *to, const void *from, unsigned len);
 ++    +__must_check unsigned long
 ++    +copy_from_user(void *to, const void __user *from, unsigned len);
 ++    +__must_check unsigned long
 ++    +copy_in_user(void __user *to, const void __user *from, unsigned len);
 ++    +
 ++    +static __always_inline __must_check
 ++    +int __copy_from_user(void *dst, const void __user *src, unsigned size)
 ++    +{
 ++    +        int ret = 0;
+++ ++++
+++ ++++        might_fault();
 ++    +        if (!__builtin_constant_p(size))
 ++    +                return copy_user_generic(dst, (__force void *)src, size);
 ++    +        switch (size) {
 ++    +        case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
 ++    +                              ret, "b", "b", "=q", 1);
 ++    +                return ret;
 ++    +        case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
 ++    +                              ret, "w", "w", "=r", 2);
 ++    +                return ret;
 ++    +        case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
 ++    +                              ret, "l", "k", "=r", 4);
 ++    +                return ret;
 ++    +        case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
 ++    +                              ret, "q", "", "=r", 8);
 ++    +                return ret;
 ++    +        case 10:
 ++    +                __get_user_asm(*(u64 *)dst, (u64 __user *)src,
    ---                                 ret, "q", "", "=r", 16);
 +++++ +                               ret, "q", "", "=r", 10);
 ++    +                if (unlikely(ret))
 ++    +                        return ret;
 ++    +                __get_user_asm(*(u16 *)(8 + (char *)dst),
 ++    +                               (u16 __user *)(8 + (char __user *)src),
 ++    +                               ret, "w", "w", "=r", 2);
 ++    +                return ret;
 ++    +        case 16:
 ++    +                __get_user_asm(*(u64 *)dst, (u64 __user *)src,
 ++    +                               ret, "q", "", "=r", 16);
 ++    +                if (unlikely(ret))
 ++    +                        return ret;
 ++    +                __get_user_asm(*(u64 *)(8 + (char *)dst),
 ++    +                               (u64 __user *)(8 + (char __user *)src),
 ++    +                               ret, "q", "", "=r", 8);
 ++    +                return ret;
 ++    +        default:
 ++    +                return copy_user_generic(dst, (__force void *)src, size);
 ++    +        }
 ++    +}
 ++    +
 ++    +static __always_inline __must_check
 ++    +int __copy_to_user(void __user *dst, const void *src, unsigned size)
 ++    +{
 ++    +        int ret = 0;
+++ ++++
+++ ++++        might_fault();
 ++    +        if (!__builtin_constant_p(size))
 ++    +                return copy_user_generic((__force void *)dst, src, size);
 ++    +        switch (size) {
 ++    +        case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
 ++    +                              ret, "b", "b", "iq", 1);
 ++    +                return ret;
 ++    +        case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
 ++    +                              ret, "w", "w", "ir", 2);
 ++    +                return ret;
 ++    +        case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
 ++    +                              ret, "l", "k", "ir", 4);
 ++    +                return ret;
 ++    +        case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
 ++    +                              ret, "q", "", "ir", 8);
 ++    +                return ret;
 ++    +        case 10:
 ++    +                __put_user_asm(*(u64 *)src, (u64 __user *)dst,
 ++    +                               ret, "q", "", "ir", 10);
 ++    +                if (unlikely(ret))
 ++    +                        return ret;
 ++    +                asm("":::"memory");
 ++    +                __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
 ++    +                               ret, "w", "w", "ir", 2);
 ++    +                return ret;
 ++    +        case 16:
 ++    +                __put_user_asm(*(u64 *)src, (u64 __user *)dst,
 ++    +                               ret, "q", "", "ir", 16);
 ++    +                if (unlikely(ret))
 ++    +                        return ret;
 ++    +                asm("":::"memory");
 ++    +                __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
 ++    +                               ret, "q", "", "ir", 8);
 ++    +                return ret;
 ++    +        default:
 ++    +                return copy_user_generic((__force void *)dst, src, size);
 ++    +        }
 ++    +}
 ++    +
 ++    +static __always_inline __must_check
 ++    +int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
 ++    +{
 ++    +        int ret = 0;
+++ ++++
+++ ++++        might_fault();
 ++    +        if (!__builtin_constant_p(size))
 ++    +                return copy_user_generic((__force void *)dst,
 ++    +                                         (__force void *)src, size);
 ++    +        switch (size) {
 ++    +        case 1: {
 ++    +                u8 tmp;
 ++    +                __get_user_asm(tmp, (u8 __user *)src,
 ++    +                               ret, "b", "b", "=q", 1);
 ++    +                if (likely(!ret))
 ++    +                        __put_user_asm(tmp, (u8 __user *)dst,
 ++    +                                       ret, "b", "b", "iq", 1);
 ++    +                return ret;
 ++    +        }
 ++    +        case 2: {
 ++    +                u16 tmp;
 ++    +                __get_user_asm(tmp, (u16 __user *)src,
 ++    +                               ret, "w", "w", "=r", 2);
 ++    +                if (likely(!ret))
 ++    +                        __put_user_asm(tmp, (u16 __user *)dst,
 ++    +                                       ret, "w", "w", "ir", 2);
 ++    +                return ret;
 ++    +        }
 ++    +
 ++    +        case 4: {
 ++    +                u32 tmp;
 ++    +                __get_user_asm(tmp, (u32 __user *)src,
 ++    +                               ret, "l", "k", "=r", 4);
 ++    +                if (likely(!ret))
 ++    +                        __put_user_asm(tmp, (u32 __user *)dst,
 ++    +                                       ret, "l", "k", "ir", 4);
 ++    +                return ret;
 ++    +        }
 ++    +        case 8: {
 ++    +                u64 tmp;
 ++    +                __get_user_asm(tmp, (u64 __user *)src,
 ++    +                               ret, "q", "", "=r", 8);
 ++    +                if (likely(!ret))
 ++    +                        __put_user_asm(tmp, (u64 __user *)dst,
 ++    +                                       ret, "q", "", "ir", 8);
 ++    +                return ret;
 ++    +        }
 ++    +        default:
 ++    +                return copy_user_generic((__force void *)dst,
 ++    +                                         (__force void *)src, size);
 ++    +        }
 ++    +}
 ++    +
 ++    +__must_check long
 ++    +strncpy_from_user(char *dst, const char __user *src, long count);
 ++    +__must_check long
 ++    +__strncpy_from_user(char *dst, const char __user *src, long count);
 ++    +__must_check long strnlen_user(const char __user *str, long n);
 ++    +__must_check long __strnlen_user(const char __user *str, long n);
 ++    +__must_check long strlen_user(const char __user *str);
 ++    +__must_check unsigned long clear_user(void __user *mem, unsigned long len);
 ++    +__must_check unsigned long __clear_user(void __user *mem, unsigned long len);
 ++    +
 ++    +__must_check long __copy_from_user_inatomic(void *dst, const void __user *src,
 ++    +                                            unsigned size);
 ++    +
 ++    +static __must_check __always_inline int
 ++    +__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
 ++    +{
 ++    +        return copy_user_generic((__force void *)dst, src, size);
 ++    +}
 ++    +
 ++    +extern long __copy_user_nocache(void *dst, const void __user *src,
 ++    +                                unsigned size, int zerorest);
 ++    +
 ++    +static inline int __copy_from_user_nocache(void *dst, const void __user *src,
 ++    +                                           unsigned size)
 ++    +{
 ++    +        might_sleep();
 ++    +        return __copy_user_nocache(dst, src, size, 1);
 ++    +}
 ++    +
 ++    +static inline int __copy_from_user_inatomic_nocache(void *dst,
 ++    +                                                    const void __user *src,
 ++    +                                                    unsigned size)
 ++    +{
 ++    +        return __copy_user_nocache(dst, src, size, 0);
 ++    +}
 ++    +
 ++    +unsigned long
 ++    +copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
 ++    +
 ++    +#endif /* _ASM_X86_UACCESS_64_H */
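
The header above leans on __builtin_constant_p(): when the copy size is a compile-time constant, the switch collapses to a single fixed-width __get_user_asm()/__put_user_asm(), and everything else falls through to copy_user_generic(). Below is a minimal, userspace-only sketch of that dispatch pattern; model_copy() and copy_generic() are illustrative names, and plain assignments plus memcpy() stand in for the asm helpers and their exception fixups.

/*
 * Userspace model of the constant-size dispatch used above.
 * Not the kernel code: no faults, no fixups, no might_fault().
 */
#include <stdint.h>
#include <string.h>
#include <stdio.h>

/* Fallback for sizes the compiler cannot prove constant (stands in for copy_user_generic). */
static unsigned long copy_generic(void *to, const void *from, unsigned len)
{
        memcpy(to, from, len);
        return 0;                       /* 0 bytes left uncopied */
}

/* Force-inlined so __builtin_constant_p() sees the caller's literal size. */
static inline __attribute__((always_inline))
unsigned long model_copy(void *to, const void *from, unsigned len)
{
        if (!__builtin_constant_p(len))
                return copy_generic(to, from, len);

        switch (len) {                  /* fixed-width moves, like the cases above */
        case 1: *(uint8_t  *)to = *(const uint8_t  *)from; return 0;
        case 2: *(uint16_t *)to = *(const uint16_t *)from; return 0;
        case 4: *(uint32_t *)to = *(const uint32_t *)from; return 0;
        case 8: *(uint64_t *)to = *(const uint64_t *)from; return 0;
        default:
                return copy_generic(to, from, len);
        }
}

int main(void)
{
        uint64_t src = 0x1122334455667788ull, dst = 0;

        model_copy(&dst, &src, sizeof(dst));    /* constant size: takes the case 8 path */
        printf("%llx\n", (unsigned long long)dst);
        return 0;
}
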
diff --cc include/linux/kernel.h
@@@@@@@@@ -318,36 -290,28 -288,28 -327,32 -318,32 -318,32 -318,36 -294,32 +329,36 @@@@@@@@@ static inline char *pack_hex_byte(char 
                return buf;
        }
        
 ----- -#define pr_emerg(fmt, arg...) \
 ----- -        printk(KERN_EMERG fmt, ##arg)
 ----- -#define pr_alert(fmt, arg...) \
 ----- -        printk(KERN_ALERT fmt, ##arg)
 ----- -#define pr_crit(fmt, arg...) \
 ----- -        printk(KERN_CRIT fmt, ##arg)
 ----- -#define pr_err(fmt, arg...) \
 ----- -        printk(KERN_ERR fmt, ##arg)
 ----- -#define pr_warning(fmt, arg...) \
 ----- -        printk(KERN_WARNING fmt, ##arg)
 ----- -#define pr_notice(fmt, arg...) \
 ----- -        printk(KERN_NOTICE fmt, ##arg)
 ----- -#define pr_info(fmt, arg...) \
 ----- -        printk(KERN_INFO fmt, ##arg)
 --     
 --     #ifdef DEBUG
 +++++ +#ifndef pr_fmt
 +++++ +#define pr_fmt(fmt) fmt
 +++++ +#endif
 +++++ +
 +++++ +#define pr_emerg(fmt, ...) \
 +++++ +        printk(KERN_EMERG pr_fmt(fmt), ##__VA_ARGS__)
 +++++ +#define pr_alert(fmt, ...) \
 +++++ +        printk(KERN_ALERT pr_fmt(fmt), ##__VA_ARGS__)
 +++++ +#define pr_crit(fmt, ...) \
 +++++ +        printk(KERN_CRIT pr_fmt(fmt), ##__VA_ARGS__)
 +++++ +#define pr_err(fmt, ...) \
 +++++ +        printk(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__)
 +++++ +#define pr_warning(fmt, ...) \
 +++++ +        printk(KERN_WARNING pr_fmt(fmt), ##__VA_ARGS__)
 +++++ +#define pr_notice(fmt, ...) \
 +++++ +        printk(KERN_NOTICE pr_fmt(fmt), ##__VA_ARGS__)
 +++++ +#define pr_info(fmt, ...) \
 +++++ +        printk(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__)
 ++     
        /* If you are writing a driver, please use dev_dbg instead */
 --     #define pr_debug(fmt, arg...) \
 --             printk(KERN_DEBUG fmt, ##arg)
 ++     #if defined(CONFIG_DYNAMIC_PRINTK_DEBUG)
 ++     #define pr_debug(fmt, ...) do { \
   --- -        dynamic_pr_debug(fmt, ##__VA_ARGS__); \
 +++++ +        dynamic_pr_debug(pr_fmt(fmt), ##__VA_ARGS__); \
 ++             } while (0)
 ++     #elif defined(DEBUG)
   --- -#define pr_debug(fmt, arg...) \
   --- -        printk(KERN_DEBUG fmt, ##arg)
 +++++ +#define pr_debug(fmt, ...) \
 +++++ +        printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__)
        #else
 ----- -#define pr_debug(fmt, arg...) \
 ----- -        ({ if (0) printk(KERN_DEBUG fmt, ##arg); 0; })
 +++++ +#define pr_debug(fmt, ...) \
 +++++ +        ({ if (0) printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__); 0; })
        #endif
        
        /*
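
The reworked pr_*() macros route every format string through pr_fmt(), so a source file can define pr_fmt() once before including <linux/kernel.h> and have every message in that file prefixed at compile time. A small userspace sketch of the same macro trick, with printf() standing in for printk() and a made-up "mydriver: " prefix:

/*
 * Userspace model of the pr_fmt() convention introduced above.
 */
#include <stdio.h>

/* A source file defines this before the pr_* macros are seen. */
#define pr_fmt(fmt) "mydriver: " fmt    /* "mydriver" is an illustrative name */

#ifndef pr_fmt
#define pr_fmt(fmt) fmt                  /* default: no prefix */
#endif

#define pr_info(fmt, ...) printf(pr_fmt(fmt), ##__VA_ARGS__)

int main(void)
{
        pr_info("probe succeeded on port %d\n", 2);
        /* prints: "mydriver: probe succeeded on port 2" */
        return 0;
}
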
diff --cc kernel/exit.c
@@@@@@@@@ -1316,23 -1309,20 -1317,20 -1325,23 -1320,23 -1325,23 -1316,23 -1320,23 +1316,23 @@@@@@@@@ static int wait_task_zombie(struct task
                         * need to protect the access to p->parent->signal fields,
                         * as other threads in the parent group can be right
                         * here reaping other children at the same time.
 ++                      *
 ++                      * We use thread_group_cputime() to get times for the thread
 ++                      * group, which consolidates times for all threads in the
 ++                      * group including the group leader.
                         */
+++++ ++                thread_group_cputime(p, &cputime);
                        spin_lock_irq(&p->parent->sighand->siglock);
                        psig = p->parent->signal;
                        sig = p->signal;
-  -- --                thread_group_cputime(p, &cputime);
                        psig->cutime =
                                cputime_add(psig->cutime,
 --                             cputime_add(p->utime,
 --                             cputime_add(sig->utime,
 --                                         sig->cutime)));
 ++                             cputime_add(cputime.utime,
 ++                                         sig->cutime));
                        psig->cstime =
                                cputime_add(psig->cstime,
 --                             cputime_add(p->stime,
 --                             cputime_add(sig->stime,
 --                                         sig->cstime)));
 ++                             cputime_add(cputime.stime,
 ++                                         sig->cstime));
                        psig->cgtime =
                                cputime_add(psig->cgtime,
                                cputime_add(p->gtime,
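
The wait_task_zombie() hunk above replaces the per-task p->utime/p->stime terms with the totals returned by thread_group_cputime(), which are summed over every thread in the exiting group, leader included. A simplified stand-alone model of that accumulation; the struct and helper names here are stand-ins, not kernel types:

/*
 * Model of the accumulation: sum the group's times, then fold that total
 * plus the child's own children-times into the parent's cutime/cstime.
 */
#include <stdio.h>

typedef unsigned long long cputime_t;

struct task_times { cputime_t utime, stime; };

/* Stand-in for the struct filled by thread_group_cputime(). */
struct group_cputime { cputime_t utime, stime; };

static struct group_cputime
thread_group_cputime(const struct task_times *threads, int nr)
{
        struct group_cputime sum = { 0, 0 };
        for (int i = 0; i < nr; i++) {          /* every thread, leader included */
                sum.utime += threads[i].utime;
                sum.stime += threads[i].stime;
        }
        return sum;
}

int main(void)
{
        /* three threads of the exiting child */
        struct task_times child_threads[] = { {5, 2}, {7, 1}, {3, 4} };
        cputime_t child_cutime = 10, child_cstime = 6;  /* child's own dead children */
        cputime_t parent_cutime = 0, parent_cstime = 0;

        struct group_cputime ct = thread_group_cputime(child_threads, 3);

        /* mirrors: psig->cutime += cputime.utime + sig->cutime; same for stime */
        parent_cutime += ct.utime + child_cutime;
        parent_cstime += ct.stime + child_cstime;

        printf("cutime=%llu cstime=%llu\n", parent_cutime, parent_cstime);
        return 0;
}
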
diff --cc kernel/futex.c
                 *
                 * NOTE: When userspace waits on a MAP_SHARED mapping, even if
                 * it's a read-only handle, it's expected that futexes attach to
-- -----         * the object not the particular process.  Therefore we use
-- -----         * VM_MAYSHARE here, not VM_SHARED which is restricted to shared
-- -----         * mappings of _writable_ handles.
++ +++++         * the object not the particular process.
                 */
-- -----        if (likely(!(vma->vm_flags & VM_MAYSHARE))) {
-- -----                key->both.offset |= FUT_OFF_MMSHARED; /* reference taken on mm */
++ +++++        if (PageAnon(page)) {
++ +++++                key->both.offset |= FUT_OFF_MMSHARED; /* ref taken on mm */
                        key->private.mm = mm;
                        key->private.address = address;
 -     -                return 0;
 -     -        }
 -     -
 -     -        /*
 -     -         * Linear file mappings are also simple.
 -     -         */
 -     -        key->shared.inode = vma->vm_file->f_path.dentry->d_inode;
 -     -        key->both.offset |= FUT_OFF_INODE; /* inode-based key. */
 -     -        if (likely(!(vma->vm_flags & VM_NONLINEAR))) {
 -     -                key->shared.pgoff = (((address - vma->vm_start) >> PAGE_SHIFT)
 -     -                                     + vma->vm_pgoff);
-- -----                return 0;
++ +++++        } else {
++ +++++                key->both.offset |= FUT_OFF_INODE; /* inode-based key */
++ +++++                key->shared.inode = page->mapping->host;
++ +++++                key->shared.pgoff = page->index;
                }
        
-- -----        /*
-  ----          * Linear file mappings are also simple.
 -     -         * We could walk the page table to read the non-linear
 -     -         * pte, and get the page index without fetching the page
 -     -         * from swap.  But that's a lot of code to duplicate here
 -     -         * for a rare case, so we simply fetch the page.
-- -----         */
-  ----         key->shared.inode = vma->vm_file->f_path.dentry->d_inode;
-  ----         key->both.offset |= FUT_OFF_INODE; /* inode-based key. */
-  ----         if (likely(!(vma->vm_flags & VM_NONLINEAR))) {
-  ----                 key->shared.pgoff = (((address - vma->vm_start) >> PAGE_SHIFT)
-  ----                                      + vma->vm_pgoff);
 -     -        err = get_user_pages(current, mm, address, 1, 0, 0, &page, NULL);
 -     -        if (err >= 0) {
 -     -                key->shared.pgoff =
 -     -                        page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
 -     -                put_page(page);
-- -----                return 0;
-- -----        }
 -     -        return err;
 -     -}
++ +++++        get_futex_key_refs(key);
        
-  ----         /*
-  ----          * We could walk the page table to read the non-linear
-  ----          * pte, and get the page index without fetching the page
-  ----          * from swap.  But that's a lot of code to duplicate here
-  ----          * for a rare case, so we simply fetch the page.
-  ----          */
-  ----         err = get_user_pages(current, mm, address, 1, 0, 0, &page, NULL);
-  ----         if (err >= 0) {
-  ----                 key->shared.pgoff =
-  ----                         page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
-  ----                 put_page(page);
-  ----                 return 0;
-  ----         }
-  ----         return err;
-  ---- }
-  ---- 
-- -----/*
-- ----- * Take a reference to the resource addressed by a key.
-- ----- * Can be called while holding spinlocks.
-- ----- *
-- ----- */
-- -----static void get_futex_key_refs(union futex_key *key)
-- -----{
-- -----        if (key->both.ptr == NULL)
-- -----                return;
-- -----        switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
-- -----                case FUT_OFF_INODE:
-- -----                        atomic_inc(&key->shared.inode->i_count);
-- -----                        break;
-- -----                case FUT_OFF_MMSHARED:
-- -----                        atomic_inc(&key->private.mm->mm_count);
-- -----                        break;
-- -----        }
++ +++++        unlock_page(page);
++ +++++        put_page(page);
++ +++++        return 0;
        }
        
-- -----/*
-- ----- * Drop a reference to the resource addressed by a key.
-- ----- * The hash bucket spinlock must not be held.
-- ----- */
-- -----static void drop_futex_key_refs(union futex_key *key)
++ +++++static inline
++ +++++void put_futex_key(int fshared, union futex_key *key)
        {
-- -----        if (!key->both.ptr)
-- -----                return;
-- -----        switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
-- -----                case FUT_OFF_INODE:
-- -----                        iput(key->shared.inode);
-- -----                        break;
-- -----                case FUT_OFF_MMSHARED:
-- -----                        mmdrop(key->private.mm);
-- -----                        break;
-- -----        }
++ +++++        drop_futex_key_refs(key);
        }
        
        static u32 cmpxchg_futex_value_locked(u32 __user *uaddr, u32 uval, u32 newval)
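
The futex hunk above switches key generation from vma flags to the pinned page itself: an anonymous page yields a private, per-mm key, while a page-cache page yields an inode+pgoff key so every mapping of the object hashes to the same bucket. A reduced sketch of that branch, using simplified stand-in structures and parameters rather than the kernel's union futex_key:

/*
 * Model of the PageAnon() key selection; everything here is illustrative.
 */
#include <stdio.h>
#include <stdbool.h>

#define FUT_OFF_INODE    1      /* key references an inode */
#define FUT_OFF_MMSHARED 2      /* key references an mm    */

struct futex_key {
        unsigned long   offset;         /* low bits carry the FUT_OFF_* tag */
        void            *mm_or_inode;   /* which object the key pins        */
        unsigned long   address_or_pgoff;
};

static void make_futex_key(struct futex_key *key, bool page_is_anon,
                           void *mm, unsigned long address,
                           void *inode, unsigned long pgoff)
{
        if (page_is_anon) {                     /* PageAnon(page) branch */
                key->offset |= FUT_OFF_MMSHARED;
                key->mm_or_inode = mm;
                key->address_or_pgoff = address;
        } else {                                /* file-backed: attach to the object */
                key->offset |= FUT_OFF_INODE;
                key->mm_or_inode = inode;
                key->address_or_pgoff = pgoff;
        }
}

int main(void)
{
        struct futex_key k = { 0, NULL, 0 };
        int fake_inode;

        make_futex_key(&k, false, NULL, 0, &fake_inode, 42);
        printf("shared key: tag=%lu pgoff=%lu\n",
               k.offset & (FUT_OFF_INODE | FUT_OFF_MMSHARED), k.address_or_pgoff);
        return 0;
}
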
diff --cc kernel/lockdep.c
Simple merge
diff --cc kernel/notifier.c
Simple merge
diff --cc kernel/sched.c
                parent = parent->parent;
                if (parent)
                        goto up;
 ++     out_unlock:
                rcu_read_unlock();
 ++     
 ++             return ret;
 ++     }
 ++     
 ++     static int tg_nop(struct task_group *tg, void *data)
 ++     {
 ++             return 0;
 ++     }
 ++     #endif
 ++     
 ++     #ifdef CONFIG_SMP
 ++     static unsigned long source_load(int cpu, int type);
 ++     static unsigned long target_load(int cpu, int type);
 ++     static int task_hot(struct task_struct *p, u64 now, struct sched_domain *sd);
 ++     
 ++     static unsigned long cpu_avg_load_per_task(int cpu)
 ++     {
 ++             struct rq *rq = cpu_rq(cpu);
 ++     
 ++             if (rq->nr_running)
 ++                     rq->avg_load_per_task = rq->load.weight / rq->nr_running;
 ++++  +        else
 ++++  +                rq->avg_load_per_task = 0;
 ++     
 ++             return rq->avg_load_per_task;
        }
        
 ++     #ifdef CONFIG_FAIR_GROUP_SCHED
 ++     
        static void __set_se_shares(struct sched_entity *se, unsigned long shares);
        
        /*
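
cpu_avg_load_per_task() above now clears rq->avg_load_per_task when the runqueue is empty instead of handing back whatever value was cached last. A tiny stand-alone model of the same function, with a two-field struct rq stand-in rather than the scheduler's runqueue:

/*
 * Model of the else-branch added above: an idle CPU reports 0, never a
 * stale per-task average.
 */
#include <stdio.h>

struct rq {
        unsigned long load_weight;
        unsigned long nr_running;
        unsigned long avg_load_per_task;
};

static unsigned long cpu_avg_load_per_task(struct rq *rq)
{
        if (rq->nr_running)
                rq->avg_load_per_task = rq->load_weight / rq->nr_running;
        else
                rq->avg_load_per_task = 0;      /* the new branch */

        return rq->avg_load_per_task;
}

int main(void)
{
        struct rq rq = { .load_weight = 3072, .nr_running = 3, .avg_load_per_task = 0 };

        printf("busy: %lu\n", cpu_avg_load_per_task(&rq));   /* 1024 */
        rq.nr_running = 0;
        printf("idle: %lu\n", cpu_avg_load_per_task(&rq));   /* 0, not 1024 */
        return 0;
}
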
@@@@@@@@@ -1547,10 -1507,14 -1507,14 -1545,10 -1537,10 -1547,10 -1547,10 -1527,16 +1547,10 @@@@@@@@@ static int tg_shares_up(struct task_gro
                if (!rq_weight)
                        rq_weight = cpus_weight(sd->span) * NICE_0_LOAD;
        
 --    -        for_each_cpu_mask(i, sd->span) {
 --    -                struct rq *rq = cpu_rq(i);
 --    -                unsigned long flags;
       -
       -                spin_lock_irqsave(&rq->lock, flags);
       -                __update_group_shares_cpu(tg, i, shares, rq_weight);
       -                spin_unlock_irqrestore(&rq->lock, flags);
       -        }
 ++    +        for_each_cpu_mask(i, sd->span)
 ++    +                update_group_shares_cpu(tg, i, shares, rq_weight);
        
 --                     spin_lock_irqsave(&rq->lock, flags);
 --                     __update_group_shares_cpu(tg, i, shares, rq_weight);
 --                     spin_unlock_irqrestore(&rq->lock, flags);
 --             }
 ++             return 0;
        }
        
        /*
@@@@@@@@@ -9025,25 -8905,16 -8905,19 -9021,25 -9008,25 -9023,25 -9025,25 -9008,25 +9024,25 @@@@@@@@@ long sched_group_rt_period(struct task_
        
        static int sched_rt_global_constraints(void)
        {
 --             struct task_group *tg = &root_task_group;
 --             u64 rt_runtime, rt_period;
 ++             u64 runtime, period;
                int ret = 0;
        
 -              rt_period = ktime_to_ns(tg->rt_bandwidth.rt_period);
 -              rt_runtime = tg->rt_bandwidth.rt_runtime;
 +              if (sysctl_sched_rt_period <= 0)
 +                      return -EINVAL;
 +      
  -             rt_period = ktime_to_ns(tg->rt_bandwidth.rt_period);
  -             rt_runtime = tg->rt_bandwidth.rt_runtime;
 ++             runtime = global_rt_runtime();
 ++             period = global_rt_period();
 ++     
 ++             /*
 ++              * Sanity check on the sysctl variables.
 ++              */
 ++             if (runtime > period && runtime != RUNTIME_INF)
 ++                     return -EINVAL;
        
                mutex_lock(&rt_constraints_mutex);
 --             if (!__rt_schedulable(tg, rt_period, rt_runtime))
 --                     ret = -EINVAL;
 ++             read_lock(&tasklist_lock);
 ++             ret = __rt_schedulable(NULL, 0, 0);
 ++             read_unlock(&tasklist_lock);
                mutex_unlock(&rt_constraints_mutex);
        
                return ret;
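
sched_rt_global_constraints() above now validates the sysctl values themselves: the period must be positive, and the global runtime may not exceed the period unless it is unlimited (RUNTIME_INF). A stand-alone sketch of that check, assuming the usual microseconds-to-nanoseconds conversion done by global_rt_period()/global_rt_runtime(); the helper name and return convention are illustrative:

/*
 * Model of the sysctl sanity check added above.
 */
#include <stdio.h>

#define RUNTIME_INF   (~0ULL)
#define EINVAL        22

static int check_rt_globals(int period_us, int runtime_us)
{
        unsigned long long runtime, period;

        if (period_us <= 0)
                return -EINVAL;                 /* period must be positive */

        period  = (unsigned long long)period_us * 1000;        /* us -> ns */
        runtime = runtime_us < 0 ? RUNTIME_INF
                                 : (unsigned long long)runtime_us * 1000;

        /* runtime > period makes no sense unless runtime is unlimited */
        if (runtime > period && runtime != RUNTIME_INF)
                return -EINVAL;

        return 0;
}

int main(void)
{
        printf("%d\n", check_rt_globals(1000000, 950000));     /* 0: ok        */
        printf("%d\n", check_rt_globals(1000000, 2000000));    /* -22: invalid */
        printf("%d\n", check_rt_globals(1000000, -1));         /* 0: unlimited */
        return 0;
}
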
diff --cc kernel/softlockup.c
Simple merge
diff --cc lib/Kconfig.debug
Simple merge