Merge branch 'linus' into sched/core
author Ingo Molnar <mingo@elte.hu>
Sun, 25 Oct 2009 16:30:53 +0000 (17:30 +0100)
committer Ingo Molnar <mingo@elte.hu>
Sun, 25 Oct 2009 16:30:53 +0000 (17:30 +0100)
Conflicts:
fs/proc/array.c

Merge reason: resolve conflict and queue up dependent patch.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
fs/proc/array.c
kernel/cpuset.c
kernel/sched.c

diff --cc fs/proc/array.c
@@@ -321,16 -322,94 +322,104 @@@ static inline void task_context_switch_
                        p->nivcsw);
  }
  
+ #ifdef CONFIG_MMU
+ struct stack_stats {
+       struct vm_area_struct *vma;
+       unsigned long   startpage;
+       unsigned long   usage;
+ };
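+ /*
+  * Count how much of the stack VMA is backed by present or swapped-out
+  * pages, i.e. how deep the stack has grown.
+  */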
+ static int stack_usage_pte_range(pmd_t *pmd, unsigned long addr,
+                               unsigned long end, struct mm_walk *walk)
+ {
+       struct stack_stats *ss = walk->private;
+       struct vm_area_struct *vma = ss->vma;
+       pte_t *pte, ptent;
+       spinlock_t *ptl;
+       int ret = 0;
+       pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
+       for (; addr != end; pte++, addr += PAGE_SIZE) {
+               ptent = *pte;
+ #ifdef CONFIG_STACK_GROWSUP
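+               /* growing up: the highest touched page bounds the usage */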
+               if (pte_present(ptent) || is_swap_pte(ptent))
+                       ss->usage = addr - ss->startpage + PAGE_SIZE;
+ #else
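+               /* growing down: the lowest touched page bounds the usage */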
+               if (pte_present(ptent) || is_swap_pte(ptent)) {
+                       ss->usage = ss->startpage - addr + PAGE_SIZE;
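+                       /* step past this entry so pte_unmap_unlock(pte - 1)
+                        * below matches the normal loop exit */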
+                       pte++;
+                       ret = 1;
+                       break;
+               }
+ #endif
+       }
+       pte_unmap_unlock(pte - 1, ptl);
+       cond_resched();
+       return ret;
+ }
+ static inline unsigned long get_stack_usage_in_bytes(struct vm_area_struct *vma,
+                               struct task_struct *task)
+ {
+       struct stack_stats ss;
+       struct mm_walk stack_walk = {
+               .pmd_entry = stack_usage_pte_range,
+               .mm = vma->vm_mm,
+               .private = &ss,
+       };
+       if (!vma->vm_mm || is_vm_hugetlb_page(vma))
+               return 0;
+       ss.vma = vma;
+       ss.startpage = task->stack_start & PAGE_MASK;
+       ss.usage = 0;
+ #ifdef CONFIG_STACK_GROWSUP
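+       /* walk from the page holding the stack pointer to the end of the VMA */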
+       walk_page_range(KSTK_ESP(task) & PAGE_MASK, vma->vm_end,
+               &stack_walk);
+ #else
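+       /* walk from the start of the VMA up to the page holding the stack pointer */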
+       walk_page_range(vma->vm_start, (KSTK_ESP(task) & PAGE_MASK) + PAGE_SIZE,
+               &stack_walk);
+ #endif
+       return ss.usage;
+ }
+ static inline void task_show_stack_usage(struct seq_file *m,
+                                               struct task_struct *task)
+ {
+       struct vm_area_struct   *vma;
+       struct mm_struct        *mm = get_task_mm(task);
+       if (mm) {
+               down_read(&mm->mmap_sem);
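+               /* look up the stack VMA via the task's recorded stack start */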
+               vma = find_vma(mm, task->stack_start);
+               if (vma)
+                       seq_printf(m, "Stack usage:\t%lu kB\n",
+                               get_stack_usage_in_bytes(vma, task) >> 10);
+               up_read(&mm->mmap_sem);
+               mmput(mm);
+       }
+ }
+ #else
+ static void task_show_stack_usage(struct seq_file *m, struct task_struct *task)
+ {
+ }
+ #endif                /* CONFIG_MMU */
 +static void task_cpus_allowed(struct seq_file *m, struct task_struct *task)
 +{
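 +      /* show the allowed CPUs both as a hex mask and as a CPU list */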
 +      seq_printf(m, "Cpus_allowed:\t");
 +      seq_cpumask(m, &task->cpus_allowed);
 +      seq_printf(m, "\n");
 +      seq_printf(m, "Cpus_allowed_list:\t");
 +      seq_cpumask_list(m, &task->cpus_allowed);
 +      seq_printf(m, "\n");
 +}
 +
  int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
                        struct pid *pid, struct task_struct *task)
  {
diff --cc kernel/cpuset.c
Simple merge
diff --cc kernel/sched.c
Simple merge