diff --git a/kernel/fork.c b/kernel/fork.c
index fbd3497..0cf9cdb 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -374,12 +374,11 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
         */
        down_write_nested(&mm->mmap_sem, SINGLE_DEPTH_NESTING);
 
-       mm->locked_vm = 0;
-       mm->mmap = NULL;
-       mm->vmacache_seqnum = 0;
-       mm->map_count = 0;
-       cpumask_clear(mm_cpumask(mm));
-       mm->mm_rb = RB_ROOT;
+       mm->total_vm = oldmm->total_vm;
+       mm->shared_vm = oldmm->shared_vm;
+       mm->exec_vm = oldmm->exec_vm;
+       mm->stack_vm = oldmm->stack_vm;
+
        rb_link = &mm->mm_rb.rb_node;
        rb_parent = NULL;
        pprev = &mm->mmap;
@@ -430,7 +429,7 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
                                atomic_dec(&inode->i_writecount);
                        mutex_lock(&mapping->i_mmap_mutex);
                        if (tmp->vm_flags & VM_SHARED)
-                               mapping->i_mmap_writable++;
+                               atomic_inc(&mapping->i_mmap_writable);
                        flush_dcache_mmap_lock(mapping);
                        /* insert tmp into the share list, just after mpnt */
                        if (unlikely(tmp->vm_flags & VM_NONLINEAR))
@@ -536,19 +535,37 @@ static void mm_init_aio(struct mm_struct *mm)
 #endif
 }
 
+static void mm_init_owner(struct mm_struct *mm, struct task_struct *p)
+{
+#ifdef CONFIG_MEMCG
+       mm->owner = p;
+#endif
+}
+
 static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p)
 {
+       mm->mmap = NULL;
+       mm->mm_rb = RB_ROOT;
+       mm->vmacache_seqnum = 0;
        atomic_set(&mm->mm_users, 1);
        atomic_set(&mm->mm_count, 1);
        init_rwsem(&mm->mmap_sem);
        INIT_LIST_HEAD(&mm->mmlist);
        mm->core_state = NULL;
        atomic_long_set(&mm->nr_ptes, 0);
+       mm->map_count = 0;
+       mm->locked_vm = 0;
+       mm->pinned_vm = 0;
        memset(&mm->rss_stat, 0, sizeof(mm->rss_stat));
        spin_lock_init(&mm->page_table_lock);
+       mm_init_cpumask(mm);
        mm_init_aio(mm);
        mm_init_owner(mm, p);
+       mmu_notifier_mm_init(mm);
        clear_tlb_flush_pending(mm);
+#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS
+       mm->pmd_huge_pte = NULL;
+#endif
 
        if (current->mm) {
                mm->flags = current->mm->flags & MMF_INIT_MASK;
@@ -558,11 +575,17 @@ static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p)
                mm->def_flags = 0;
        }
 
-       if (likely(!mm_alloc_pgd(mm))) {
-               mmu_notifier_mm_init(mm);
-               return mm;
-       }
+       if (mm_alloc_pgd(mm))
+               goto fail_nopgd;
+
+       if (init_new_context(p, mm))
+               goto fail_nocontext;
+
+       return mm;
 
+fail_nocontext:
+       mm_free_pgd(mm);
+fail_nopgd:
        free_mm(mm);
        return NULL;
 }
@@ -596,7 +619,6 @@ struct mm_struct *mm_alloc(void)
                return NULL;
 
        memset(mm, 0, sizeof(*mm));
-       mm_init_cpumask(mm);
        return mm_init(mm, current);
 }
 
@@ -828,17 +850,10 @@ static struct mm_struct *dup_mm(struct task_struct *tsk)
                goto fail_nomem;
 
        memcpy(mm, oldmm, sizeof(*mm));
-       mm_init_cpumask(mm);
 
-#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS
-       mm->pmd_huge_pte = NULL;
-#endif
        if (!mm_init(mm, tsk))
                goto fail_nomem;
 
-       if (init_new_context(tsk, mm))
-               goto fail_nocontext;
-
        dup_mm_exe_file(oldmm, mm);
 
        err = dup_mmap(mm, oldmm);
@@ -860,15 +875,6 @@ free_pt:
 
 fail_nomem:
        return NULL;
-
-fail_nocontext:
-       /*
-        * If init_new_context() failed, we cannot use mmput() to free the mm
-        * because it calls destroy_context()
-        */
-       mm_free_pgd(mm);
-       free_mm(mm);
-       return NULL;
 }
 
 static int copy_mm(unsigned long clone_flags, struct task_struct *tsk)
@@ -1099,7 +1105,7 @@ static void copy_seccomp(struct task_struct *p)
         * needed because this new task is not yet running and cannot
         * be racing exec.
         */
-       BUG_ON(!spin_is_locked(&current->sighand->siglock));
+       assert_spin_locked(&current->sighand->siglock);
 
        /* Ref-count the new filter user, and assign it. */
        get_seccomp_filter(current);
@@ -1140,13 +1146,6 @@ static void rt_mutex_init_task(struct task_struct *p)
 #endif
 }
 
-#ifdef CONFIG_MEMCG
-void mm_init_owner(struct mm_struct *mm, struct task_struct *p)
-{
-       mm->owner = p;
-}
-#endif /* CONFIG_MEMCG */
-
 /*
  * Initialize POSIX timer handling for a single task.
  */
@@ -1346,10 +1345,6 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 #ifdef CONFIG_DEBUG_MUTEXES
        p->blocked_on = NULL; /* not blocked yet */
 #endif
-#ifdef CONFIG_MEMCG
-       p->memcg_batch.do_batch = 0;
-       p->memcg_batch.memcg = NULL;
-#endif
 #ifdef CONFIG_BCACHE
        p->sequential_io        = 0;
        p->sequential_io_avg    = 0;
@@ -1367,6 +1362,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
        if (retval)
                goto bad_fork_cleanup_policy;
        /* copy all the process information */
+       shm_init_task(p);
        retval = copy_semundo(clone_flags, p);
        if (retval)
                goto bad_fork_cleanup_audit;
@@ -1918,6 +1914,11 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
                         */
                        exit_sem(current);
                }
+               if (unshare_flags & CLONE_NEWIPC) {
+                       /* Orphan segments in old ns (see sem above). */
+                       exit_shm(current);
+                       shm_init_task(current);
+               }
 
                if (new_nsproxy)
                        switch_task_namespaces(current, new_nsproxy);