/*
 *  S390 version
 *
 *  Derived from "include/asm-i386/mmu_context.h"
 */
#ifndef __S390_MMU_CONTEXT_H
#define __S390_MMU_CONTEXT_H

#include <asm/pgalloc.h>
#include <asm/uaccess.h>
#include <asm/tlbflush.h>
#include <asm/ctl_reg.h>
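/*
 * Note: on s390 an address space is described by an ASCE (address-
 * space-control element), which combines the physical origin of the
 * top-level page table with its type and length. The hardware walks
 * translation through the ASCEs held in control registers: CR1
 * (primary space), CR7 (secondary space) and CR13 (home space).
 */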
static inline int init_new_context(struct task_struct *tsk,
				   struct mm_struct *mm)
{
	spin_lock_init(&mm->context.pgtable_lock);
	INIT_LIST_HEAD(&mm->context.pgtable_list);
	spin_lock_init(&mm->context.gmap_lock);
	INIT_LIST_HEAD(&mm->context.gmap_list);
	cpumask_clear(&mm->context.cpu_attach_mask);
	atomic_set(&mm->context.attach_count, 0);
	mm->context.flush_mm = 0;
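	/*
	 * PGSTEs are per-pte extension entries used for KVM guests
	 * (e.g. storage-key handling); their setup below is only
	 * compiled in when the kernel is built with CONFIG_PGSTE.
	 */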
#ifdef CONFIG_PGSTE
	mm->context.alloc_pgste = page_table_allocate_pgste;
	mm->context.has_pgste = 0;
	mm->context.use_skey = 0;
#endif
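	/*
	 * The ASCE type follows the address-space size: 2GB (two-level,
	 * segment table) for compat tasks, 4TB (three-level, region-third
	 * table) for freshly exec'd tasks, and 8PB (four-level,
	 * region-second table) once a task has grown beyond 4TB.
	 */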
	switch (mm->context.asce_limit) {
	case 1UL << 42:
		/*
		 * forked 3-level task, fall through to set new asce with new
		 * mm->pgd
		 */
	case 0:
		/* context created by exec, set asce limit to 4TB */
		mm->context.asce_limit = STACK_TOP_MAX;
		mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
				   _ASCE_USER_BITS | _ASCE_TYPE_REGION3;
		break;
	case 1UL << 53:
		/* forked 4-level task, set new asce with new mm->pgd */
		mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
				   _ASCE_USER_BITS | _ASCE_TYPE_REGION2;
		break;
	case 1UL << 31:
		/* forked 2-level compat task, set new asce with new mm->pgd */
		mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
				   _ASCE_USER_BITS | _ASCE_TYPE_SEGMENT;
		/* pgd_alloc() did not increase mm->nr_pmds */
		mm_inc_nr_pmds(mm);
	}
	crst_table_init((unsigned long *) mm->pgd, pgd_entry_type(mm));
	return 0;
}
#define destroy_context(mm) do { } while (0)
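/*
 * Make the current mm's ASCE the one used for user accesses. CR7
 * (the secondary-space ASCE) is reloaded at once only if uaccess for
 * this task goes through the secondary space (mm_segment.ar4);
 * setting CIF_ASCE defers the CR1 (primary-space) update to the
 * return-to-user path.
 */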
static inline void set_user_asce(struct mm_struct *mm)
{
	S390_lowcore.user_asce = mm->context.asce;
	if (current->thread.mm_segment.ar4)
		__ctl_load(S390_lowcore.user_asce, 7, 7);
	set_cpu_flag(CIF_ASCE);
}
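/*
 * Fall back to the kernel ASCE for user accesses, e.g. while no user
 * address space is attached; both CR1 (primary) and CR7 (secondary)
 * end up pointing at the kernel ASCE.
 */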
static inline void clear_user_asce(void)
{
	S390_lowcore.user_asce = S390_lowcore.kernel_asce;

	__ctl_load(S390_lowcore.user_asce, 1, 1);
	__ctl_load(S390_lowcore.user_asce, 7, 7);
}
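/*
 * Make sure CR1 points at the kernel ASCE, so that no stale user ASCE
 * is left in the primary space while the scheduler still runs on the
 * old mm; CIF_ASCE then has the user ASCE restored before returning
 * to user space.
 */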
static inline void load_kernel_asce(void)
{
	unsigned long asce;

	__ctl_store(asce, 1, 1);
	if (asce != S390_lowcore.kernel_asce)
		__ctl_load(S390_lowcore.kernel_asce, 1, 1);
	set_cpu_flag(CIF_ASCE);
}
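/*
 * switch_mm() runs with interrupts disabled under the scheduler's
 * locks, so only the cheap part happens here: the kernel ASCE is
 * loaded and the attach bookkeeping is updated. Installing the new
 * user ASCE is left to finish_arch_post_lock_switch() once the
 * runqueue lock has been dropped.
 */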
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			     struct task_struct *tsk)
{
	int cpu = smp_processor_id();

	S390_lowcore.user_asce = next->context.asce;
	if (prev == next)
		return;
	if (MACHINE_HAS_TLB_LC)
		cpumask_set_cpu(cpu, &next->context.cpu_attach_mask);
	/* Clear old ASCE by loading the kernel ASCE. */
	__ctl_load(S390_lowcore.kernel_asce, 1, 1);
	__ctl_load(S390_lowcore.kernel_asce, 7, 7);
	atomic_inc(&next->context.attach_count);
	atomic_dec(&prev->context.attach_count);
	if (MACHINE_HAS_TLB_LC)
		cpumask_clear_cpu(cpu, &prev->context.cpu_attach_mask);
}
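/*
 * Complete the mm switch after the scheduler has dropped its locks.
 * The upper 16 bits of attach_count serve the TLB flush code as an
 * in-progress counter, so the loop below waits until no concurrent
 * flush is running before this CPU announces itself in mm_cpumask()
 * and replays a deferred flush, if one is pending.
 */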
#define finish_arch_post_lock_switch finish_arch_post_lock_switch
static inline void finish_arch_post_lock_switch(void)
{
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;

	load_kernel_asce();
	if (mm) {
		preempt_disable();
		while (atomic_read(&mm->context.attach_count) >> 16)
			cpu_relax();

		cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
		if (mm->context.flush_mm)
			__tlb_flush_mm(mm);
		preempt_enable();
	}
	set_fs(current->thread.mm_segment);
}
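/*
 * Lazy TLB handling and deactivate_mm need no extra work here;
 * everything relevant is done in switch_mm() and
 * finish_arch_post_lock_switch().
 */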
#define enter_lazy_tlb(mm,tsk)	do { } while (0)
#define deactivate_mm(tsk,mm)	do { } while (0)
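/*
 * activate_mm() is the exec-time variant of the mm switch: unlike the
 * scheduler path it may install the user ASCE right away via
 * set_user_asce().
 */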
static inline void activate_mm(struct mm_struct *prev,
			       struct mm_struct *next)
{
	switch_mm(prev, next, current);
	cpumask_set_cpu(smp_processor_id(), mm_cpumask(next));
	set_user_asce(next);
}
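/*
 * The following hooks are required by the generic mm code but have
 * nothing to do on s390.
 */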
static inline void arch_dup_mmap(struct mm_struct *oldmm,
				 struct mm_struct *mm)
{
}
static inline void arch_exit_mmap(struct mm_struct *mm)
{
}
static inline void arch_unmap(struct mm_struct *mm,
			      struct vm_area_struct *vma,
			      unsigned long start, unsigned long end)
{
}
static inline void arch_bprm_mm_init(struct mm_struct *mm,
				     struct vm_area_struct *vma)
{
}
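/*
 * The *_access_permitted() hooks exist for architectures with memory
 * protection keys; s390 implements none here, so every access is
 * permitted.
 */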
static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
					     bool write, bool execute, bool foreign)
{
	/* by default, allow everything */
	return true;
}
static inline bool arch_pte_access_permitted(pte_t pte, bool write)
{
	/* by default, allow everything */
	return true;
}
#endif /* __S390_MMU_CONTEXT_H */