/*
 * Derived from "arch/i386/kernel/process.c"
 *   Copyright (C) 1995 Linus Torvalds
 *
 * Updated and modified by Cort Dougan (cort@cs.nmt.edu) and
 * Paul Mackerras (paulus@cs.anu.edu.au)
 *
 *   Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/elf.h>
#include <linux/prctl.h>
#include <linux/init_task.h>
#include <linux/export.h>
#include <linux/kallsyms.h>
#include <linux/mqueue.h>
#include <linux/hardirq.h>
#include <linux/utsname.h>
#include <linux/ftrace.h>
#include <linux/kernel_stat.h>
#include <linux/personality.h>
#include <linux/random.h>
#include <linux/hw_breakpoint.h>
#include <linux/uaccess.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/machdep.h>
#include <asm/runlatch.h>
#include <asm/syscalls.h>
#include <asm/switch_to.h>
#include <asm/debug.h>
#include <asm/firmware.h>
#include <asm/code-patching.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
/* Transactional Memory debug */
#ifdef TM_DEBUG_SW
#define TM_DEBUG(x...) printk(KERN_INFO x)
#else
#define TM_DEBUG(x...) do { } while(0)
#endif
extern unsigned long _get_SP(void);
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
void giveup_fpu_maybe_transactional(struct task_struct *tsk)
{
	/*
	 * If we are saving the current thread's registers, and the
	 * thread is in a transactional state, set the TIF_RESTORE_TM
	 * bit so that we know to restore the registers before
	 * returning to userspace.
	 */
	if (tsk == current && tsk->thread.regs &&
	    MSR_TM_ACTIVE(tsk->thread.regs->msr) &&
	    !test_thread_flag(TIF_RESTORE_TM)) {
		tsk->thread.ckpt_regs.msr = tsk->thread.regs->msr;
		set_thread_flag(TIF_RESTORE_TM);
	}

	giveup_fpu(tsk);
}

void giveup_altivec_maybe_transactional(struct task_struct *tsk)
{
	/*
	 * If we are saving the current thread's registers, and the
	 * thread is in a transactional state, set the TIF_RESTORE_TM
	 * bit so that we know to restore the registers before
	 * returning to userspace.
	 */
	if (tsk == current && tsk->thread.regs &&
	    MSR_TM_ACTIVE(tsk->thread.regs->msr) &&
	    !test_thread_flag(TIF_RESTORE_TM)) {
		tsk->thread.ckpt_regs.msr = tsk->thread.regs->msr;
		set_thread_flag(TIF_RESTORE_TM);
	}

	giveup_altivec(tsk);
}

#else
#define giveup_fpu_maybe_transactional(tsk)	giveup_fpu(tsk)
#define giveup_altivec_maybe_transactional(tsk)	giveup_altivec(tsk)
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
#ifdef CONFIG_PPC_FPU
/*
 * Make sure the floating-point register state in the
 * thread_struct is up to date for task tsk.
 */
void flush_fp_to_thread(struct task_struct *tsk)
{
	if (tsk->thread.regs) {
		/*
		 * We need to disable preemption here because if we didn't,
		 * another process could get scheduled after the regs->msr
		 * test but before we have finished saving the FP registers
		 * to the thread_struct. That process could take over the
		 * FPU, and then when we get scheduled again we would store
		 * bogus values for the remaining FP registers.
		 */
		preempt_disable();
		if (tsk->thread.regs->msr & MSR_FP) {
			/*
			 * This should only ever be called for current or
			 * for a stopped child process. Since we save away
			 * the FP register state on context switch,
			 * there is something wrong if a stopped child appears
			 * to still have its FP state in the CPU registers.
			 */
			BUG_ON(tsk != current);
			giveup_fpu_maybe_transactional(tsk);
		}
		preempt_enable();
	}
}
EXPORT_SYMBOL_GPL(flush_fp_to_thread);
#endif /* CONFIG_PPC_FPU */
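
/*
 * Illustrative usage (not part of the original file): a typical caller
 * is a ptrace-style register read, which must push any live FP state
 * out of the CPU before copying it from the thread_struct, e.g.:
 *
 *	flush_fp_to_thread(child);
 *	memcpy(buf, &child->thread.fp_state, sizeof(struct thread_fp_state));
 *
 * where "child" is a stopped task and "buf" is a hypothetical
 * destination buffer.
 */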
void enable_kernel_fp(void)
{
	WARN_ON(preemptible());

	if (current->thread.regs && (current->thread.regs->msr & MSR_FP))
		giveup_fpu_maybe_transactional(current);
	else
		giveup_fpu(NULL);	/* just enables FP for kernel */
}
EXPORT_SYMBOL(enable_kernel_fp);

#ifdef CONFIG_ALTIVEC
void enable_kernel_altivec(void)
{
	WARN_ON(preemptible());

	if (current->thread.regs && (current->thread.regs->msr & MSR_VEC))
		giveup_altivec_maybe_transactional(current);
	else
		giveup_altivec_notask();
}
EXPORT_SYMBOL(enable_kernel_altivec);

/*
 * Make sure the VMX/Altivec register state in the
 * thread_struct is up to date for task tsk.
 */
void flush_altivec_to_thread(struct task_struct *tsk)
{
	if (tsk->thread.regs) {
		preempt_disable();
		if (tsk->thread.regs->msr & MSR_VEC) {
			BUG_ON(tsk != current);
			giveup_altivec_maybe_transactional(tsk);
		}
		preempt_enable();
	}
}
EXPORT_SYMBOL_GPL(flush_altivec_to_thread);
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_VSX
void enable_kernel_vsx(void)
{
	WARN_ON(preemptible());

	if (current->thread.regs && (current->thread.regs->msr & MSR_VSX))
		giveup_vsx(current);
	else
		giveup_vsx(NULL);	/* just enable vsx for kernel - force */
}
EXPORT_SYMBOL(enable_kernel_vsx);

void giveup_vsx(struct task_struct *tsk)
{
	giveup_fpu_maybe_transactional(tsk);
	giveup_altivec_maybe_transactional(tsk);
	__giveup_vsx(tsk);
}
EXPORT_SYMBOL(giveup_vsx);
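
/*
 * Design note: the VSX registers overlap the FP and VMX register
 * files, so giving up VSX state means flushing the FP and Altivec
 * state too, via their maybe-transactional helpers above.
 */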
void flush_vsx_to_thread(struct task_struct *tsk)
{
	if (tsk->thread.regs) {
		preempt_disable();
		if (tsk->thread.regs->msr & MSR_VSX) {
			BUG_ON(tsk != current);
			giveup_vsx(tsk);
		}
		preempt_enable();
	}
}
EXPORT_SYMBOL_GPL(flush_vsx_to_thread);
#endif /* CONFIG_VSX */
#ifdef CONFIG_SPE
void enable_kernel_spe(void)
{
	WARN_ON(preemptible());

	if (current->thread.regs && (current->thread.regs->msr & MSR_SPE))
		giveup_spe(current);
	else
		giveup_spe(NULL);	/* just enable SPE for kernel - force */
}
EXPORT_SYMBOL(enable_kernel_spe);

void flush_spe_to_thread(struct task_struct *tsk)
{
	if (tsk->thread.regs) {
		preempt_disable();
		if (tsk->thread.regs->msr & MSR_SPE) {
			BUG_ON(tsk != current);
			tsk->thread.spefscr = mfspr(SPRN_SPEFSCR);
			giveup_spe(tsk);
		}
		preempt_enable();
	}
}
#endif /* CONFIG_SPE */
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
void do_send_trap(struct pt_regs *regs, unsigned long address,
		  unsigned long error_code, int signal_code, int breakpt)
{
	siginfo_t info;

	current->thread.trap_nr = signal_code;
	if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, error_code,
			11, SIGSEGV) == NOTIFY_STOP)
		return;

	/* Deliver the signal to userspace */
	info.si_signo = SIGTRAP;
	info.si_errno = breakpt;	/* breakpoint or watchpoint id */
	info.si_code = signal_code;
	info.si_addr = (void __user *)address;
	force_sig_info(SIGTRAP, &info, current);
}
#else	/* !CONFIG_PPC_ADV_DEBUG_REGS */
void do_break(struct pt_regs *regs, unsigned long address,
	      unsigned long error_code)
{
	siginfo_t info;

	current->thread.trap_nr = TRAP_HWBKPT;
	if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, error_code,
			11, SIGSEGV) == NOTIFY_STOP)
		return;

	if (debugger_break_match(regs))
		return;

	/* Clear the breakpoint */
	hw_breakpoint_disable();

	/* Deliver the signal to userspace */
	info.si_signo = SIGTRAP;
	info.si_errno = 0;
	info.si_code = TRAP_HWBKPT;
	info.si_addr = (void __user *)address;
	force_sig_info(SIGTRAP, &info, current);
}
#endif	/* CONFIG_PPC_ADV_DEBUG_REGS */
static DEFINE_PER_CPU(struct arch_hw_breakpoint, current_brk);
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
/*
 * Set the debug registers back to their default "safe" values.
 */
static void set_debug_reg_defaults(struct thread_struct *thread)
{
	thread->debug.iac1 = thread->debug.iac2 = 0;
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
	thread->debug.iac3 = thread->debug.iac4 = 0;
#endif
	thread->debug.dac1 = thread->debug.dac2 = 0;
#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
	thread->debug.dvc1 = thread->debug.dvc2 = 0;
#endif
	thread->debug.dbcr0 = 0;
#ifdef CONFIG_BOOKE
	/*
	 * Force User/Supervisor bits to b11 (user-only MSR[PR]=1)
	 */
	thread->debug.dbcr1 = DBCR1_IAC1US | DBCR1_IAC2US |
			      DBCR1_IAC3US | DBCR1_IAC4US;
	/*
	 * Force Data Address Compare User/Supervisor bits to be User-only
	 * (0b11 MSR[PR]=1) and set all other bits in DBCR2 register to be 0.
	 */
	thread->debug.dbcr2 = DBCR2_DAC1US | DBCR2_DAC2US;
#else
	thread->debug.dbcr1 = 0;
#endif
}

static void prime_debug_regs(struct debug_reg *debug)
{
	/*
	 * We could have inherited MSR_DE from userspace, since
	 * it doesn't get cleared on exception entry. Make sure
	 * MSR_DE is clear before we enable any debug events.
	 */
	mtmsr(mfmsr() & ~MSR_DE);

	mtspr(SPRN_IAC1, debug->iac1);
	mtspr(SPRN_IAC2, debug->iac2);
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
	mtspr(SPRN_IAC3, debug->iac3);
	mtspr(SPRN_IAC4, debug->iac4);
#endif
	mtspr(SPRN_DAC1, debug->dac1);
	mtspr(SPRN_DAC2, debug->dac2);
#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
	mtspr(SPRN_DVC1, debug->dvc1);
	mtspr(SPRN_DVC2, debug->dvc2);
#endif
	mtspr(SPRN_DBCR0, debug->dbcr0);
	mtspr(SPRN_DBCR1, debug->dbcr1);
#ifdef CONFIG_BOOKE
	mtspr(SPRN_DBCR2, debug->dbcr2);
#endif
}
/*
 * Unless neither the old nor the new thread is making use of the
 * debug registers, set the debug registers from the values
 * stored in the new thread.
 */
void switch_booke_debug_regs(struct debug_reg *new_debug)
{
	if ((current->thread.debug.dbcr0 & DBCR0_IDM)
		|| (new_debug->dbcr0 & DBCR0_IDM))
		prime_debug_regs(new_debug);
}
EXPORT_SYMBOL_GPL(switch_booke_debug_regs);
#else	/* !CONFIG_PPC_ADV_DEBUG_REGS */
#ifndef CONFIG_HAVE_HW_BREAKPOINT
static void set_debug_reg_defaults(struct thread_struct *thread)
{
	thread->hw_brk.address = 0;
	thread->hw_brk.type = 0;
	set_breakpoint(&thread->hw_brk);
}
#endif /* !CONFIG_HAVE_HW_BREAKPOINT */
#endif	/* CONFIG_PPC_ADV_DEBUG_REGS */
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
static inline int __set_dabr(unsigned long dabr, unsigned long dabrx)
{
	mtspr(SPRN_DAC1, dabr);
#ifdef CONFIG_PPC_47x
	isync();
#endif
	return 0;
}
#elif defined(CONFIG_PPC_BOOK3S)
static inline int __set_dabr(unsigned long dabr, unsigned long dabrx)
{
	mtspr(SPRN_DABR, dabr);
	if (cpu_has_feature(CPU_FTR_DABRX))
		mtspr(SPRN_DABRX, dabrx);
	return 0;
}
#else
static inline int __set_dabr(unsigned long dabr, unsigned long dabrx)
{
	return -EINVAL;
}
#endif

static inline int set_dabr(struct arch_hw_breakpoint *brk)
{
	unsigned long dabr, dabrx;

	dabr = brk->address | (brk->type & HW_BRK_TYPE_DABR);
	dabrx = ((brk->type >> 3) & 0x7);

	if (ppc_md.set_dabr)
		return ppc_md.set_dabr(dabr, dabrx);

	return __set_dabr(dabr, dabrx);
}
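
/*
 * Worked example (assuming the HW_BRK_TYPE_* bit layout, with the
 * privilege bits at type bits 3..5): for a user-mode write watchpoint,
 * brk->type = HW_BRK_TYPE_WRITE | HW_BRK_TYPE_USER, so dabr carries
 * the address plus the write enable in its low bits, and
 * dabrx = (brk->type >> 3) & 0x7 keeps just the user privilege bit.
 */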
static inline int set_dawr(struct arch_hw_breakpoint *brk)
{
	unsigned long dawr, dawrx, mrd;

	dawr = brk->address;

	dawrx  = (brk->type & (HW_BRK_TYPE_READ | HW_BRK_TYPE_WRITE))
			<< (63 - 58); /* read/write bits */
	dawrx |= ((brk->type & (HW_BRK_TYPE_TRANSLATE)) >> 2)
			<< (63 - 59); /* translate */
	dawrx |= (brk->type & (HW_BRK_TYPE_PRIV_ALL))
			>> 3; /* PRIM bits */
	/*
	 * dawr length is stored in field MDR bits 48:53. Matches range in
	 * doublewords (64 bits) biased by -1, e.g. 0b000000=1DW and
	 * 0b111111=64DW.
	 * brk->len is in bytes.
	 * This aligns up to double word size, shifts and does the bias.
	 */
	mrd = ((brk->len + 7) >> 3) - 1;
	dawrx |= (mrd & 0x3f) << (63 - 53);

	if (ppc_md.set_dawr)
		return ppc_md.set_dawr(dawr, dawrx);
	mtspr(SPRN_DAWR, dawr);
	mtspr(SPRN_DAWRX, dawrx);
	return 0;
}
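
/*
 * Worked example for the MDR length encoding above: a 12-byte range
 * rounds up to two doublewords, so mrd = ((12 + 7) >> 3) - 1 = 1,
 * which encodes as 0b000001 in DAWRX bits 48:53 (a bias of -1, so
 * 0b000000 still means one doubleword).
 */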
void __set_breakpoint(struct arch_hw_breakpoint *brk)
{
	memcpy(this_cpu_ptr(&current_brk), brk, sizeof(*brk));

	if (cpu_has_feature(CPU_FTR_DAWR))
		set_dawr(brk);
	else
		set_dabr(brk);
}

void set_breakpoint(struct arch_hw_breakpoint *brk)
{
	preempt_disable();
	__set_breakpoint(brk);
	preempt_enable();
}

#ifdef CONFIG_PPC64
DEFINE_PER_CPU(struct cpu_usage, cpu_usage_array);
#endif

static inline bool hw_brk_match(struct arch_hw_breakpoint *a,
				struct arch_hw_breakpoint *b)
{
	if (a->address != b->address)
		return false;
	if (a->type != b->type)
		return false;
	if (a->len != b->len)
		return false;
	return true;
}
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
static void tm_reclaim_thread(struct thread_struct *thr,
			      struct thread_info *ti, uint8_t cause)
{
	unsigned long msr_diff = 0;

	/*
	 * If FP/VSX registers have already been saved to the
	 * thread_struct, move them to the transact_fp array.
	 * We clear the TIF_RESTORE_TM bit since after the reclaim
	 * the thread will no longer be transactional.
	 */
	if (test_ti_thread_flag(ti, TIF_RESTORE_TM)) {
		msr_diff = thr->ckpt_regs.msr & ~thr->regs->msr;
		if (msr_diff & MSR_FP)
			memcpy(&thr->transact_fp, &thr->fp_state,
			       sizeof(struct thread_fp_state));
		if (msr_diff & MSR_VEC)
			memcpy(&thr->transact_vr, &thr->vr_state,
			       sizeof(struct thread_vr_state));
		clear_ti_thread_flag(ti, TIF_RESTORE_TM);
		msr_diff &= MSR_FP | MSR_VEC | MSR_VSX | MSR_FE0 | MSR_FE1;
	}

	tm_reclaim(thr, thr->regs->msr, cause);

	/*
	 * Having done the reclaim, we now have the checkpointed
	 * FP/VSX values in the registers. These might be valid
	 * even if we have previously called enable_kernel_fp() or
	 * flush_fp_to_thread(), so update thr->regs->msr to
	 * indicate their current validity.
	 */
	thr->regs->msr |= msr_diff;
}
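
/*
 * Illustrative walk-through (assumed values): suppose the thread
 * checkpointed with MSR_FP set, but a later flush_fp_to_thread()
 * cleared MSR_FP from regs->msr. Then msr_diff = ckpt_regs.msr &
 * ~regs->msr contains MSR_FP, the saved fp_state is copied into
 * transact_fp before the reclaim overwrites it, and OR-ing msr_diff
 * back into regs->msr afterwards marks the reloaded checkpointed FP
 * registers as valid again.
 */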
void tm_reclaim_current(uint8_t cause)
{
	tm_enable();
	tm_reclaim_thread(&current->thread, current_thread_info(), cause);
}

static inline void tm_reclaim_task(struct task_struct *tsk)
{
	/* We have to work out if we're switching from/to a task that's in the
	 * middle of a transaction.
	 *
	 * In switching we need to maintain a 2nd register state as
	 * oldtask->thread.ckpt_regs. We tm_reclaim(oldproc); this saves the
	 * checkpointed (tbegin) state in ckpt_regs and saves the transactional
	 * (current) FPRs into oldtask->thread.transact_fpr[].
	 *
	 * We also context switch (save) TFHAR/TEXASR/TFIAR in here.
	 */
	struct thread_struct *thr = &tsk->thread;

	if (!thr->regs)
		return;

	if (!MSR_TM_ACTIVE(thr->regs->msr))
		goto out_and_saveregs;

	/* Stash the original thread MSR, as giveup_fpu et al will
	 * modify it. We hold onto it to see whether the task used
	 * FP & vector regs. If the TIF_RESTORE_TM flag is set,
	 * ckpt_regs.msr is already set.
	 */
	if (!test_ti_thread_flag(task_thread_info(tsk), TIF_RESTORE_TM))
		thr->ckpt_regs.msr = thr->regs->msr;

	TM_DEBUG("--- tm_reclaim on pid %d (NIP=%lx, "
		 "ccr=%lx, msr=%lx, trap=%lx)\n",
		 tsk->pid, thr->regs->nip,
		 thr->regs->ccr, thr->regs->msr,
		 thr->regs->trap);

	tm_reclaim_thread(thr, task_thread_info(tsk), TM_CAUSE_RESCHED);

	TM_DEBUG("--- tm_reclaim on pid %d complete\n",
		 tsk->pid);

out_and_saveregs:
	/* Always save the regs here, even if a transaction's not active.
	 * This context-switches a thread's TM info SPRs. We do it here to
	 * be consistent with the restore path (in recheckpoint) which
	 * cannot happen later in _switch().
	 */
	tm_save_sprs(thr);
}

extern void __tm_recheckpoint(struct thread_struct *thread,
			      unsigned long orig_msr);

void tm_recheckpoint(struct thread_struct *thread,
		     unsigned long orig_msr)
{
	unsigned long flags;

	/* We really can't be interrupted here as the TEXASR registers can't
	 * change and later in the trecheckpoint code, we have a userspace R1.
	 * So let's hard disable over this region.
	 */
	local_irq_save(flags);
	hard_irq_disable();

	/* The TM SPRs are restored here, so that TEXASR.FS can be set
	 * before the trecheckpoint and no explosion occurs.
	 */
	tm_restore_sprs(thread);

	__tm_recheckpoint(thread, orig_msr);

	local_irq_restore(flags);
}
static inline void tm_recheckpoint_new_task(struct task_struct *new)
{
	unsigned long msr;

	if (!cpu_has_feature(CPU_FTR_TM))
		return;

	/* Recheckpoint the registers of the thread we're about to switch to.
	 *
	 * If the task was using FP, we non-lazily reload both the original and
	 * the speculative FP register states. This is because the kernel
	 * doesn't see if/when a TM rollback occurs, so if we take an FP
	 * unavailable later, we are unable to determine which set of FP regs
	 * need to be restored.
	 */
	if (!new->thread.regs)
		return;

	if (!MSR_TM_ACTIVE(new->thread.regs->msr)) {
		tm_restore_sprs(&new->thread);
		return;
	}
	msr = new->thread.ckpt_regs.msr;
	/* Recheckpoint to restore original checkpointed register state. */
	TM_DEBUG("*** tm_recheckpoint of pid %d "
		 "(new->msr 0x%lx, new->origmsr 0x%lx)\n",
		 new->pid, new->thread.regs->msr, msr);

	/* This loads the checkpointed FP/VEC state, if used */
	tm_recheckpoint(&new->thread, msr);

	/* This loads the speculative FP/VEC state, if used */
	if (msr & MSR_FP) {
		do_load_up_transact_fpu(&new->thread);
		new->thread.regs->msr |=
			(MSR_FP | new->thread.fpexc_mode);
	}
#ifdef CONFIG_ALTIVEC
	if (msr & MSR_VEC) {
		do_load_up_transact_altivec(&new->thread);
		new->thread.regs->msr |= MSR_VEC;
	}
#endif
	/* We may as well turn on VSX too since all the state is restored now */
	if (msr & MSR_VSX)
		new->thread.regs->msr |= MSR_VSX;

	TM_DEBUG("*** tm_recheckpoint of pid %d complete "
		 "(kernel msr 0x%lx)\n",
		 new->pid, mfmsr());
}
static inline void __switch_to_tm(struct task_struct *prev)
{
	if (cpu_has_feature(CPU_FTR_TM)) {
		tm_enable();
		tm_reclaim_task(prev);
	}
}

/*
 * This is called if we are on the way out to userspace and the
 * TIF_RESTORE_TM flag is set. It checks if we need to reload
 * FP and/or vector state and does so if necessary.
 * If userspace is inside a transaction (whether active or
 * suspended) and FP/VMX/VSX instructions have ever been enabled
 * inside that transaction, then we have to keep them enabled
 * and keep the FP/VMX/VSX state loaded while ever the transaction
 * continues. The reason is that if we didn't, and subsequently
 * got a FP/VMX/VSX unavailable interrupt inside a transaction,
 * we don't know whether it's the same transaction, and thus we
 * don't know which of the checkpointed state and the transactional
 * state to use.
 */
void restore_tm_state(struct pt_regs *regs)
{
	unsigned long msr_diff;

	clear_thread_flag(TIF_RESTORE_TM);
	if (!MSR_TM_ACTIVE(regs->msr))
		return;

	msr_diff = current->thread.ckpt_regs.msr & ~regs->msr;
	msr_diff &= MSR_FP | MSR_VEC | MSR_VSX;
	if (msr_diff & MSR_FP) {
		fp_enable();
		load_fp_state(&current->thread.fp_state);
		regs->msr |= current->thread.fpexc_mode;
	}
	if (msr_diff & MSR_VEC) {
		vec_enable();
		load_vr_state(&current->thread.vr_state);
	}
	regs->msr |= msr_diff;
}

#else
#define tm_recheckpoint_new_task(new)
#define __switch_to_tm(prev)
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
static inline void save_sprs(struct thread_struct *t)
{
#ifdef CONFIG_ALTIVEC
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		t->vrsave = mfspr(SPRN_VRSAVE);
#endif
#ifdef CONFIG_PPC_BOOK3S_64
	if (cpu_has_feature(CPU_FTR_DSCR))
		t->dscr = mfspr(SPRN_DSCR);

	if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
		t->bescr = mfspr(SPRN_BESCR);
		t->ebbhr = mfspr(SPRN_EBBHR);
		t->ebbrr = mfspr(SPRN_EBBRR);

		t->fscr = mfspr(SPRN_FSCR);

		/*
		 * Note that the TAR is not available for use in the kernel.
		 * (To provide this, the TAR should be backed up/restored on
		 * exception entry/exit instead, and be in pt_regs. FIXME,
		 * this should be in pt_regs anyway (for debug).)
		 */
		t->tar = mfspr(SPRN_TAR);
	}
#endif
}

static inline void restore_sprs(struct thread_struct *old_thread,
				struct thread_struct *new_thread)
{
#ifdef CONFIG_ALTIVEC
	if (cpu_has_feature(CPU_FTR_ALTIVEC) &&
	    old_thread->vrsave != new_thread->vrsave)
		mtspr(SPRN_VRSAVE, new_thread->vrsave);
#endif
#ifdef CONFIG_PPC_BOOK3S_64
	if (cpu_has_feature(CPU_FTR_DSCR)) {
		u64 dscr = get_paca()->dscr_default;
		u64 fscr = old_thread->fscr & ~FSCR_DSCR;

		if (new_thread->dscr_inherit) {
			dscr = new_thread->dscr;
			fscr |= FSCR_DSCR;
		}

		if (old_thread->dscr != dscr)
			mtspr(SPRN_DSCR, dscr);

		if (old_thread->fscr != fscr)
			mtspr(SPRN_FSCR, fscr);
	}

	if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
		if (old_thread->bescr != new_thread->bescr)
			mtspr(SPRN_BESCR, new_thread->bescr);
		if (old_thread->ebbhr != new_thread->ebbhr)
			mtspr(SPRN_EBBHR, new_thread->ebbhr);
		if (old_thread->ebbrr != new_thread->ebbrr)
			mtspr(SPRN_EBBRR, new_thread->ebbrr);

		if (old_thread->tar != new_thread->tar)
			mtspr(SPRN_TAR, new_thread->tar);
	}
#endif
}
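
/*
 * Design note: mtspr is comparatively expensive, so each SPR above is
 * written only when the old and new values actually differ; the DSCR
 * additionally falls back to the per-CPU default from the paca unless
 * the new thread has explicitly inherited its own value.
 */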
struct task_struct *__switch_to(struct task_struct *prev,
	struct task_struct *new)
{
	struct thread_struct *new_thread, *old_thread;
	struct task_struct *last;
#ifdef CONFIG_PPC_BOOK3S_64
	struct ppc64_tlb_batch *batch;
#endif

	new_thread = &new->thread;
	old_thread = &current->thread;

	WARN_ON(!irqs_disabled());

	/*
	 * We need to save SPRs before treclaim/trecheckpoint as these will
	 * change a number of them.
	 */
	save_sprs(&prev->thread);

	__switch_to_tm(prev);

	if (prev->thread.regs && (prev->thread.regs->msr & MSR_FP))
		giveup_fpu(prev);
#ifdef CONFIG_ALTIVEC
	if (prev->thread.regs && (prev->thread.regs->msr & MSR_VEC))
		giveup_altivec(prev);
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_VSX
	if (prev->thread.regs && (prev->thread.regs->msr & MSR_VSX))
		/* VMX and FPU registers are already saved here */
		__giveup_vsx(prev);
#endif /* CONFIG_VSX */
#ifdef CONFIG_SPE
	if ((prev->thread.regs && (prev->thread.regs->msr & MSR_SPE)))
		giveup_spe(prev);
#endif /* CONFIG_SPE */

#ifdef CONFIG_PPC_ADV_DEBUG_REGS
	switch_booke_debug_regs(&new->thread.debug);
#else
/*
 * For PPC_BOOK3S_64, we use the hw-breakpoint interfaces that would
 * schedule DABR/DAWR set/reset when necessary.
 */
#ifndef CONFIG_HAVE_HW_BREAKPOINT
	if (unlikely(!hw_brk_match(this_cpu_ptr(&current_brk), &new->thread.hw_brk)))
		__set_breakpoint(&new->thread.hw_brk);
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
#endif

#ifdef CONFIG_PPC64
	/*
	 * Collect processor utilization data per process
	 */
	if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
		struct cpu_usage *cu = this_cpu_ptr(&cpu_usage_array);
		unsigned long start_tb, current_tb;
		start_tb = old_thread->start_tb;
		cu->current_tb = current_tb = mfspr(SPRN_PURR);
		old_thread->accum_tb += (current_tb - start_tb);
		new_thread->start_tb = current_tb;
	}
#endif /* CONFIG_PPC64 */

#ifdef CONFIG_PPC_BOOK3S_64
	batch = this_cpu_ptr(&ppc64_tlb_batch);
	if (batch->active) {
		current_thread_info()->local_flags |= _TLF_LAZY_MMU;
		if (batch->index)
			__flush_tlb_pending(batch);
		batch->active = 0;
	}
#endif /* CONFIG_PPC_BOOK3S_64 */

	/*
	 * We can't take a PMU exception inside _switch() since there is a
	 * window where the kernel stack SLB and the kernel stack are out
	 * of sync. Hard disable here.
	 */
	hard_irq_disable();

	tm_recheckpoint_new_task(new);

	last = _switch(old_thread, new_thread);

	/* Need to recalculate these after calling _switch() */
	old_thread = &last->thread;
	new_thread = &current->thread;

#ifdef CONFIG_PPC_BOOK3S_64
	if (current_thread_info()->local_flags & _TLF_LAZY_MMU) {
		current_thread_info()->local_flags &= ~_TLF_LAZY_MMU;
		batch = this_cpu_ptr(&ppc64_tlb_batch);
		batch->active = 1;
	}
#endif /* CONFIG_PPC_BOOK3S_64 */

	restore_sprs(old_thread, new_thread);

	return last;
}
static int instructions_to_print = 16;

static void show_instructions(struct pt_regs *regs)
{
	int i;
	unsigned long pc = regs->nip - (instructions_to_print * 3 / 4 *
			sizeof(int));

	printk("Instruction dump:");

	for (i = 0; i < instructions_to_print; i++) {
		int instr;

		if (!(i % 8))
			printk("\n");

#if !defined(CONFIG_BOOKE)
		/* If executing with the IMMU off, adjust pc rather
		 * than print XXXXXXXX.
		 */
		if (!(regs->msr & MSR_IR))
			pc = (unsigned long)phys_to_virt(pc);
#endif

		if (!__kernel_text_address(pc) ||
		     probe_kernel_address((unsigned int __user *)pc, instr)) {
			printk(KERN_CONT "XXXXXXXX ");
		} else {
			if (regs->nip == pc)
				printk(KERN_CONT "<%08x> ", instr);
			else
				printk(KERN_CONT "%08x ", instr);
		}

		pc += sizeof(int);
	}

	printk("\n");
}
static struct regbit {
	unsigned long bit;
	const char *name;
} msr_bits[] = {
#if defined(CONFIG_PPC64) && !defined(CONFIG_BOOKE)
	{MSR_SF,	"SF"},
	{MSR_HV,	"HV"},
#endif
	{MSR_EE,	"EE"},
	{MSR_PR,	"PR"},
	{MSR_FP,	"FP"},
	{MSR_IR,	"IR"},
	{MSR_DR,	"DR"},
	{0,		NULL}
};

static void printbits(unsigned long val, struct regbit *bits)
{
	const char *sep = "";

	for (; bits->bit; ++bits)
		if (val & bits->bit) {
			printk("%s%s", sep, bits->name);
			sep = ",";
		}
}
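
/*
 * Example output (illustrative): given msr_bits entries for EE, PR and
 * FP, printbits(MSR_EE | MSR_FP, msr_bits) prints "EE,FP": the names
 * of the set bits, comma-separated with no trailing separator.
 */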
#ifdef CONFIG_PPC64
#define REG		"%016lx"
#define REGS_PER_LINE	4
#define LAST_VOLATILE	13
#else
#define REG		"%08lx"
#define REGS_PER_LINE	8
#define LAST_VOLATILE	12
#endif
void show_regs(struct pt_regs * regs)
{
	int i, trap;

	show_regs_print_info(KERN_DEFAULT);

	printk("NIP: "REG" LR: "REG" CTR: "REG"\n",
	       regs->nip, regs->link, regs->ctr);
	printk("REGS: %p TRAP: %04lx   %s  (%s)\n",
	       regs, regs->trap, print_tainted(), init_utsname()->release);
	printk("MSR: "REG" ", regs->msr);
	printbits(regs->msr, msr_bits);
	printk("  CR: %08lx  XER: %08lx\n", regs->ccr, regs->xer);
	trap = TRAP(regs);
	if ((regs->trap != 0xc00) && cpu_has_feature(CPU_FTR_CFAR))
		printk("CFAR: "REG" ", regs->orig_gpr3);
	if (trap == 0x200 || trap == 0x300 || trap == 0x600)
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
		printk("DEAR: "REG" ESR: "REG" ", regs->dar, regs->dsisr);
#else
		printk("DAR: "REG" DSISR: %08lx ", regs->dar, regs->dsisr);
#endif
#ifdef CONFIG_PPC64
	printk("SOFTE: %ld ", regs->softe);
#endif
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	if (MSR_TM_ACTIVE(regs->msr))
		printk("\nPACATMSCRATCH: %016llx ", get_paca()->tm_scratch);
#endif

	for (i = 0; i < 32; i++) {
		if ((i % REGS_PER_LINE) == 0)
			printk("\nGPR%02d: ", i);
		printk(REG " ", regs->gpr[i]);
		if (i == LAST_VOLATILE && !FULL_REGS(regs))
			break;
	}
	printk("\n");
#ifdef CONFIG_KALLSYMS
	/*
	 * Lookup NIP late so we have the best chance of getting the
	 * above info out without failing
	 */
	printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
	printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
#endif
	show_stack(current, (unsigned long *) regs->gpr[1]);
	if (!user_mode(regs))
		show_instructions(regs);
}
void exit_thread(void)
{
}

void flush_thread(void)
{
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	flush_ptrace_hw_breakpoint(current);
#else /* CONFIG_HAVE_HW_BREAKPOINT */
	set_debug_reg_defaults(&current->thread);
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
}

void
release_thread(struct task_struct *t)
{
}

/*
 * this gets called so that we can store coprocessor state into memory and
 * copy the current task into the new thread.
 */
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
	flush_fp_to_thread(src);
	flush_altivec_to_thread(src);
	flush_vsx_to_thread(src);
	flush_spe_to_thread(src);
	/*
	 * Flush TM state out so we can copy it. __switch_to_tm() does this
	 * flush but it removes the checkpointed state from the current CPU and
	 * transitions the CPU out of TM mode. Hence we need to call
	 * tm_recheckpoint_new_task() (on the same task) to restore the
	 * checkpointed state back and the TM mode.
	 */
	__switch_to_tm(src);
	tm_recheckpoint_new_task(src);

	*dst = *src;

	clear_task_ebb(dst);

	return 0;
}
static void setup_ksp_vsid(struct task_struct *p, unsigned long sp)
{
#ifdef CONFIG_PPC_STD_MMU_64
	unsigned long sp_vsid;
	unsigned long llp = mmu_psize_defs[mmu_linear_psize].sllp;

	if (mmu_has_feature(MMU_FTR_1T_SEGMENT))
		sp_vsid = get_kernel_vsid(sp, MMU_SEGSIZE_1T)
			<< SLB_VSID_SHIFT_1T;
	else
		sp_vsid = get_kernel_vsid(sp, MMU_SEGSIZE_256M)
			<< SLB_VSID_SHIFT;
	sp_vsid |= SLB_VSID_KERNEL | llp;
	p->thread.ksp_vsid = sp_vsid;
#endif
}
/*
 * Copy architecture-specific thread state
 */
int copy_thread(unsigned long clone_flags, unsigned long usp,
		unsigned long kthread_arg, struct task_struct *p)
{
	struct pt_regs *childregs, *kregs;
	extern void ret_from_fork(void);
	extern void ret_from_kernel_thread(void);
	void (*f)(void);
	unsigned long sp = (unsigned long)task_stack_page(p) + THREAD_SIZE;

	/* Copy registers */
	sp -= sizeof(struct pt_regs);
	childregs = (struct pt_regs *) sp;
	if (unlikely(p->flags & PF_KTHREAD)) {
		/* kernel thread */
		struct thread_info *ti = (void *)task_stack_page(p);
		memset(childregs, 0, sizeof(struct pt_regs));
		childregs->gpr[1] = sp + sizeof(struct pt_regs);
		/* function */
		if (usp)
			childregs->gpr[14] = ppc_function_entry((void *)usp);
#ifdef CONFIG_PPC64
		clear_tsk_thread_flag(p, TIF_32BIT);
		childregs->softe = 1;
#endif
		childregs->gpr[15] = kthread_arg;
		p->thread.regs = NULL;	/* no user register state */
		ti->flags |= _TIF_RESTOREALL;
		f = ret_from_kernel_thread;
	} else {
		/* user thread */
		struct pt_regs *regs = current_pt_regs();
		CHECK_FULL_REGS(regs);
		*childregs = *regs;
		if (usp)
			childregs->gpr[1] = usp;
		p->thread.regs = childregs;
		childregs->gpr[3] = 0;	/* Result from fork() */
		if (clone_flags & CLONE_SETTLS) {
#ifdef CONFIG_PPC64
			if (!is_32bit_task())
				childregs->gpr[13] = childregs->gpr[6];
			else
#endif
				childregs->gpr[2] = childregs->gpr[6];
		}

		f = ret_from_fork;
	}
	sp -= STACK_FRAME_OVERHEAD;

	/*
	 * The way this works is that at some point in the future
	 * some task will call _switch to switch to the new task.
	 * That will pop off the stack frame created below and start
	 * the new task running at ret_from_fork. The new task will
	 * do some house keeping and then return from the fork or clone
	 * system call, using the stack frame created above.
	 */
	((unsigned long *)sp)[0] = 0;
	sp -= sizeof(struct pt_regs);
	kregs = (struct pt_regs *) sp;
	sp -= STACK_FRAME_OVERHEAD;
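
	/*
	 * Sketch of the child stack built above (illustrative, top of
	 * stack first):
	 *
	 *	childregs (struct pt_regs)	<- p->thread.regs for user tasks
	 *	STACK_FRAME_OVERHEAD		<- back chain word zeroed above
	 *	kregs (struct pt_regs)		<- kregs->nip set to f below
	 *	STACK_FRAME_OVERHEAD		<- sp, saved as the kernel SP
	 */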
	p->thread.ksp = sp;
#ifdef CONFIG_PPC32
	p->thread.ksp_limit = (unsigned long)task_stack_page(p) +
				_ALIGN_UP(sizeof(struct thread_info), 16);
#endif
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	p->thread.ptrace_bps[0] = NULL;
#endif

	p->thread.fp_save_area = NULL;
#ifdef CONFIG_ALTIVEC
	p->thread.vr_save_area = NULL;
#endif

	setup_ksp_vsid(p, sp);

#ifdef CONFIG_PPC64
	if (cpu_has_feature(CPU_FTR_DSCR)) {
		p->thread.dscr_inherit = current->thread.dscr_inherit;
		p->thread.dscr = current->thread.dscr;
	}
	if (cpu_has_feature(CPU_FTR_HAS_PPR))
		p->thread.ppr = INIT_PPR;
#endif
	kregs->nip = ppc_function_entry(f);
	return 0;
}
/*
 * Set up a thread for executing a new program
 */
void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
{
#ifdef CONFIG_PPC64
	unsigned long load_addr = regs->gpr[2];	/* saved by ELF_PLAT_INIT */
#endif

	/*
	 * If we exec out of a kernel thread then thread.regs will not be
	 * set. Do it now.
	 */
	if (!current->thread.regs) {
		struct pt_regs *regs = task_stack_page(current) + THREAD_SIZE;
		current->thread.regs = regs - 1;
	}

	memset(regs->gpr, 0, sizeof(regs->gpr));
	regs->ctr = 0;
	regs->link = 0;
	regs->xer = 0;
	regs->ccr = 0;
	regs->gpr[1] = sp;

	/*
	 * We have just cleared all the nonvolatile GPRs, so make
	 * FULL_REGS(regs) return true. This is necessary to allow
	 * ptrace to examine the thread immediately after exec.
	 */
	regs->trap &= ~1UL;

#ifdef CONFIG_PPC32
	regs->nip = start;
	regs->msr = MSR_USER;
#else
	if (!is_32bit_task()) {
		unsigned long entry;

		if (is_elf2_task()) {
			/* Look ma, no function descriptors! */
			entry = start;

			/*
			 * The latest iteration of the ABI requires that when
			 * calling a function (at its global entry point),
			 * the caller must ensure r12 holds the entry point
			 * address (so that the function can quickly
			 * establish addressability).
			 */
			regs->gpr[12] = start;
			/* Make sure that's restored on entry to userspace. */
			set_thread_flag(TIF_RESTOREALL);
		} else {
			unsigned long toc;

			/* start is a relocated pointer to the function
			 * descriptor for the elf _start routine. The first
			 * entry in the function descriptor is the entry
			 * address of _start and the second entry is the TOC
			 * value we need to use.
			 */
			__get_user(entry, (unsigned long __user *)start);
			__get_user(toc, (unsigned long __user *)start+1);

			/* Check whether the e_entry function descriptor entries
			 * need to be relocated before we can use them.
			 */
			if (load_addr != 0) {
				entry += load_addr;
				toc += load_addr;
			}
			regs->gpr[2] = toc;
		}
		regs->nip = entry;
		regs->msr = MSR_USER64;
	} else {
		regs->nip = start;
		regs->gpr[2] = 0;
		regs->msr = MSR_USER32;
	}
#endif
#ifdef CONFIG_VSX
	current->thread.used_vsr = 0;
#endif
	memset(&current->thread.fp_state, 0, sizeof(current->thread.fp_state));
	current->thread.fp_save_area = NULL;
#ifdef CONFIG_ALTIVEC
	memset(&current->thread.vr_state, 0, sizeof(current->thread.vr_state));
	current->thread.vr_state.vscr.u[3] = 0x00010000; /* Java mode disabled */
	current->thread.vr_save_area = NULL;
	current->thread.vrsave = 0;
	current->thread.used_vr = 0;
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
	memset(current->thread.evr, 0, sizeof(current->thread.evr));
	current->thread.acc = 0;
	current->thread.spefscr = 0;
	current->thread.used_spe = 0;
#endif /* CONFIG_SPE */
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	if (cpu_has_feature(CPU_FTR_TM))
		regs->msr |= MSR_TM;
	current->thread.tm_tfhar = 0;
	current->thread.tm_texasr = 0;
	current->thread.tm_tfiar = 0;
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
}
EXPORT_SYMBOL(start_thread);
#define PR_FP_ALL_EXCEPT (PR_FP_EXC_DIV | PR_FP_EXC_OVF | PR_FP_EXC_UND \
		| PR_FP_EXC_RES | PR_FP_EXC_INV)

int set_fpexc_mode(struct task_struct *tsk, unsigned int val)
{
	struct pt_regs *regs = tsk->thread.regs;

	/* This is a bit hairy. If we are an SPE enabled processor
	 * (have embedded fp) we store the IEEE exception enable flags in
	 * fpexc_mode. fpexc_mode is also used for setting FP exception
	 * mode (async, precise, disabled) for 'Classic' FP. */
	if (val & PR_FP_EXC_SW_ENABLE) {
#ifdef CONFIG_SPE
		if (cpu_has_feature(CPU_FTR_SPE)) {
			/*
			 * When the sticky exception bits are set
			 * directly by userspace, it must call prctl
			 * with PR_GET_FPEXC (with PR_FP_EXC_SW_ENABLE
			 * in the existing prctl settings) or
			 * PR_SET_FPEXC (with PR_FP_EXC_SW_ENABLE in
			 * the bits being set). <fenv.h> functions
			 * saving and restoring the whole
			 * floating-point environment need to do so
			 * anyway to restore the prctl settings from
			 * the saved environment.
			 */
			tsk->thread.spefscr_last = mfspr(SPRN_SPEFSCR);
			tsk->thread.fpexc_mode = val &
				(PR_FP_EXC_SW_ENABLE | PR_FP_ALL_EXCEPT);
			return 0;
		} else
			return -EINVAL;
#else
		return -EINVAL;
#endif
	}

	/* on a CONFIG_SPE this does not hurt us. The bits that
	 * __pack_fe01 use do not overlap with bits used for
	 * PR_FP_EXC_SW_ENABLE. Additionally, the MSR[FE0,FE1] bits
	 * on CONFIG_SPE implementations are reserved so writing to
	 * them does not change anything */
	if (val > PR_FP_EXC_PRECISE)
		return -EINVAL;
	tsk->thread.fpexc_mode = __pack_fe01(val);
	if (regs != NULL && (regs->msr & MSR_FP) != 0)
		regs->msr = (regs->msr & ~(MSR_FE0|MSR_FE1))
			| tsk->thread.fpexc_mode;
	return 0;
}
int get_fpexc_mode(struct task_struct *tsk, unsigned long adr)
{
	unsigned int val;

	if (tsk->thread.fpexc_mode & PR_FP_EXC_SW_ENABLE)
#ifdef CONFIG_SPE
		if (cpu_has_feature(CPU_FTR_SPE)) {
			/*
			 * When the sticky exception bits are set
			 * directly by userspace, it must call prctl
			 * with PR_GET_FPEXC (with PR_FP_EXC_SW_ENABLE
			 * in the existing prctl settings) or
			 * PR_SET_FPEXC (with PR_FP_EXC_SW_ENABLE in
			 * the bits being set). <fenv.h> functions
			 * saving and restoring the whole
			 * floating-point environment need to do so
			 * anyway to restore the prctl settings from
			 * the saved environment.
			 */
			tsk->thread.spefscr_last = mfspr(SPRN_SPEFSCR);
			val = tsk->thread.fpexc_mode;
		} else
			return -EINVAL;
#else
		return -EINVAL;
#endif
	else
		val = __unpack_fe01(tsk->thread.fpexc_mode);
	return put_user(val, (unsigned int __user *) adr);
}
int set_endian(struct task_struct *tsk, unsigned int val)
{
	struct pt_regs *regs = tsk->thread.regs;

	if ((val == PR_ENDIAN_LITTLE && !cpu_has_feature(CPU_FTR_REAL_LE)) ||
	    (val == PR_ENDIAN_PPC_LITTLE && !cpu_has_feature(CPU_FTR_PPC_LE)))
		return -EINVAL;

	if (regs == NULL)
		return -EINVAL;

	if (val == PR_ENDIAN_BIG)
		regs->msr &= ~MSR_LE;
	else if (val == PR_ENDIAN_LITTLE || val == PR_ENDIAN_PPC_LITTLE)
		regs->msr |= MSR_LE;
	else
		return -EINVAL;

	return 0;
}

int get_endian(struct task_struct *tsk, unsigned long adr)
{
	struct pt_regs *regs = tsk->thread.regs;
	unsigned int val;

	if (!cpu_has_feature(CPU_FTR_PPC_LE) &&
	    !cpu_has_feature(CPU_FTR_REAL_LE))
		return -EINVAL;

	if (regs == NULL)
		return -EINVAL;

	if (regs->msr & MSR_LE) {
		if (cpu_has_feature(CPU_FTR_REAL_LE))
			val = PR_ENDIAN_LITTLE;
		else
			val = PR_ENDIAN_PPC_LITTLE;
	} else
		val = PR_ENDIAN_BIG;

	return put_user(val, (unsigned int __user *)adr);
}

int set_unalign_ctl(struct task_struct *tsk, unsigned int val)
{
	tsk->thread.align_ctl = val;
	return 0;
}

int get_unalign_ctl(struct task_struct *tsk, unsigned long adr)
{
	return put_user(tsk->thread.align_ctl, (unsigned int __user *)adr);
}
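
/*
 * Illustrative userspace view (not part of this file): the helpers
 * above back the generic prctl() calls, e.g.
 *
 *	prctl(PR_SET_FPEXC, PR_FP_EXC_PRECISE);
 *	prctl(PR_GET_ENDIAN, (unsigned long)&val);
 *	prctl(PR_SET_UNALIGN, PR_UNALIGN_NOPRINT);
 */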
static inline int valid_irq_stack(unsigned long sp, struct task_struct *p,
				  unsigned long nbytes)
{
	unsigned long stack_page;
	unsigned long cpu = task_cpu(p);

	/*
	 * Avoid crashing if the stack has overflowed and corrupted
	 * task_cpu(p), which is in the thread_info struct.
	 */
	if (cpu < NR_CPUS && cpu_possible(cpu)) {
		stack_page = (unsigned long) hardirq_ctx[cpu];
		if (sp >= stack_page + sizeof(struct thread_struct)
		    && sp <= stack_page + THREAD_SIZE - nbytes)
			return 1;

		stack_page = (unsigned long) softirq_ctx[cpu];
		if (sp >= stack_page + sizeof(struct thread_struct)
		    && sp <= stack_page + THREAD_SIZE - nbytes)
			return 1;
	}
	return 0;
}

int validate_sp(unsigned long sp, struct task_struct *p,
		unsigned long nbytes)
{
	unsigned long stack_page = (unsigned long)task_stack_page(p);

	if (sp >= stack_page + sizeof(struct thread_struct)
	    && sp <= stack_page + THREAD_SIZE - nbytes)
		return 1;

	return valid_irq_stack(sp, p, nbytes);
}

EXPORT_SYMBOL(validate_sp);

unsigned long get_wchan(struct task_struct *p)
{
	unsigned long ip, sp;
	int count = 0;

	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;

	sp = p->thread.ksp;
	if (!validate_sp(sp, p, STACK_FRAME_OVERHEAD))
		return 0;

	do {
		sp = *(unsigned long *)sp;
		if (!validate_sp(sp, p, STACK_FRAME_OVERHEAD))
			return 0;
		if (count > 0) {
			ip = ((unsigned long *)sp)[STACK_FRAME_LR_SAVE];
			if (!in_sched_functions(ip))
				return ip;
		}
	} while (count++ < 16);
	return 0;
}
static int kstack_depth_to_print = CONFIG_PRINT_STACK_DEPTH;

void show_stack(struct task_struct *tsk, unsigned long *stack)
{
	unsigned long sp, ip, lr, newsp;
	int count = 0;
	int firstframe = 1;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	int curr_frame = current->curr_ret_stack;
	extern void return_to_handler(void);
	unsigned long rth = (unsigned long)return_to_handler;
#endif

	sp = (unsigned long) stack;
	if (tsk == NULL)
		tsk = current;
	if (sp == 0) {
		if (tsk == current)
			sp = current_stack_pointer();
		else
			sp = tsk->thread.ksp;
	}

	lr = 0;
	printk("Call Trace:\n");
	do {
		if (!validate_sp(sp, tsk, STACK_FRAME_OVERHEAD))
			return;

		stack = (unsigned long *) sp;
		newsp = stack[0];
		ip = stack[STACK_FRAME_LR_SAVE];
		if (!firstframe || ip != lr) {
			printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
			if ((ip == rth) && curr_frame >= 0) {
				printk(" (%pS)",
				       (void *)current->ret_stack[curr_frame].ret);
				curr_frame--;
			}
#endif
			if (firstframe)
				printk(" (unreliable)");
			printk("\n");
		}
		firstframe = 0;

		/*
		 * See if this is an exception frame.
		 * We look for the "regshere" marker in the current frame.
		 */
		if (validate_sp(sp, tsk, STACK_INT_FRAME_SIZE)
		    && stack[STACK_FRAME_MARKER] == STACK_FRAME_REGS_MARKER) {
			struct pt_regs *regs = (struct pt_regs *)
				(sp + STACK_FRAME_OVERHEAD);
			lr = regs->link;
			printk("--- interrupt: %lx at %pS\n    LR = %pS\n",
			       regs->trap, (void *)regs->nip, (void *)lr);
			firstframe = 1;
		}

		sp = newsp;
	} while (count++ < kstack_depth_to_print);
}
#ifdef CONFIG_PPC64
/* Called with hard IRQs off */
void notrace __ppc64_runlatch_on(void)
{
	struct thread_info *ti = current_thread_info();
	unsigned long ctrl;

	ctrl = mfspr(SPRN_CTRLF);
	ctrl |= CTRL_RUNLATCH;
	mtspr(SPRN_CTRLT, ctrl);

	ti->local_flags |= _TLF_RUNLATCH;
}

/* Called with hard IRQs off */
void notrace __ppc64_runlatch_off(void)
{
	struct thread_info *ti = current_thread_info();
	unsigned long ctrl;

	ti->local_flags &= ~_TLF_RUNLATCH;

	ctrl = mfspr(SPRN_CTRLF);
	ctrl &= ~CTRL_RUNLATCH;
	mtspr(SPRN_CTRLT, ctrl);
}
#endif /* CONFIG_PPC64 */
unsigned long arch_align_stack(unsigned long sp)
{
	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		sp -= get_random_int() & ~PAGE_MASK;
	return sp & ~0xf;
}

static inline unsigned long brk_rnd(void)
{
	unsigned long rnd = 0;

	/* 8MB for 32bit, 1GB for 64bit */
	if (is_32bit_task())
		rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
	else
		rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));

	return rnd << PAGE_SHIFT;
}
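
/*
 * Worked example (assuming the common PAGE_SHIFT of 12, i.e. 4K
 * pages): for a 32-bit task, 1 << (23 - 12) = 2048, so rnd is a page
 * count in 0..2047 and rnd << PAGE_SHIFT randomizes the heap start
 * within 8MB; the 64-bit case uses 1 << (30 - 12) for a 1GB window.
 */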
unsigned long arch_randomize_brk(struct mm_struct *mm)
{
	unsigned long base = mm->brk;
	unsigned long ret;

#ifdef CONFIG_PPC_STD_MMU_64
	/*
	 * If we are using 1TB segments and we are allowed to randomise
	 * the heap, we can put it above 1TB so it is backed by a 1TB
	 * segment. Otherwise the heap will be in the bottom 1TB
	 * which always uses 256MB segments and this may result in a
	 * performance penalty.
	 */
	if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
		base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
#endif

	ret = PAGE_ALIGN(base + brk_rnd());

	if (ret < mm->brk)
		return mm->brk;

	return ret;
}