/*
 *  Kernel Probes (KProbes)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 *
 * 2002-Oct	Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
 *		Probes initial implementation (includes contributions from
 *		Rusty Russell).
 * 2004-July	Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes
 *		interface to access function arguments.
 * 2004-Oct	Jim Keniston <jkenisto@us.ibm.com> and Prasanna S Panchamukhi
 *		<prasanna@in.ibm.com> adapted for x86_64 from i386.
 * 2005-Mar	Roland McGrath <roland@redhat.com>
 *		Fixed to handle %rip-relative addressing mode correctly.
 * 2005-May	Hien Nguyen <hien@us.ibm.com>, Jim Keniston
 *		<jkenisto@us.ibm.com> and Prasanna S Panchamukhi
 *		<prasanna@in.ibm.com> added function-return probes.
 * 2005-May	Rusty Lynch <rusty.lynch@intel.com>
 *		added function-return probes functionality
 * 2006-Feb	Masami Hiramatsu <hiramatu@sdl.hitachi.co.jp> added
 *		kprobe-booster and kretprobe-booster for i386.
 * 2007-Dec	Masami Hiramatsu <mhiramat@redhat.com> added kprobe-booster
 *		and kretprobe-booster for x86-64
 * 2007-Dec	Masami Hiramatsu <mhiramat@redhat.com>, Arjan van de Ven
 *		<arjan@infradead.org> and Jim Keniston <jkenisto@us.ibm.com>
 *		unified x86 kprobes code.
 */
#include <linux/kprobes.h>
#include <linux/ptrace.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/hardirq.h>
#include <linux/preempt.h>
#include <linux/module.h>
#include <linux/kdebug.h>
#include <linux/kallsyms.h>
#include <linux/ftrace.h>

#include <asm/cacheflush.h>
#include <asm/desc.h>
#include <asm/pgtable.h>
#include <asm/uaccess.h>
#include <asm/alternative.h>
#include <asm/insn.h>
#include <asm/debugreg.h>

#include "common.h"

void jprobe_return_end(void);

DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);

#define stack_addr(regs) ((unsigned long *)kernel_stack_pointer(regs))
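
/*
 * Each W() row below packs sixteen 0/1 flags into one half of a 32-bit
 * word (shifted by row % 32), so OR-ing two adjacent rows builds each
 * u32 entry and the table covers all 256 two-byte (0x0f xx) opcodes.
 */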
#define W(row, b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, ba, bb, bc, bd, be, bf)\
	(((b0##UL << 0x0)|(b1##UL << 0x1)|(b2##UL << 0x2)|(b3##UL << 0x3) |   \
	  (b4##UL << 0x4)|(b5##UL << 0x5)|(b6##UL << 0x6)|(b7##UL << 0x7) |   \
	  (b8##UL << 0x8)|(b9##UL << 0x9)|(ba##UL << 0xa)|(bb##UL << 0xb) |   \
	  (bc##UL << 0xc)|(bd##UL << 0xd)|(be##UL << 0xe)|(bf##UL << 0xf))    \
	 << (row % 32))
	/*
	 * Undefined/reserved opcodes, conditional jumps, Opcode Extension
	 * Groups, and some special opcodes cannot be boosted.
	 * This is non-const and volatile to keep gcc from statically
	 * optimizing it out, as variable_test_bit makes gcc think only
	 * *(unsigned long*) is used.
	 */
static volatile u32 twobyte_is_boostable[256 / 32] = {
	/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f          */
	/*      ----------------------------------------------          */
	W(0x00, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0) | /* 00 */
	W(0x10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* 10 */
	W(0x20, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) | /* 20 */
	W(0x30, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* 30 */
	W(0x40, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 40 */
	W(0x50, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* 50 */
	W(0x60, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1) | /* 60 */
	W(0x70, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1) , /* 70 */
	W(0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) | /* 80 */
	W(0x90, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 90 */
	W(0xa0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1) | /* a0 */
	W(0xb0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1) , /* b0 */
	W(0xc0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1) | /* c0 */
	W(0xd0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1) , /* d0 */
	W(0xe0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1) | /* e0 */
	W(0xf0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0)   /* f0 */
	/*      -----------------------------------------------         */
	/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f          */
};
#undef W

struct kretprobe_blackpoint kretprobe_blacklist[] = {
	{"__switch_to", }, /* This function switches only the current task,
			      but doesn't switch the kernel stack. */
	{NULL, NULL}	/* Terminator */
};

const int kretprobe_blacklist_size = ARRAY_SIZE(kretprobe_blacklist);

static nokprobe_inline void
__synthesize_relative_insn(void *from, void *to, u8 op)
{
	struct __arch_relative_insn {
		u8 op;
		s32 raddr;
	} __packed *insn;

	insn = (struct __arch_relative_insn *)from;
	/* The displacement is relative to the end of the 5-byte insn. */
	insn->raddr = (s32)((long)(to) - ((long)(from) + 5));
	insn->op = op;
}

/* Insert a jump instruction at address 'from', which jumps to address 'to'. */
void synthesize_reljump(void *from, void *to)
{
	__synthesize_relative_insn(from, to, RELATIVEJUMP_OPCODE);
}
NOKPROBE_SYMBOL(synthesize_reljump);

/* Insert a call instruction at address 'from', which calls address 'to'. */
void synthesize_relcall(void *from, void *to)
{
	__synthesize_relative_insn(from, to, RELATIVECALL_OPCODE);
}
NOKPROBE_SYMBOL(synthesize_relcall);
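
/*
 * For reference, both synthesized instructions are 5 bytes on x86:
 *	jmp rel32  -> e9 xx xx xx xx	(RELATIVEJUMP_OPCODE)
 *	call rel32 -> e8 xx xx xx xx	(RELATIVECALL_OPCODE)
 * where rel32 is computed relative to the end of the instruction,
 * i.e. rel32 = to - (from + 5), as in __synthesize_relative_insn().
 */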

/*
 * Skip the prefixes of the instruction.
 */
static kprobe_opcode_t *skip_prefixes(kprobe_opcode_t *insn)
{
	insn_attr_t attr;

	attr = inat_get_opcode_attribute((insn_byte_t)*insn);
	while (inat_is_legacy_prefix(attr)) {
		insn++;
		attr = inat_get_opcode_attribute((insn_byte_t)*insn);
	}
#ifdef CONFIG_X86_64
	if (inat_is_rex_prefix(attr))
		insn++;
#endif
	return insn;
}
NOKPROBE_SYMBOL(skip_prefixes);

/*
 * Returns non-zero if the opcode is boostable.
 * RIP-relative instructions are adjusted at copy time in 64-bit mode.
 */
int can_boost(kprobe_opcode_t *opcodes)
{
	kprobe_opcode_t opcode;
	kprobe_opcode_t *orig_opcodes = opcodes;

	if (search_exception_tables((unsigned long)opcodes))
		return 0;	/* Page fault may occur on this address. */

retry:
	if (opcodes - orig_opcodes > MAX_INSN_SIZE - 1)
		return 0;
	opcode = *(opcodes++);

	/* 2nd-byte opcode */
	if (opcode == 0x0f) {
		if (opcodes - orig_opcodes > MAX_INSN_SIZE - 1)
			return 0;
		return test_bit(*opcodes,
				(unsigned long *)twobyte_is_boostable);
	}

	switch (opcode & 0xf0) {
#ifdef CONFIG_X86_64
	case 0x40:
		goto retry; /* REX prefix is boostable */
#endif
	case 0x60:
		if (0x63 < opcode && opcode < 0x67)
			goto retry; /* prefixes */
		/* can't boost Address-size override and bound */
		return (opcode != 0x62 && opcode != 0x67);
	case 0x70:
		return 0; /* can't boost conditional jump */
	case 0xc0:
		/* can't boost software interrupts */
		return (0xc1 < opcode && opcode < 0xcc) || opcode == 0xcf;
	case 0xd0:
		/* can boost AA* and XLAT */
		return (opcode == 0xd4 || opcode == 0xd5 || opcode == 0xd7);
	case 0xe0:
		/* can boost in/out and absolute jmps */
		return ((opcode & 0x04) || opcode == 0xea);
	case 0xf0:
		if ((opcode & 0x0c) == 0 && opcode != 0xf1)
			goto retry; /* lock/rep(ne) prefix */
		/* clear and set flags are boostable */
		return (opcode == 0xf5 || (0xf7 < opcode && opcode < 0xfe));
	default:
		/* segment override prefixes are boostable */
		if (opcode == 0x26 || opcode == 0x36 || opcode == 0x3e)
			goto retry; /* prefixes */
		/* CS override prefix and call are not boostable */
		return (opcode != 0x2e && opcode != 0x9a);
	}
}
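
/*
 * Note: "boosting" means executing the copied instruction in its slot
 * and jumping straight back via a synthesized reljump instead of taking
 * the single-step trap, so only instructions whose behaviour does not
 * depend on their original address (or that we can fix up) qualify.
 */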

static unsigned long
__recover_probed_insn(kprobe_opcode_t *buf, unsigned long addr)
{
	struct kprobe *kp;

	kp = get_kprobe((void *)addr);
	/* There is no probe, return original address */
	if (!kp)
		return addr;

	/*
	 * Basically, kp->ainsn.insn has the original instruction.
	 * However, a RIP-relative instruction cannot be single-stepped
	 * at a different place, so __copy_instruction() tweaks the
	 * displacement of that instruction.  In that case, we can't
	 * recover the instruction from kp->ainsn.insn.
	 *
	 * On the other hand, kp->opcode has a copy of the first byte of
	 * the probed instruction, which was overwritten by int3.  Since
	 * the instruction at kp->addr is not modified by kprobes except
	 * for the first byte, we can recover the original instruction
	 * from it and kp->opcode.
	 */
	memcpy(buf, kp->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
	buf[0] = kp->opcode;
	return (unsigned long)buf;
}

/*
 * Recover the probed instruction at addr for further analysis.
 * The caller must hold kprobe_mutex, or disable preemption, to
 * prevent the kprobes it references from being released.
 */
unsigned long recover_probed_instruction(kprobe_opcode_t *buf, unsigned long addr)
{
	unsigned long __addr;

	__addr = __recover_optprobed_insn(buf, addr);
	if (__addr != addr)
		return __addr;

	return __recover_probed_insn(buf, addr);
}

/* Check if paddr is at an instruction boundary */
static int can_probe(unsigned long paddr)
{
	unsigned long addr, __addr, offset = 0;
	struct insn insn;
	kprobe_opcode_t buf[MAX_INSN_SIZE];

	if (!kallsyms_lookup_size_offset(paddr, NULL, &offset))
		return 0;

	/* Decode instructions */
	addr = paddr - offset;
	while (addr < paddr) {
		/*
		 * Check if the instruction has been modified by another
		 * kprobe, in which case we replace the breakpoint with the
		 * original instruction in our buffer.
		 * Also, jump optimization will change the breakpoint to a
		 * relative jump.  Since the relative jump itself is a
		 * normal instruction, we simply decode it if there is no
		 * kprobe.
		 */
		__addr = recover_probed_instruction(buf, addr);
		kernel_insn_init(&insn, (void *)__addr);
		insn_get_length(&insn);

		/*
		 * Another debugging subsystem might insert this breakpoint.
		 * In that case, we can't recover it.
		 */
		if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION)
			return 0;
		addr += insn.length;
	}

	return (addr == paddr);
}
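
/*
 * Example: if paddr points into the middle of a multi-byte instruction,
 * the decode walk above steps over it, addr ends up past paddr, and the
 * (addr == paddr) check rejects the probe.
 */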

/*
 * Returns non-zero if the opcode modifies the interrupt flag.
 */
static int is_IF_modifier(kprobe_opcode_t *insn)
{
	/* Skip prefixes */
	insn = skip_prefixes(insn);

	switch (*insn) {
	case 0xfa:		/* cli */
	case 0xfb:		/* sti */
	case 0xcf:		/* iret/iretd */
	case 0x9d:		/* popf/popfd */
		return 1;
	}

	return 0;
}

/*
 * Copy an instruction and adjust the displacement if the instruction
 * uses the %rip-relative addressing mode (only possible on 64-bit x86).
 * Returns the length of the copied instruction, or 0 if recovery of the
 * original instruction failed.
 */
int __copy_instruction(u8 *dest, u8 *src)
{
	struct insn insn;
	kprobe_opcode_t buf[MAX_INSN_SIZE];

	kernel_insn_init(&insn, (void *)recover_probed_instruction(buf, (unsigned long)src));
	insn_get_length(&insn);
	/* Another subsystem put a breakpoint here; we failed to recover. */
	if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION)
		return 0;
	memcpy(dest, insn.kaddr, insn.length);

#ifdef CONFIG_X86_64
	if (insn_rip_relative(&insn)) {
		s64 newdisp;
		u8 *disp;
		kernel_insn_init(&insn, dest);
		insn_get_displacement(&insn);
		/*
		 * The copied instruction uses the %rip-relative addressing
		 * mode.  Adjust the displacement for the difference between
		 * the original location of this instruction and the location
		 * of the copy that will actually be run.  The tricky bit here
		 * is making sure that the sign extension happens correctly in
		 * this calculation, since we need a signed 32-bit result to
		 * be sign-extended to 64 bits when it's added to the %rip
		 * value and yield the same 64-bit result that the sign-
		 * extension of the original signed 32-bit displacement would
		 * have given.
		 */
		newdisp = (u8 *) src + (s64) insn.displacement.value - (u8 *) dest;
		if ((s64) (s32) newdisp != newdisp) {
			pr_err("Kprobes error: new displacement does not fit into s32 (%llx)\n", newdisp);
			pr_err("\tSrc: %p, Dest: %p, old disp: %x\n",
				src, dest, insn.displacement.value);
			return 0;
		}
		disp = (u8 *) dest + insn_offset_displacement(&insn);
		*(s32 *) disp = (s32) newdisp;
	}
#endif
	return insn.length;
}
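
/*
 * Example: copying "lea 0x10(%rip), %rax" (7 bytes) from src to dest
 * must preserve the effective address src + 7 + 0x10, so the stored
 * displacement becomes 0x10 + (src - dest).
 */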

static int arch_copy_kprobe(struct kprobe *p)
{
	int ret;

	/* Copy the instruction, recovering the original bytes if another
	   kprobe/optprobe has modified them. */
	ret = __copy_instruction(p->ainsn.insn, p->addr);
	if (!ret)
		return -EINVAL;

	/*
	 * __copy_instruction can modify the displacement of the instruction,
	 * but that doesn't affect the boostable check.
	 */
	if (can_boost(p->ainsn.insn))
		p->ainsn.boostable = 0;
	else
		p->ainsn.boostable = -1;

	/* Check whether the instruction modifies the Interrupt Flag or not */
	p->ainsn.if_modifier = is_IF_modifier(p->ainsn.insn);

	/* Also, a displacement change doesn't affect the first byte */
	p->opcode = p->ainsn.insn[0];

	return 0;
}

int arch_prepare_kprobe(struct kprobe *p)
{
	if (alternatives_text_reserved(p->addr, p->addr))
		return -EINVAL;

	if (!can_probe((unsigned long)p->addr))
		return -EILSEQ;
	/* insn: must be on special executable page on x86. */
	p->ainsn.insn = get_insn_slot();
	if (!p->ainsn.insn)
		return -ENOMEM;

	return arch_copy_kprobe(p);
}
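
/*
 * Typical module-side usage that ends up here (a sketch; the symbol and
 * handler names are illustrative only):
 *
 *	static struct kprobe kp = {
 *		.symbol_name	= "do_fork",
 *		.pre_handler	= my_pre_handler,
 *	};
 *	ret = register_kprobe(&kp);	// calls arch_prepare_kprobe()
 *	...
 *	unregister_kprobe(&kp);
 */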

void arch_arm_kprobe(struct kprobe *p)
{
	text_poke(p->addr, ((unsigned char []){BREAKPOINT_INSTRUCTION}), 1);
}

void arch_disarm_kprobe(struct kprobe *p)
{
	text_poke(p->addr, &p->opcode, 1);
}

void arch_remove_kprobe(struct kprobe *p)
{
	if (p->ainsn.insn) {
		free_insn_slot(p->ainsn.insn, (p->ainsn.boostable == 1));
		p->ainsn.insn = NULL;
	}
}

static nokprobe_inline void
save_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	kcb->prev_kprobe.kp = kprobe_running();
	kcb->prev_kprobe.status = kcb->kprobe_status;
	kcb->prev_kprobe.old_flags = kcb->kprobe_old_flags;
	kcb->prev_kprobe.saved_flags = kcb->kprobe_saved_flags;
}

static nokprobe_inline void
restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	__this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
	kcb->kprobe_status = kcb->prev_kprobe.status;
	kcb->kprobe_old_flags = kcb->prev_kprobe.old_flags;
	kcb->kprobe_saved_flags = kcb->prev_kprobe.saved_flags;
}

static nokprobe_inline void
set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
		   struct kprobe_ctlblk *kcb)
{
	__this_cpu_write(current_kprobe, p);
	kcb->kprobe_saved_flags = kcb->kprobe_old_flags
		= (regs->flags & (X86_EFLAGS_TF | X86_EFLAGS_IF));
	if (p->ainsn.if_modifier)
		kcb->kprobe_saved_flags &= ~X86_EFLAGS_IF;
}

static nokprobe_inline void clear_btf(void)
{
	if (test_thread_flag(TIF_BLOCKSTEP)) {
		unsigned long debugctl = get_debugctlmsr();

		debugctl &= ~DEBUGCTLMSR_BTF;
		update_debugctlmsr(debugctl);
	}
}

static nokprobe_inline void restore_btf(void)
{
	if (test_thread_flag(TIF_BLOCKSTEP)) {
		unsigned long debugctl = get_debugctlmsr();

		debugctl |= DEBUGCTLMSR_BTF;
		update_debugctlmsr(debugctl);
	}
}

void arch_prepare_kretprobe(struct kretprobe_instance *ri, struct pt_regs *regs)
{
	unsigned long *sara = stack_addr(regs);

	ri->ret_addr = (kprobe_opcode_t *) *sara;

	/* Replace the return addr with trampoline addr */
	*sara = (unsigned long) &kretprobe_trampoline;
}
NOKPROBE_SYMBOL(arch_prepare_kretprobe);
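
/*
 * From here on the probed function "returns" into kretprobe_trampoline
 * below, and trampoline_handler() recovers the real return address from
 * the per-task kretprobe hash list.
 */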

static void setup_singlestep(struct kprobe *p, struct pt_regs *regs,
			     struct kprobe_ctlblk *kcb, int reenter)
{
	if (setup_detour_execution(p, regs, reenter))
		return;

#if !defined(CONFIG_PREEMPT)
	if (p->ainsn.boostable == 1 && !p->post_handler) {
		/* Boost up -- we can execute copied instructions directly */
		if (!reenter)
			reset_current_kprobe();
		/*
		 * Reentering a boosted probe doesn't reset current_kprobe,
		 * nor set current_kprobe, because it doesn't use single
		 * stepping.
		 */
		regs->ip = (unsigned long)p->ainsn.insn;
		preempt_enable_no_resched();
		return;
	}
#endif
	if (reenter) {
		save_previous_kprobe(kcb);
		set_current_kprobe(p, regs, kcb);
		kcb->kprobe_status = KPROBE_REENTER;
	} else
		kcb->kprobe_status = KPROBE_HIT_SS;
	/* Prepare real single stepping */
	clear_btf();
	regs->flags |= X86_EFLAGS_TF;
	regs->flags &= ~X86_EFLAGS_IF;
	/* single step inline if the instruction is an int3 */
	if (p->opcode == BREAKPOINT_INSTRUCTION)
		regs->ip = (unsigned long)p->addr;
	else
		regs->ip = (unsigned long)p->ainsn.insn;
}
NOKPROBE_SYMBOL(setup_singlestep);

/*
 * We have reentered the kprobe_handler(), since another probe was hit while
 * within the handler. We save the original kprobes variables and just single
 * step on the instruction of the new probe without calling any user handlers.
 */
static int reenter_kprobe(struct kprobe *p, struct pt_regs *regs,
			  struct kprobe_ctlblk *kcb)
{
	switch (kcb->kprobe_status) {
	case KPROBE_HIT_SSDONE:
	case KPROBE_HIT_ACTIVE:
	case KPROBE_HIT_SS:
		kprobes_inc_nmissed_count(p);
		setup_singlestep(p, regs, kcb, 1);
		break;
	case KPROBE_REENTER:
		/* A probe has been hit in the codepath leading up to, or just
		 * after, single-stepping of a probed instruction. This entire
		 * codepath should strictly reside in the .kprobes.text section.
		 * Raise a BUG or we'll continue in an endless reentering loop
		 * and eventually a stack overflow.
		 */
		printk(KERN_WARNING "Unrecoverable kprobe detected at %p.\n",
		       p->addr);
		dump_kprobe(p);
		BUG();
	default:
		/* impossible cases */
		WARN_ON(1);
		return 0;
	}

	return 1;
}
NOKPROBE_SYMBOL(reenter_kprobe);

/*
 * Interrupts are disabled on entry as trap3 is an interrupt gate and they
 * remain disabled throughout this function.
 */
int kprobe_int3_handler(struct pt_regs *regs)
{
	kprobe_opcode_t *addr;
	struct kprobe *p;
	struct kprobe_ctlblk *kcb;

	if (user_mode_vm(regs))
		return 0;

	addr = (kprobe_opcode_t *)(regs->ip - sizeof(kprobe_opcode_t));
	/*
	 * We don't want to be preempted for the entire
	 * duration of kprobe processing. We conditionally
	 * re-enable preemption at the end of this function,
	 * and also in reenter_kprobe() and setup_singlestep().
	 */
	preempt_disable();

	kcb = get_kprobe_ctlblk();
	p = get_kprobe(addr);

	if (p) {
		if (kprobe_running()) {
			if (reenter_kprobe(p, regs, kcb))
				return 1;
		} else {
			set_current_kprobe(p, regs, kcb);
			kcb->kprobe_status = KPROBE_HIT_ACTIVE;

			/*
			 * If we have no pre-handler or it returned 0, we
			 * continue with normal processing.  If we have a
			 * pre-handler and it returned non-zero, it prepped
			 * for calling the break_handler below on re-entry
			 * for jprobe processing, so get out doing nothing
			 * more here.
			 */
			if (!p->pre_handler || !p->pre_handler(p, regs))
				setup_singlestep(p, regs, kcb, 0);
			return 1;
		}
	} else if (*addr != BREAKPOINT_INSTRUCTION) {
		/*
		 * The breakpoint instruction was removed right
		 * after we hit it.  Another cpu has removed
		 * either a probepoint or a debugger breakpoint
		 * at this address.  In either case, no further
		 * handling of this interrupt is appropriate.
		 * Back up over the (now missing) int3 and run
		 * the original instruction.
		 */
		regs->ip = (unsigned long)addr;
		preempt_enable_no_resched();
		return 1;
	} else if (kprobe_running()) {
		p = __this_cpu_read(current_kprobe);
		if (p->break_handler && p->break_handler(p, regs)) {
			if (!skip_singlestep(p, regs, kcb))
				setup_singlestep(p, regs, kcb, 0);
			return 1;
		}
	} /* else: not a kprobe fault; let the kernel handle it */

	preempt_enable_no_resched();
	return 0;
}
NOKPROBE_SYMBOL(kprobe_int3_handler);
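
/*
 * Descriptive summary of the path above: int3 -> pre_handler ->
 * single-step of the copied instruction (or a boosted/optimized detour)
 * -> kprobe_debug_handler() -> post_handler, with preemption disabled
 * from the int3 hit until the probe is fully handled.
 */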

/*
 * When a retprobed function returns, this code saves registers and
 * calls trampoline_handler(), which in turn calls the kretprobe's
 * handler and returns the real return address.
 */
static void __used kretprobe_trampoline_holder(void)
{
	asm volatile (
			".global kretprobe_trampoline\n"
			"kretprobe_trampoline: \n"
#ifdef CONFIG_X86_64
			/* We don't bother saving the ss register */
			"	pushq %rsp\n"
			"	pushfq\n"
			SAVE_REGS_STRING
			"	movq %rsp, %rdi\n"
			"	call trampoline_handler\n"
			/* Replace saved sp with true return address. */
			"	movq %rax, 152(%rsp)\n"
			RESTORE_REGS_STRING
			"	popfq\n"
#else
			"	pushf\n"
			SAVE_REGS_STRING
			"	movl %esp, %eax\n"
			"	call trampoline_handler\n"
			/* Move flags to cs */
			"	movl 56(%esp), %edx\n"
			"	movl %edx, 52(%esp)\n"
			/* Replace saved flags with true return address. */
			"	movl %eax, 56(%esp)\n"
			RESTORE_REGS_STRING
			"	popf\n"
#endif
			"	ret\n");
}
NOKPROBE_SYMBOL(kretprobe_trampoline_holder);
NOKPROBE_SYMBOL(kretprobe_trampoline);

/*
 * Called from kretprobe_trampoline
 */
__visible __used void *trampoline_handler(struct pt_regs *regs)
{
	struct kretprobe_instance *ri = NULL;
	struct hlist_head *head, empty_rp;
	struct hlist_node *tmp;
	unsigned long flags, orig_ret_address = 0;
	unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;
	kprobe_opcode_t *correct_ret_addr = NULL;

	INIT_HLIST_HEAD(&empty_rp);
	kretprobe_hash_lock(current, &head, &flags);
	/* fixup registers */
#ifdef CONFIG_X86_64
	regs->cs = __KERNEL_CS;
#else
	regs->cs = __KERNEL_CS | get_kernel_rpl();
	regs->gs = 0;
#endif
	regs->ip = trampoline_address;
	regs->orig_ax = ~0UL;

	/*
	 * It is possible to have multiple instances associated with a given
	 * task either because multiple functions in the call path have
	 * return probes installed on them, and/or more than one
	 * return probe was registered for a target function.
	 *
	 * We can handle this because:
	 *     - instances are always pushed into the head of the list
	 *     - when multiple return probes are registered for the same
	 *	 function, the (chronologically) first instance's ret_addr
	 *	 will be the real return address, and all the rest will
	 *	 point to kretprobe_trampoline.
	 */
	hlist_for_each_entry_safe(ri, tmp, head, hlist) {
		if (ri->task != current)
			/* another task is sharing our hash bucket */
			continue;

		orig_ret_address = (unsigned long)ri->ret_addr;

		if (orig_ret_address != trampoline_address)
			/*
			 * This is the real return address. Any other
			 * instances associated with this task are for
			 * other calls deeper on the call stack.
			 */
			break;
	}

	kretprobe_assert(ri, orig_ret_address, trampoline_address);

	correct_ret_addr = ri->ret_addr;
	hlist_for_each_entry_safe(ri, tmp, head, hlist) {
		if (ri->task != current)
			/* another task is sharing our hash bucket */
			continue;

		orig_ret_address = (unsigned long)ri->ret_addr;
		if (ri->rp && ri->rp->handler) {
			__this_cpu_write(current_kprobe, &ri->rp->kp);
			get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE;
			ri->ret_addr = correct_ret_addr;
			ri->rp->handler(ri, regs);
			__this_cpu_write(current_kprobe, NULL);
		}

		recycle_rp_inst(ri, &empty_rp);

		if (orig_ret_address != trampoline_address)
			/*
			 * This is the real return address. Any other
			 * instances associated with this task are for
			 * other calls deeper on the call stack.
			 */
			break;
	}

	kretprobe_hash_unlock(current, &flags);

	hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
		hlist_del(&ri->hlist);
		kfree(ri);
	}
	return (void *)orig_ret_address;
}
NOKPROBE_SYMBOL(trampoline_handler);
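
/*
 * Return-probe usage that exercises this path (a sketch; names are
 * illustrative only):
 *
 *	static struct kretprobe rp = {
 *		.kp.symbol_name	= "do_fork",
 *		.handler	= my_ret_handler,
 *	};
 *	register_kretprobe(&rp);
 */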

/*
 * Called after single-stepping.  p->addr is the address of the
 * instruction whose first byte has been replaced by the "int 3"
 * instruction.  To avoid the SMP problems that can occur when we
 * temporarily put back the original opcode to single-step, we
 * single-stepped a copy of the instruction.  The address of this
 * copy is p->ainsn.insn.
 *
 * This function prepares to return from the post-single-step
 * interrupt.  We have to fix up the stack as follows:
 *
 * 0) Except in the case of absolute or indirect jump or call instructions,
 * the new ip is relative to the copied instruction.  We need to make
 * it relative to the original instruction.
 *
 * 1) If the single-stepped instruction was pushfl, then the TF and IF
 * flags are set in the just-pushed flags, and may need to be cleared.
 *
 * 2) If the single-stepped instruction was a call, the return address
 * that is atop the stack is the address following the copied instruction.
 * We need to make it the address following the original instruction.
 *
 * If this is the first time we've single-stepped the instruction at
 * this probepoint, and the instruction is boostable, boost it: add a
 * jump instruction after the copied instruction, that jumps to the next
 * instruction after the probepoint.
 */
static void resume_execution(struct kprobe *p, struct pt_regs *regs,
			     struct kprobe_ctlblk *kcb)
{
	unsigned long *tos = stack_addr(regs);
	unsigned long copy_ip = (unsigned long)p->ainsn.insn;
	unsigned long orig_ip = (unsigned long)p->addr;
	kprobe_opcode_t *insn = p->ainsn.insn;

	/* Skip prefixes */
	insn = skip_prefixes(insn);

	regs->flags &= ~X86_EFLAGS_TF;
	switch (*insn) {
	case 0x9c:	/* pushfl */
		*tos &= ~(X86_EFLAGS_TF | X86_EFLAGS_IF);
		*tos |= kcb->kprobe_old_flags;
		break;
	case 0xc2:	/* iret/ret/lret */
	case 0xc3:
	case 0xca:
	case 0xcb:
	case 0xcf:
	case 0xea:	/* jmp absolute -- ip is correct */
		/* ip is already adjusted, no more changes required */
		p->ainsn.boostable = 1;
		goto no_change;
	case 0xe8:	/* call relative - Fix return addr */
		*tos = orig_ip + (*tos - copy_ip);
		break;
#ifdef CONFIG_X86_32
	case 0x9a:	/* call absolute -- same as call absolute, indirect */
		*tos = orig_ip + (*tos - copy_ip);
		goto no_change;
#endif
	case 0xff:
		if ((insn[1] & 0x30) == 0x10) {
			/*
			 * call absolute, indirect
			 * Fix return addr; ip is correct.
			 * But this is not boostable.
			 */
			*tos = orig_ip + (*tos - copy_ip);
			goto no_change;
		} else if (((insn[1] & 0x31) == 0x20) ||
			   ((insn[1] & 0x31) == 0x21)) {
			/*
			 * jmp near and far, absolute indirect
			 * ip is correct, and this is boostable.
			 */
			p->ainsn.boostable = 1;
			goto no_change;
		}
	default:
		break;
	}

	if (p->ainsn.boostable == 0) {
		if ((regs->ip > copy_ip) &&
		    (regs->ip - copy_ip) + 5 < MAX_INSN_SIZE) {
			/*
			 * This instruction can be executed directly if it
			 * jumps back to the correct address.
			 */
			synthesize_reljump((void *)regs->ip,
				(void *)orig_ip + (regs->ip - copy_ip));
			p->ainsn.boostable = 1;
		} else {
			p->ainsn.boostable = -1;
		}
	}

	regs->ip += orig_ip - copy_ip;

no_change:
	restore_btf();
}
NOKPROBE_SYMBOL(resume_execution);
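
/*
 * After the first successful single-step of a boostable instruction,
 * the insn slot holds:
 *	<copied instruction>
 *	jmp <address of the instruction after the probepoint>
 * so later hits can run the copy directly (see setup_singlestep()).
 */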

/*
 * Interrupts are disabled on entry as trap1 is an interrupt gate and they
 * remain disabled throughout this function.
 */
int kprobe_debug_handler(struct pt_regs *regs)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	if (!cur)
		return 0;

	resume_execution(cur, regs, kcb);
	regs->flags |= kcb->kprobe_saved_flags;

	if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
		kcb->kprobe_status = KPROBE_HIT_SSDONE;
		cur->post_handler(cur, regs, 0);
	}

	/* Restore back the original saved kprobes variables and continue. */
	if (kcb->kprobe_status == KPROBE_REENTER) {
		restore_previous_kprobe(kcb);
		goto out;
	}
	reset_current_kprobe();
out:
	preempt_enable_no_resched();

	/*
	 * If somebody else is single-stepping across a probe point, flags
	 * will have TF set, in which case, continue the remaining processing
	 * of do_debug, as if this is not a probe hit.
	 */
	if (regs->flags & X86_EFLAGS_TF)
		return 0;

	return 1;
}
NOKPROBE_SYMBOL(kprobe_debug_handler);

int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	if (unlikely(regs->ip == (unsigned long)cur->ainsn.insn)) {
		/* This must happen on single-stepping */
		WARN_ON(kcb->kprobe_status != KPROBE_HIT_SS &&
			kcb->kprobe_status != KPROBE_REENTER);
		/*
		 * We are here because the instruction being single
		 * stepped caused a page fault. We reset the current
		 * kprobe so that the ip points back to the probe address
		 * and allow the page fault handler to continue as a
		 * normal page fault.
		 */
		regs->ip = (unsigned long)cur->addr;
		regs->flags |= kcb->kprobe_old_flags;
		if (kcb->kprobe_status == KPROBE_REENTER)
			restore_previous_kprobe(kcb);
		else
			reset_current_kprobe();
		preempt_enable_no_resched();
	} else if (kcb->kprobe_status == KPROBE_HIT_ACTIVE ||
		   kcb->kprobe_status == KPROBE_HIT_SSDONE) {
		/*
		 * We increment the nmissed count for accounting;
		 * the npre/npostfault counts can also be used for
		 * accounting these specific fault cases.
		 */
		kprobes_inc_nmissed_count(cur);

		/*
		 * We come here because an instruction in the pre/post
		 * handler caused the page fault.  This could happen if
		 * the handler tries to access user space, e.g. via
		 * copy_from_user() or get_user().  Let the
		 * user-specified handler try to fix it first.
		 */
		if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
			return 1;

		/*
		 * In case the user-specified fault handler returned
		 * zero, try to fix up.
		 */
		if (fixup_exception(regs))
			return 1;

		/*
		 * The fixup routine could not handle it;
		 * let do_page_fault() fix it.
		 */
	}

	return 0;
}
NOKPROBE_SYMBOL(kprobe_fault_handler);

/*
 * Wrapper routine for handling exceptions.
 */
int kprobe_exceptions_notify(struct notifier_block *self, unsigned long val,
			     void *data)
{
	struct die_args *args = data;
	int ret = NOTIFY_DONE;

	if (args->regs && user_mode_vm(args->regs))
		return ret;

	if (val == DIE_GPF) {
		/*
		 * To be potentially processing a kprobe fault and to
		 * trust the result from kprobe_running(), we have to
		 * be non-preemptible.
		 */
		if (!preemptible() && kprobe_running() &&
		    kprobe_fault_handler(args->regs, args->trapnr))
			ret = NOTIFY_STOP;
	}
	return ret;
}
NOKPROBE_SYMBOL(kprobe_exceptions_notify);

int setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct jprobe *jp = container_of(p, struct jprobe, kp);
	unsigned long addr;
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	kcb->jprobe_saved_regs = *regs;
	kcb->jprobe_saved_sp = stack_addr(regs);
	addr = (unsigned long)(kcb->jprobe_saved_sp);

	/*
	 * As Linus pointed out, gcc assumes that the callee
	 * owns the argument space and could overwrite it, e.g.
	 * tailcall optimization. So, to be absolutely safe
	 * we also save and restore enough stack bytes to cover
	 * the argument area.
	 */
	memcpy(kcb->jprobes_stack, (kprobe_opcode_t *)addr,
	       MIN_STACK_SIZE(addr));
	regs->flags &= ~X86_EFLAGS_IF;
	trace_hardirqs_off();
	regs->ip = (unsigned long)(jp->entry);
	return 1;
}
NOKPROBE_SYMBOL(setjmp_pre_handler);

void jprobe_return(void)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	asm volatile (
#ifdef CONFIG_X86_64
			"       xchg   %%rbx,%%rsp	\n"
#else
			"       xchgl   %%ebx,%%esp	\n"
#endif
			"       int3			\n"
			"       .globl jprobe_return_end\n"
			"       jprobe_return_end:	\n"
			"       nop			\n"::"b"
			(kcb->jprobe_saved_sp):"memory");
}
NOKPROBE_SYMBOL(jprobe_return);
NOKPROBE_SYMBOL(jprobe_return_end);

int longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	u8 *addr = (u8 *) (regs->ip - 1);
	struct jprobe *jp = container_of(p, struct jprobe, kp);

	if ((addr > (u8 *) jprobe_return) &&
	    (addr < (u8 *) jprobe_return_end)) {
		if (stack_addr(regs) != kcb->jprobe_saved_sp) {
			struct pt_regs *saved_regs = &kcb->jprobe_saved_regs;
			printk(KERN_ERR
			       "current sp %p does not match saved sp %p\n",
			       stack_addr(regs), kcb->jprobe_saved_sp);
			printk(KERN_ERR "Saved registers for jprobe %p\n", jp);
			show_regs(saved_regs);
			printk(KERN_ERR "Current registers\n");
			show_regs(regs);
			BUG();
		}
		*regs = kcb->jprobe_saved_regs;
		memcpy((kprobe_opcode_t *)(kcb->jprobe_saved_sp),
		       kcb->jprobes_stack,
		       MIN_STACK_SIZE(kcb->jprobe_saved_sp));
		preempt_enable_no_resched();
		return 1;
	}
	return 0;
}
NOKPROBE_SYMBOL(longjmp_break_handler);
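
/*
 * Jprobe usage these handlers support (a sketch; names are illustrative,
 * and the entry stub must mirror the probed function's signature and
 * finish with jprobe_return()):
 *
 *	static struct jprobe jp = {
 *		.kp.symbol_name	= "do_fork",
 *		.entry		= my_entry_stub,
 *	};
 *	register_jprobe(&jp);
 */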

bool arch_within_kprobe_blacklist(unsigned long addr)
{
	return  (addr >= (unsigned long)__kprobes_text_start &&
		 addr < (unsigned long)__kprobes_text_end) ||
		(addr >= (unsigned long)__entry_text_start &&
		 addr < (unsigned long)__entry_text_end);
}

int __init arch_init_kprobes(void)
{
	return 0;
}

int arch_trampoline_kprobe(struct kprobe *p)
{
	return 0;
}