kernel/events/uprobes.c
1 /*
2  * User-space Probes (UProbes)
3  *
4  * This program is free software; you can redistribute it and/or modify
5  * it under the terms of the GNU General Public License as published by
6  * the Free Software Foundation; either version 2 of the License, or
7  * (at your option) any later version.
8  *
9  * This program is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, write to the Free Software
16  * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
17  *
18  * Copyright (C) IBM Corporation, 2008-2012
19  * Authors:
20  *      Srikar Dronamraju
21  *      Jim Keniston
22  * Copyright (C) 2011-2012 Red Hat, Inc., Peter Zijlstra
23  */
24
25 #include <linux/kernel.h>
26 #include <linux/highmem.h>
27 #include <linux/pagemap.h>      /* read_mapping_page */
28 #include <linux/slab.h>
29 #include <linux/sched.h>
30 #include <linux/export.h>
31 #include <linux/rmap.h>         /* anon_vma_prepare */
32 #include <linux/mmu_notifier.h> /* set_pte_at_notify */
33 #include <linux/swap.h>         /* try_to_free_swap */
34 #include <linux/ptrace.h>       /* user_enable_single_step */
35 #include <linux/kdebug.h>       /* notifier mechanism */
36 #include "../../mm/internal.h"  /* munlock_vma_page */
37 #include <linux/percpu-rwsem.h>
38 #include <linux/task_work.h>
39 #include <linux/shmem_fs.h>
40
41 #include <linux/uprobes.h>
42
43 #define UINSNS_PER_PAGE                 (PAGE_SIZE/UPROBE_XOL_SLOT_BYTES)
44 #define MAX_UPROBE_XOL_SLOTS            UINSNS_PER_PAGE
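/*
 * For example, with 4 KiB pages and 128-byte XOL slots (illustrative values;
 * UPROBE_XOL_SLOT_BYTES is defined per architecture) this works out to
 * 4096 / 128 = 32 instruction slots per page.
 */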
45
46 static struct rb_root uprobes_tree = RB_ROOT;
47 /*
48  * allows us to skip uprobe_mmap() if there are no uprobe events active
49  * at this time.  Probably a fine-grained per-inode count would be better?
50  */
51 #define no_uprobe_events()      RB_EMPTY_ROOT(&uprobes_tree)
52
53 static DEFINE_SPINLOCK(uprobes_treelock);       /* serialize rbtree access */
54
55 #define UPROBES_HASH_SZ 13
56 /* serialize uprobe->pending_list */
57 static struct mutex uprobes_mmap_mutex[UPROBES_HASH_SZ];
58 #define uprobes_mmap_hash(v)    (&uprobes_mmap_mutex[((unsigned long)(v)) % UPROBES_HASH_SZ])
59
60 static struct percpu_rw_semaphore dup_mmap_sem;
61
62 /* Have a copy of original instruction */
63 #define UPROBE_COPY_INSN        0
64
65 struct uprobe {
66         struct rb_node          rb_node;        /* node in the rb tree */
67         atomic_t                ref;
68         struct rw_semaphore     register_rwsem;
69         struct rw_semaphore     consumer_rwsem;
70         struct list_head        pending_list;
71         struct uprobe_consumer  *consumers;
72         struct inode            *inode;         /* Also hold a ref to inode */
73         loff_t                  offset;
74         unsigned long           flags;
75
76         /*
77          * The generic code assumes that it has two members of unknown type
78          * owned by the arch-specific code:
79          *
80          *      insn -  copy_insn() saves the original instruction here for
81          *              arch_uprobe_analyze_insn().
82          *
83          *      ixol -  potentially modified instruction to execute out of
84          *              line, copied to xol_area by xol_get_insn_slot().
85          */
86         struct arch_uprobe      arch;
87 };
88
89 /*
90  * Execute out of line area: anonymous executable mapping installed
91  * by the probed task to execute the copy of the original instruction
92  * mangled by set_swbp().
93  *
94  * On a breakpoint hit, a thread contends for a slot.  It frees the
95  * slot after single-stepping. Currently a fixed number of slots is
96  * allocated.
97  */
98 struct xol_area {
99         wait_queue_head_t               wq;             /* if all slots are busy */
100         atomic_t                        slot_count;     /* number of in-use slots */
101         unsigned long                   *bitmap;        /* 0 = free slot */
102
103         struct vm_special_mapping       xol_mapping;
104         struct page                     *pages[2];
105         /*
106          * We keep the vma's vm_start rather than a pointer to the vma
107          * itself.  The probed process or a naughty kernel module could make
108          * the vma go away, and we must handle that reasonably gracefully.
109          */
110         unsigned long                   vaddr;          /* Page(s) of instruction slots */
111 };
112
113 /*
114  * valid_vma: Verify if the specified vma is an executable vma
115  * Relax restrictions while unregistering: vm_flags might have
116  * changed after the breakpoint was inserted.
117  *      - is_register: indicates if we are in register context.
118  *      - Return true if the specified vma is a valid, executable
119  *        vma.
120  */
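/*
 * Note on the check below: @flags collects the bits that must not be set
 * (VM_HUGETLB, VM_MAYSHARE, and additionally VM_WRITE when registering),
 * plus VM_MAYEXEC which must be set; the single test
 * "(vma->vm_flags & flags) == VM_MAYEXEC" enforces both conditions for
 * file-backed vmas.
 */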
121 static bool valid_vma(struct vm_area_struct *vma, bool is_register)
122 {
123         vm_flags_t flags = VM_HUGETLB | VM_MAYEXEC | VM_MAYSHARE;
124
125         if (is_register)
126                 flags |= VM_WRITE;
127
128         return vma->vm_file && (vma->vm_flags & flags) == VM_MAYEXEC;
129 }
130
131 static unsigned long offset_to_vaddr(struct vm_area_struct *vma, loff_t offset)
132 {
133         return vma->vm_start + offset - ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
134 }
135
136 static loff_t vaddr_to_offset(struct vm_area_struct *vma, unsigned long vaddr)
137 {
138         return ((loff_t)vma->vm_pgoff << PAGE_SHIFT) + (vaddr - vma->vm_start);
139 }
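/*
 * Worked example (illustrative values, assuming 4 KiB pages): a vma with
 * vm_start = 0x400000 and vm_pgoff = 2 maps file offset 0x2000 at
 * 0x400000, so offset_to_vaddr(vma, 0x2345) == 0x400345 and
 * vaddr_to_offset(vma, 0x400345) == 0x2345.
 */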
140
141 /**
142  * __replace_page - replace page in vma by new page.
143  * based on replace_page in mm/ksm.c
144  *
145  * @vma:      vma that holds the pte pointing to page
146  * @addr:     address the old page is mapped at
147  * @old_page: the page we are replacing by @new_page
148  * @new_page: the modified page that replaces @old_page
149  *
150  * Returns 0 on success, -EFAULT on failure.
151  */
152 static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
153                                 struct page *old_page, struct page *new_page)
154 {
155         struct mm_struct *mm = vma->vm_mm;
156         spinlock_t *ptl;
157         pte_t *ptep;
158         int err;
159         /* For mmu_notifiers */
160         const unsigned long mmun_start = addr;
161         const unsigned long mmun_end   = addr + PAGE_SIZE;
162         struct mem_cgroup *memcg;
163
164         err = mem_cgroup_try_charge(new_page, vma->vm_mm, GFP_KERNEL, &memcg,
165                         false);
166         if (err)
167                 return err;
168
169         /* For try_to_free_swap() and munlock_vma_page() below */
170         lock_page(old_page);
171
172         mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
173         err = -EAGAIN;
174         ptep = page_check_address(old_page, mm, addr, &ptl, 0);
175         if (!ptep) {
176                 mem_cgroup_cancel_charge(new_page, memcg, false);
177                 goto unlock;
178         }
179
180         get_page(new_page);
181         page_add_new_anon_rmap(new_page, vma, addr, false);
182         mem_cgroup_commit_charge(new_page, memcg, false, false);
183         lru_cache_add_active_or_unevictable(new_page, vma);
184
185         if (!PageAnon(old_page)) {
186                 dec_mm_counter(mm, mm_counter_file(old_page));
187                 inc_mm_counter(mm, MM_ANONPAGES);
188         }
189
190         flush_cache_page(vma, addr, pte_pfn(*ptep));
191         ptep_clear_flush_notify(vma, addr, ptep);
192         set_pte_at_notify(mm, addr, ptep, mk_pte(new_page, vma->vm_page_prot));
193
194         page_remove_rmap(old_page, false);
195         if (!page_mapped(old_page))
196                 try_to_free_swap(old_page);
197         pte_unmap_unlock(ptep, ptl);
198
199         if (vma->vm_flags & VM_LOCKED)
200                 munlock_vma_page(old_page);
201         put_page(old_page);
202
203         err = 0;
204  unlock:
205         mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
206         unlock_page(old_page);
207         return err;
208 }
209
210 /**
211  * is_swbp_insn - check if instruction is breakpoint instruction.
212  * @insn: instruction to be checked.
213  * Default implementation of is_swbp_insn
214  * Returns true if @insn is a breakpoint instruction.
215  */
216 bool __weak is_swbp_insn(uprobe_opcode_t *insn)
217 {
218         return *insn == UPROBE_SWBP_INSN;
219 }
220
221 /**
222  * is_trap_insn - check if instruction is a trap instruction.
223  * @insn: instruction to be checked.
224  * Default implementation of is_trap_insn
225  * Returns true if @insn is a trap instruction.
226  *
227  * This function is needed for the case where an architecture has multiple
228  * trap instructions (like powerpc).
229  */
230 bool __weak is_trap_insn(uprobe_opcode_t *insn)
231 {
232         return is_swbp_insn(insn);
233 }
234
235 static void copy_from_page(struct page *page, unsigned long vaddr, void *dst, int len)
236 {
237         void *kaddr = kmap_atomic(page);
238         memcpy(dst, kaddr + (vaddr & ~PAGE_MASK), len);
239         kunmap_atomic(kaddr);
240 }
241
242 static void copy_to_page(struct page *page, unsigned long vaddr, const void *src, int len)
243 {
244         void *kaddr = kmap_atomic(page);
245         memcpy(kaddr + (vaddr & ~PAGE_MASK), src, len);
246         kunmap_atomic(kaddr);
247 }
248
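/*
 * verify_opcode() returns 1 when uprobe_write_opcode() should go ahead and
 * write @new_opcode, and 0 when there is nothing to do: the breakpoint is
 * already installed (register), or the original instruction was already
 * restored/changed (unregister).
 */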
249 static int verify_opcode(struct page *page, unsigned long vaddr, uprobe_opcode_t *new_opcode)
250 {
251         uprobe_opcode_t old_opcode;
252         bool is_swbp;
253
254         /*
255          * Note: We only check if the old_opcode is UPROBE_SWBP_INSN here.
256          * We do not check if it is any other 'trap variant' which could
257          * be conditional trap instruction such as the one powerpc supports.
258          *
259          * The logic is that we do not care if the underlying instruction
260          * is a trap variant; uprobes always wins over any other (gdb)
261          * breakpoint.
262          */
263         copy_from_page(page, vaddr, &old_opcode, UPROBE_SWBP_INSN_SIZE);
264         is_swbp = is_swbp_insn(&old_opcode);
265
266         if (is_swbp_insn(new_opcode)) {
267                 if (is_swbp)            /* register: already installed? */
268                         return 0;
269         } else {
270                 if (!is_swbp)           /* unregister: was it changed by us? */
271                         return 0;
272         }
273
274         return 1;
275 }
276
277 /*
278  * NOTE:
279  * Expect the breakpoint instruction to be the smallest-size instruction for
280  * the architecture. If an arch has variable-length instructions and the
281  * breakpoint instruction is not the smallest-length instruction
282  * supported by that architecture, then we need to modify is_trap_at_addr and
283  * uprobe_write_opcode accordingly. This would never be a problem for archs
284  * that have fixed-length instructions.
285  *
286  * uprobe_write_opcode - write the opcode at a given virtual address.
287  * @mm: the probed process address space.
288  * @vaddr: the virtual address to store the opcode.
289  * @opcode: opcode to be written at @vaddr.
290  *
291  * Called with mm->mmap_sem held for write.
292  * Return 0 (success) or a negative errno.
293  */
294 int uprobe_write_opcode(struct mm_struct *mm, unsigned long vaddr,
295                         uprobe_opcode_t opcode)
296 {
297         struct page *old_page, *new_page;
298         struct vm_area_struct *vma;
299         int ret;
300
301 retry:
302         /* Read the page with vaddr into memory */
303         ret = get_user_pages_remote(NULL, mm, vaddr, 1, 0, 1, &old_page, &vma);
304         if (ret <= 0)
305                 return ret;
306
307         ret = verify_opcode(old_page, vaddr, &opcode);
308         if (ret <= 0)
309                 goto put_old;
310
311         ret = anon_vma_prepare(vma);
312         if (ret)
313                 goto put_old;
314
315         ret = -ENOMEM;
316         new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vaddr);
317         if (!new_page)
318                 goto put_old;
319
320         __SetPageUptodate(new_page);
321         copy_highpage(new_page, old_page);
322         copy_to_page(new_page, vaddr, &opcode, UPROBE_SWBP_INSN_SIZE);
323
324         ret = __replace_page(vma, vaddr, old_page, new_page);
325         put_page(new_page);
326 put_old:
327         put_page(old_page);
328
329         if (unlikely(ret == -EAGAIN))
330                 goto retry;
331         return ret;
332 }
333
334 /**
335  * set_swbp - store breakpoint at a given address.
336  * @auprobe: arch specific probepoint information.
337  * @mm: the probed process address space.
338  * @vaddr: the virtual address to insert the opcode.
339  *
340  * For mm @mm, store the breakpoint instruction at @vaddr.
341  * Return 0 (success) or a negative errno.
342  */
343 int __weak set_swbp(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long vaddr)
344 {
345         return uprobe_write_opcode(mm, vaddr, UPROBE_SWBP_INSN);
346 }
347
348 /**
349  * set_orig_insn - Restore the original instruction.
350  * @mm: the probed process address space.
351  * @auprobe: arch specific probepoint information.
352  * @vaddr: the virtual address to insert the opcode.
353  *
354  * For mm @mm, restore the original opcode at @vaddr.
355  * Return 0 (success) or a negative errno.
356  */
357 int __weak
358 set_orig_insn(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long vaddr)
359 {
360         return uprobe_write_opcode(mm, vaddr, *(uprobe_opcode_t *)&auprobe->insn);
361 }
362
363 static struct uprobe *get_uprobe(struct uprobe *uprobe)
364 {
365         atomic_inc(&uprobe->ref);
366         return uprobe;
367 }
368
369 static void put_uprobe(struct uprobe *uprobe)
370 {
371         if (atomic_dec_and_test(&uprobe->ref))
372                 kfree(uprobe);
373 }
374
375 static int match_uprobe(struct uprobe *l, struct uprobe *r)
376 {
377         if (l->inode < r->inode)
378                 return -1;
379
380         if (l->inode > r->inode)
381                 return 1;
382
383         if (l->offset < r->offset)
384                 return -1;
385
386         if (l->offset > r->offset)
387                 return 1;
388
389         return 0;
390 }
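/*
 * match_uprobe() defines the (inode, offset) ordering that __find_uprobe(),
 * __insert_uprobe() and find_node_in_range() all rely on when walking
 * uprobes_tree.
 */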
391
392 static struct uprobe *__find_uprobe(struct inode *inode, loff_t offset)
393 {
394         struct uprobe u = { .inode = inode, .offset = offset };
395         struct rb_node *n = uprobes_tree.rb_node;
396         struct uprobe *uprobe;
397         int match;
398
399         while (n) {
400                 uprobe = rb_entry(n, struct uprobe, rb_node);
401                 match = match_uprobe(&u, uprobe);
402                 if (!match)
403                         return get_uprobe(uprobe);
404
405                 if (match < 0)
406                         n = n->rb_left;
407                 else
408                         n = n->rb_right;
409         }
410         return NULL;
411 }
412
413 /*
414  * Find a uprobe corresponding to a given inode:offset
415  * Acquires uprobes_treelock
416  */
417 static struct uprobe *find_uprobe(struct inode *inode, loff_t offset)
418 {
419         struct uprobe *uprobe;
420
421         spin_lock(&uprobes_treelock);
422         uprobe = __find_uprobe(inode, offset);
423         spin_unlock(&uprobes_treelock);
424
425         return uprobe;
426 }
427
428 static struct uprobe *__insert_uprobe(struct uprobe *uprobe)
429 {
430         struct rb_node **p = &uprobes_tree.rb_node;
431         struct rb_node *parent = NULL;
432         struct uprobe *u;
433         int match;
434
435         while (*p) {
436                 parent = *p;
437                 u = rb_entry(parent, struct uprobe, rb_node);
438                 match = match_uprobe(uprobe, u);
439                 if (!match)
440                         return get_uprobe(u);
441
442                 if (match < 0)
443                         p = &parent->rb_left;
444                 else
445                         p = &parent->rb_right;
446
447         }
448
449         u = NULL;
450         rb_link_node(&uprobe->rb_node, parent, p);
451         rb_insert_color(&uprobe->rb_node, &uprobes_tree);
452         /* get access + creation ref */
453         atomic_set(&uprobe->ref, 2);
454
455         return u;
456 }
457
458 /*
459  * Acquire uprobes_treelock.
460  * Matching uprobe already exists in rbtree;
461  *      increment (access refcount) and return the matching uprobe.
462  *
463  * No matching uprobe; insert the uprobe in rb_tree;
464  *      get a double refcount (access + creation) and return NULL.
465  */
466 static struct uprobe *insert_uprobe(struct uprobe *uprobe)
467 {
468         struct uprobe *u;
469
470         spin_lock(&uprobes_treelock);
471         u = __insert_uprobe(uprobe);
472         spin_unlock(&uprobes_treelock);
473
474         return u;
475 }
476
477 static struct uprobe *alloc_uprobe(struct inode *inode, loff_t offset)
478 {
479         struct uprobe *uprobe, *cur_uprobe;
480
481         uprobe = kzalloc(sizeof(struct uprobe), GFP_KERNEL);
482         if (!uprobe)
483                 return NULL;
484
485         uprobe->inode = igrab(inode);
486         uprobe->offset = offset;
487         init_rwsem(&uprobe->register_rwsem);
488         init_rwsem(&uprobe->consumer_rwsem);
489
490         /* add to uprobes_tree, sorted on inode:offset */
491         cur_uprobe = insert_uprobe(uprobe);
492         /* a uprobe exists for this inode:offset combination */
493         if (cur_uprobe) {
494                 kfree(uprobe);
495                 uprobe = cur_uprobe;
496                 iput(inode);
497         }
498
499         return uprobe;
500 }
501
502 static void consumer_add(struct uprobe *uprobe, struct uprobe_consumer *uc)
503 {
504         down_write(&uprobe->consumer_rwsem);
505         uc->next = uprobe->consumers;
506         uprobe->consumers = uc;
507         up_write(&uprobe->consumer_rwsem);
508 }
509
510 /*
511  * For uprobe @uprobe, delete the consumer @uc.
512  * Return true if @uc was deleted successfully,
513  * false otherwise.
514  */
515 static bool consumer_del(struct uprobe *uprobe, struct uprobe_consumer *uc)
516 {
517         struct uprobe_consumer **con;
518         bool ret = false;
519
520         down_write(&uprobe->consumer_rwsem);
521         for (con = &uprobe->consumers; *con; con = &(*con)->next) {
522                 if (*con == uc) {
523                         *con = uc->next;
524                         ret = true;
525                         break;
526                 }
527         }
528         up_write(&uprobe->consumer_rwsem);
529
530         return ret;
531 }
532
533 static int __copy_insn(struct address_space *mapping, struct file *filp,
534                         void *insn, int nbytes, loff_t offset)
535 {
536         struct page *page;
537         /*
538          * Ensure that the page that has the original instruction is populated
539          * and in page-cache. If ->readpage == NULL it must be shmem_mapping(),
540          * see uprobe_register().
541          */
542         if (mapping->a_ops->readpage)
543                 page = read_mapping_page(mapping, offset >> PAGE_SHIFT, filp);
544         else
545                 page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
546         if (IS_ERR(page))
547                 return PTR_ERR(page);
548
549         copy_from_page(page, offset, insn, nbytes);
550         put_page(page);
551
552         return 0;
553 }
554
555 static int copy_insn(struct uprobe *uprobe, struct file *filp)
556 {
557         struct address_space *mapping = uprobe->inode->i_mapping;
558         loff_t offs = uprobe->offset;
559         void *insn = &uprobe->arch.insn;
560         int size = sizeof(uprobe->arch.insn);
561         int len, err = -EIO;
562
563         /* Copy only available bytes, -EIO if nothing was read */
564         do {
565                 if (offs >= i_size_read(uprobe->inode))
566                         break;
567
568                 len = min_t(int, size, PAGE_SIZE - (offs & ~PAGE_MASK));
569                 err = __copy_insn(mapping, filp, insn, len, offs);
570                 if (err)
571                         break;
572
573                 insn += len;
574                 offs += len;
575                 size -= len;
576         } while (size);
577
578         return err;
579 }
580
581 static int prepare_uprobe(struct uprobe *uprobe, struct file *file,
582                                 struct mm_struct *mm, unsigned long vaddr)
583 {
584         int ret = 0;
585
586         if (test_bit(UPROBE_COPY_INSN, &uprobe->flags))
587                 return ret;
588
589         /* TODO: move this into _register, until then we abuse this sem. */
590         down_write(&uprobe->consumer_rwsem);
591         if (test_bit(UPROBE_COPY_INSN, &uprobe->flags))
592                 goto out;
593
594         ret = copy_insn(uprobe, file);
595         if (ret)
596                 goto out;
597
598         ret = -ENOTSUPP;
599         if (is_trap_insn((uprobe_opcode_t *)&uprobe->arch.insn))
600                 goto out;
601
602         ret = arch_uprobe_analyze_insn(&uprobe->arch, mm, vaddr);
603         if (ret)
604                 goto out;
605
606         /* uprobe_write_opcode() assumes we don't cross page boundary */
607         BUG_ON((uprobe->offset & ~PAGE_MASK) +
608                         UPROBE_SWBP_INSN_SIZE > PAGE_SIZE);
609
610         smp_wmb(); /* pairs with rmb() in find_active_uprobe() */
611         set_bit(UPROBE_COPY_INSN, &uprobe->flags);
612
613  out:
614         up_write(&uprobe->consumer_rwsem);
615
616         return ret;
617 }
618
619 static inline bool consumer_filter(struct uprobe_consumer *uc,
620                                    enum uprobe_filter_ctx ctx, struct mm_struct *mm)
621 {
622         return !uc->filter || uc->filter(uc, ctx, mm);
623 }
624
625 static bool filter_chain(struct uprobe *uprobe,
626                          enum uprobe_filter_ctx ctx, struct mm_struct *mm)
627 {
628         struct uprobe_consumer *uc;
629         bool ret = false;
630
631         down_read(&uprobe->consumer_rwsem);
632         for (uc = uprobe->consumers; uc; uc = uc->next) {
633                 ret = consumer_filter(uc, ctx, mm);
634                 if (ret)
635                         break;
636         }
637         up_read(&uprobe->consumer_rwsem);
638
639         return ret;
640 }
641
642 static int
643 install_breakpoint(struct uprobe *uprobe, struct mm_struct *mm,
644                         struct vm_area_struct *vma, unsigned long vaddr)
645 {
646         bool first_uprobe;
647         int ret;
648
649         ret = prepare_uprobe(uprobe, vma->vm_file, mm, vaddr);
650         if (ret)
651                 return ret;
652
653         /*
654          * set MMF_HAS_UPROBES in advance for uprobe_pre_sstep_notifier();
655          * the task can hit this breakpoint right after __replace_page().
656          */
657         first_uprobe = !test_bit(MMF_HAS_UPROBES, &mm->flags);
658         if (first_uprobe)
659                 set_bit(MMF_HAS_UPROBES, &mm->flags);
660
661         ret = set_swbp(&uprobe->arch, mm, vaddr);
662         if (!ret)
663                 clear_bit(MMF_RECALC_UPROBES, &mm->flags);
664         else if (first_uprobe)
665                 clear_bit(MMF_HAS_UPROBES, &mm->flags);
666
667         return ret;
668 }
669
670 static int
671 remove_breakpoint(struct uprobe *uprobe, struct mm_struct *mm, unsigned long vaddr)
672 {
673         set_bit(MMF_RECALC_UPROBES, &mm->flags);
674         return set_orig_insn(&uprobe->arch, mm, vaddr);
675 }
676
677 static inline bool uprobe_is_active(struct uprobe *uprobe)
678 {
679         return !RB_EMPTY_NODE(&uprobe->rb_node);
680 }
681 /*
682  * There could be threads that have already hit the breakpoint. They
683  * will recheck the current insn and restart if find_uprobe() fails.
684  * See find_active_uprobe().
685  */
686 static void delete_uprobe(struct uprobe *uprobe)
687 {
688         if (WARN_ON(!uprobe_is_active(uprobe)))
689                 return;
690
691         spin_lock(&uprobes_treelock);
692         rb_erase(&uprobe->rb_node, &uprobes_tree);
693         spin_unlock(&uprobes_treelock);
694         RB_CLEAR_NODE(&uprobe->rb_node); /* for uprobe_is_active() */
695         iput(uprobe->inode);
696         put_uprobe(uprobe);
697 }
698
699 struct map_info {
700         struct map_info *next;
701         struct mm_struct *mm;
702         unsigned long vaddr;
703 };
704
705 static inline struct map_info *free_map_info(struct map_info *info)
706 {
707         struct map_info *next = info->next;
708         kfree(info);
709         return next;
710 }
711
712 static struct map_info *
713 build_map_info(struct address_space *mapping, loff_t offset, bool is_register)
714 {
715         unsigned long pgoff = offset >> PAGE_SHIFT;
716         struct vm_area_struct *vma;
717         struct map_info *curr = NULL;
718         struct map_info *prev = NULL;
719         struct map_info *info;
720         int more = 0;
721
722  again:
723         i_mmap_lock_read(mapping);
724         vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
725                 if (!valid_vma(vma, is_register))
726                         continue;
727
728                 if (!prev && !more) {
729                         /*
730                          * Needs GFP_NOWAIT to avoid i_mmap_rwsem recursion through
731                          * reclaim. This is optimistic, no harm done if it fails.
732                          */
733                         prev = kmalloc(sizeof(struct map_info),
734                                         GFP_NOWAIT | __GFP_NOMEMALLOC | __GFP_NOWARN);
735                         if (prev)
736                                 prev->next = NULL;
737                 }
738                 if (!prev) {
739                         more++;
740                         continue;
741                 }
742
743                 if (!atomic_inc_not_zero(&vma->vm_mm->mm_users))
744                         continue;
745
746                 info = prev;
747                 prev = prev->next;
748                 info->next = curr;
749                 curr = info;
750
751                 info->mm = vma->vm_mm;
752                 info->vaddr = offset_to_vaddr(vma, offset);
753         }
754         i_mmap_unlock_read(mapping);
755
756         if (!more)
757                 goto out;
758
759         prev = curr;
760         while (curr) {
761                 mmput(curr->mm);
762                 curr = curr->next;
763         }
764
765         do {
766                 info = kmalloc(sizeof(struct map_info), GFP_KERNEL);
767                 if (!info) {
768                         curr = ERR_PTR(-ENOMEM);
769                         goto out;
770                 }
771                 info->next = prev;
772                 prev = info;
773         } while (--more);
774
775         goto again;
776  out:
777         while (prev)
778                 prev = free_map_info(prev);
779         return curr;
780 }
781
782 static int
783 register_for_each_vma(struct uprobe *uprobe, struct uprobe_consumer *new)
784 {
785         bool is_register = !!new;
786         struct map_info *info;
787         int err = 0;
788
789         percpu_down_write(&dup_mmap_sem);
790         info = build_map_info(uprobe->inode->i_mapping,
791                                         uprobe->offset, is_register);
792         if (IS_ERR(info)) {
793                 err = PTR_ERR(info);
794                 goto out;
795         }
796
797         while (info) {
798                 struct mm_struct *mm = info->mm;
799                 struct vm_area_struct *vma;
800
801                 if (err && is_register)
802                         goto free;
803
804                 down_write(&mm->mmap_sem);
805                 vma = find_vma(mm, info->vaddr);
806                 if (!vma || !valid_vma(vma, is_register) ||
807                     file_inode(vma->vm_file) != uprobe->inode)
808                         goto unlock;
809
810                 if (vma->vm_start > info->vaddr ||
811                     vaddr_to_offset(vma, info->vaddr) != uprobe->offset)
812                         goto unlock;
813
814                 if (is_register) {
815                         /* consult only the "caller", new consumer. */
816                         if (consumer_filter(new,
817                                         UPROBE_FILTER_REGISTER, mm))
818                                 err = install_breakpoint(uprobe, mm, vma, info->vaddr);
819                 } else if (test_bit(MMF_HAS_UPROBES, &mm->flags)) {
820                         if (!filter_chain(uprobe,
821                                         UPROBE_FILTER_UNREGISTER, mm))
822                                 err |= remove_breakpoint(uprobe, mm, info->vaddr);
823                 }
824
825  unlock:
826                 up_write(&mm->mmap_sem);
827  free:
828                 mmput(mm);
829                 info = free_map_info(info);
830         }
831  out:
832         percpu_up_write(&dup_mmap_sem);
833         return err;
834 }
835
836 static int __uprobe_register(struct uprobe *uprobe, struct uprobe_consumer *uc)
837 {
838         consumer_add(uprobe, uc);
839         return register_for_each_vma(uprobe, uc);
840 }
841
842 static void __uprobe_unregister(struct uprobe *uprobe, struct uprobe_consumer *uc)
843 {
844         int err;
845
846         if (WARN_ON(!consumer_del(uprobe, uc)))
847                 return;
848
849         err = register_for_each_vma(uprobe, NULL);
850         /* TODO: can't unregister? schedule a worker thread */
851         if (!uprobe->consumers && !err)
852                 delete_uprobe(uprobe);
853 }
854
855 /*
856  * uprobe_register - register a probe
857  * @inode: the file in which the probe has to be placed.
858  * @offset: offset from the start of the file.
859  * @uc: information on how to handle the probe.
860  *
861  * Apart from the access refcount, uprobe_register() takes a creation
862  * refcount (through alloc_uprobe) if and only if this @uprobe is getting
863  * inserted into the rbtree (i.e. the first consumer for a @inode:@offset
864  * tuple).  The creation refcount stops uprobe_unregister from freeing the
865  * @uprobe even before the register operation is complete. The creation
866  * refcount is released when the last @uc for the @uprobe
867  * unregisters.
868  *
869  * Return errno if it cannot successfully install probes,
870  * else return 0 (success).
871  */
872 int uprobe_register(struct inode *inode, loff_t offset, struct uprobe_consumer *uc)
873 {
874         struct uprobe *uprobe;
875         int ret;
876
877         /* Uprobe must have at least one set consumer */
878         if (!uc->handler && !uc->ret_handler)
879                 return -EINVAL;
880
881         /* copy_insn() uses read_mapping_page() or shmem_read_mapping_page() */
882         if (!inode->i_mapping->a_ops->readpage && !shmem_mapping(inode->i_mapping))
883                 return -EIO;
884         /* Racy, just to catch the obvious mistakes */
885         if (offset > i_size_read(inode))
886                 return -EINVAL;
887
888  retry:
889         uprobe = alloc_uprobe(inode, offset);
890         if (!uprobe)
891                 return -ENOMEM;
892         /*
893          * We can race with uprobe_unregister()->delete_uprobe().
894          * Check uprobe_is_active() and retry if it is false.
895          */
896         down_write(&uprobe->register_rwsem);
897         ret = -EAGAIN;
898         if (likely(uprobe_is_active(uprobe))) {
899                 ret = __uprobe_register(uprobe, uc);
900                 if (ret)
901                         __uprobe_unregister(uprobe, uc);
902         }
903         up_write(&uprobe->register_rwsem);
904         put_uprobe(uprobe);
905
906         if (unlikely(ret == -EAGAIN))
907                 goto retry;
908         return ret;
909 }
910 EXPORT_SYMBOL_GPL(uprobe_register);
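/*
 * Minimal usage sketch (illustrative only, not part of this file; assumes
 * the uprobe_consumer handler signature from <linux/uprobes.h>):
 *
 *	static int my_handler(struct uprobe_consumer *self, struct pt_regs *regs)
 *	{
 *		return 0;	// 0 keeps the probe installed
 *	}
 *
 *	static struct uprobe_consumer my_consumer = { .handler = my_handler };
 *
 *	int err = uprobe_register(inode, offset, &my_consumer);
 *	if (!err)
 *		uprobe_unregister(inode, offset, &my_consumer);
 */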
911
912 /*
913  * uprobe_apply - add or remove the breakpoints of an already registered probe.
914  * @inode: the file in which the probe has been placed.
915  * @offset: offset from the start of the file.
916  * @uc: consumer which wants to add more or remove some breakpoints
917  * @add: add or remove the breakpoints
918  */
919 int uprobe_apply(struct inode *inode, loff_t offset,
920                         struct uprobe_consumer *uc, bool add)
921 {
922         struct uprobe *uprobe;
923         struct uprobe_consumer *con;
924         int ret = -ENOENT;
925
926         uprobe = find_uprobe(inode, offset);
927         if (WARN_ON(!uprobe))
928                 return ret;
929
930         down_write(&uprobe->register_rwsem);
931         for (con = uprobe->consumers; con && con != uc ; con = con->next)
932                 ;
933         if (con)
934                 ret = register_for_each_vma(uprobe, add ? uc : NULL);
935         up_write(&uprobe->register_rwsem);
936         put_uprobe(uprobe);
937
938         return ret;
939 }
940
941 /*
942  * uprobe_unregister - unregister an already registered probe.
943  * @inode: the file in which the probe has to be removed.
944  * @offset: offset from the start of the file.
945  * @uc: identify which probe if multiple probes are colocated.
946  */
947 void uprobe_unregister(struct inode *inode, loff_t offset, struct uprobe_consumer *uc)
948 {
949         struct uprobe *uprobe;
950
951         uprobe = find_uprobe(inode, offset);
952         if (WARN_ON(!uprobe))
953                 return;
954
955         down_write(&uprobe->register_rwsem);
956         __uprobe_unregister(uprobe, uc);
957         up_write(&uprobe->register_rwsem);
958         put_uprobe(uprobe);
959 }
960 EXPORT_SYMBOL_GPL(uprobe_unregister);
961
962 static int unapply_uprobe(struct uprobe *uprobe, struct mm_struct *mm)
963 {
964         struct vm_area_struct *vma;
965         int err = 0;
966
967         down_read(&mm->mmap_sem);
968         for (vma = mm->mmap; vma; vma = vma->vm_next) {
969                 unsigned long vaddr;
970                 loff_t offset;
971
972                 if (!valid_vma(vma, false) ||
973                     file_inode(vma->vm_file) != uprobe->inode)
974                         continue;
975
976                 offset = (loff_t)vma->vm_pgoff << PAGE_SHIFT;
977                 if (uprobe->offset <  offset ||
978                     uprobe->offset >= offset + vma->vm_end - vma->vm_start)
979                         continue;
980
981                 vaddr = offset_to_vaddr(vma, uprobe->offset);
982                 err |= remove_breakpoint(uprobe, mm, vaddr);
983         }
984         up_read(&mm->mmap_sem);
985
986         return err;
987 }
988
989 static struct rb_node *
990 find_node_in_range(struct inode *inode, loff_t min, loff_t max)
991 {
992         struct rb_node *n = uprobes_tree.rb_node;
993
994         while (n) {
995                 struct uprobe *u = rb_entry(n, struct uprobe, rb_node);
996
997                 if (inode < u->inode) {
998                         n = n->rb_left;
999                 } else if (inode > u->inode) {
1000                         n = n->rb_right;
1001                 } else {
1002                         if (max < u->offset)
1003                                 n = n->rb_left;
1004                         else if (min > u->offset)
1005                                 n = n->rb_right;
1006                         else
1007                                 break;
1008                 }
1009         }
1010
1011         return n;
1012 }
1013
1014 /*
1015  * For a given range in vma, build a list of probes that need to be inserted.
1016  */
1017 static void build_probe_list(struct inode *inode,
1018                                 struct vm_area_struct *vma,
1019                                 unsigned long start, unsigned long end,
1020                                 struct list_head *head)
1021 {
1022         loff_t min, max;
1023         struct rb_node *n, *t;
1024         struct uprobe *u;
1025
1026         INIT_LIST_HEAD(head);
1027         min = vaddr_to_offset(vma, start);
1028         max = min + (end - start) - 1;
1029
1030         spin_lock(&uprobes_treelock);
1031         n = find_node_in_range(inode, min, max);
1032         if (n) {
1033                 for (t = n; t; t = rb_prev(t)) {
1034                         u = rb_entry(t, struct uprobe, rb_node);
1035                         if (u->inode != inode || u->offset < min)
1036                                 break;
1037                         list_add(&u->pending_list, head);
1038                         get_uprobe(u);
1039                 }
1040                 for (t = n; (t = rb_next(t)); ) {
1041                         u = rb_entry(t, struct uprobe, rb_node);
1042                         if (u->inode != inode || u->offset > max)
1043                                 break;
1044                         list_add(&u->pending_list, head);
1045                         get_uprobe(u);
1046                 }
1047         }
1048         spin_unlock(&uprobes_treelock);
1049 }
1050
1051 /*
1052  * Called from mmap_region/vma_adjust with mm->mmap_sem acquired.
1053  *
1054  * Currently we ignore all errors and always return 0, the callers
1055  * can't handle the failure anyway.
1056  */
1057 int uprobe_mmap(struct vm_area_struct *vma)
1058 {
1059         struct list_head tmp_list;
1060         struct uprobe *uprobe, *u;
1061         struct inode *inode;
1062
1063         if (no_uprobe_events() || !valid_vma(vma, true))
1064                 return 0;
1065
1066         inode = file_inode(vma->vm_file);
1067         if (!inode)
1068                 return 0;
1069
1070         mutex_lock(uprobes_mmap_hash(inode));
1071         build_probe_list(inode, vma, vma->vm_start, vma->vm_end, &tmp_list);
1072         /*
1073          * We can race with uprobe_unregister(); this uprobe can already be
1074          * removed. But in this case filter_chain() must return false; all
1075          * consumers have gone away.
1076          */
1077         list_for_each_entry_safe(uprobe, u, &tmp_list, pending_list) {
1078                 if (!fatal_signal_pending(current) &&
1079                     filter_chain(uprobe, UPROBE_FILTER_MMAP, vma->vm_mm)) {
1080                         unsigned long vaddr = offset_to_vaddr(vma, uprobe->offset);
1081                         install_breakpoint(uprobe, vma->vm_mm, vma, vaddr);
1082                 }
1083                 put_uprobe(uprobe);
1084         }
1085         mutex_unlock(uprobes_mmap_hash(inode));
1086
1087         return 0;
1088 }
1089
1090 static bool
1091 vma_has_uprobes(struct vm_area_struct *vma, unsigned long start, unsigned long end)
1092 {
1093         loff_t min, max;
1094         struct inode *inode;
1095         struct rb_node *n;
1096
1097         inode = file_inode(vma->vm_file);
1098
1099         min = vaddr_to_offset(vma, start);
1100         max = min + (end - start) - 1;
1101
1102         spin_lock(&uprobes_treelock);
1103         n = find_node_in_range(inode, min, max);
1104         spin_unlock(&uprobes_treelock);
1105
1106         return !!n;
1107 }
1108
1109 /*
1110  * Called in context of a munmap of a vma.
1111  */
1112 void uprobe_munmap(struct vm_area_struct *vma, unsigned long start, unsigned long end)
1113 {
1114         if (no_uprobe_events() || !valid_vma(vma, false))
1115                 return;
1116
1117         if (!atomic_read(&vma->vm_mm->mm_users)) /* called by mmput() ? */
1118                 return;
1119
1120         if (!test_bit(MMF_HAS_UPROBES, &vma->vm_mm->flags) ||
1121              test_bit(MMF_RECALC_UPROBES, &vma->vm_mm->flags))
1122                 return;
1123
1124         if (vma_has_uprobes(vma, start, end))
1125                 set_bit(MMF_RECALC_UPROBES, &vma->vm_mm->flags);
1126 }
1127
1128 /* Slot allocation for XOL */
1129 static int xol_add_vma(struct mm_struct *mm, struct xol_area *area)
1130 {
1131         struct vm_area_struct *vma;
1132         int ret;
1133
1134         if (down_write_killable(&mm->mmap_sem))
1135                 return -EINTR;
1136
1137         if (mm->uprobes_state.xol_area) {
1138                 ret = -EALREADY;
1139                 goto fail;
1140         }
1141
1142         if (!area->vaddr) {
1143                 /* Try to map as high as possible; this is only a hint. */
1144                 area->vaddr = get_unmapped_area(NULL, TASK_SIZE - PAGE_SIZE,
1145                                                 PAGE_SIZE, 0, 0);
1146                 if (area->vaddr & ~PAGE_MASK) {
1147                         ret = area->vaddr;
1148                         goto fail;
1149                 }
1150         }
1151
1152         vma = _install_special_mapping(mm, area->vaddr, PAGE_SIZE,
1153                                 VM_EXEC|VM_MAYEXEC|VM_DONTCOPY|VM_IO,
1154                                 &area->xol_mapping);
1155         if (IS_ERR(vma)) {
1156                 ret = PTR_ERR(vma);
1157                 goto fail;
1158         }
1159
1160         ret = 0;
1161         smp_wmb();      /* pairs with get_xol_area() */
1162         mm->uprobes_state.xol_area = area;
1163  fail:
1164         up_write(&mm->mmap_sem);
1165
1166         return ret;
1167 }
1168
1169 static struct xol_area *__create_xol_area(unsigned long vaddr)
1170 {
1171         struct mm_struct *mm = current->mm;
1172         uprobe_opcode_t insn = UPROBE_SWBP_INSN;
1173         struct xol_area *area;
1174
1175         area = kmalloc(sizeof(*area), GFP_KERNEL);
1176         if (unlikely(!area))
1177                 goto out;
1178
1179         area->bitmap = kzalloc(BITS_TO_LONGS(UINSNS_PER_PAGE) * sizeof(long), GFP_KERNEL);
1180         if (!area->bitmap)
1181                 goto free_area;
1182
1183         area->xol_mapping.name = "[uprobes]";
1184         area->xol_mapping.fault = NULL;
1185         area->xol_mapping.pages = area->pages;
1186         area->pages[0] = alloc_page(GFP_HIGHUSER);
1187         if (!area->pages[0])
1188                 goto free_bitmap;
1189         area->pages[1] = NULL;
1190
1191         area->vaddr = vaddr;
1192         init_waitqueue_head(&area->wq);
1193         /* Reserve the 1st slot for get_trampoline_vaddr() */
1194         set_bit(0, area->bitmap);
1195         atomic_set(&area->slot_count, 1);
1196         copy_to_page(area->pages[0], 0, &insn, UPROBE_SWBP_INSN_SIZE);
1197
1198         if (!xol_add_vma(mm, area))
1199                 return area;
1200
1201         __free_page(area->pages[0]);
1202  free_bitmap:
1203         kfree(area->bitmap);
1204  free_area:
1205         kfree(area);
1206  out:
1207         return NULL;
1208 }
1209
1210 /*
1211  * get_xol_area - Allocate process's xol_area if necessary.
1212  * This area will be used for storing instructions for execution out of line.
1213  *
1214  * Returns the allocated area or NULL.
1215  */
1216 static struct xol_area *get_xol_area(void)
1217 {
1218         struct mm_struct *mm = current->mm;
1219         struct xol_area *area;
1220
1221         if (!mm->uprobes_state.xol_area)
1222                 __create_xol_area(0);
1223
1224         area = mm->uprobes_state.xol_area;
1225         smp_read_barrier_depends();     /* pairs with wmb in xol_add_vma() */
1226         return area;
1227 }
1228
1229 /*
1230  * uprobe_clear_state - Free the area allocated for slots.
1231  */
1232 void uprobe_clear_state(struct mm_struct *mm)
1233 {
1234         struct xol_area *area = mm->uprobes_state.xol_area;
1235
1236         if (!area)
1237                 return;
1238
1239         put_page(area->pages[0]);
1240         kfree(area->bitmap);
1241         kfree(area);
1242 }
1243
1244 void uprobe_start_dup_mmap(void)
1245 {
1246         percpu_down_read(&dup_mmap_sem);
1247 }
1248
1249 void uprobe_end_dup_mmap(void)
1250 {
1251         percpu_up_read(&dup_mmap_sem);
1252 }
1253
1254 void uprobe_dup_mmap(struct mm_struct *oldmm, struct mm_struct *newmm)
1255 {
1256         newmm->uprobes_state.xol_area = NULL;
1257
1258         if (test_bit(MMF_HAS_UPROBES, &oldmm->flags)) {
1259                 set_bit(MMF_HAS_UPROBES, &newmm->flags);
1260                 /* unconditionally, dup_mmap() skips VM_DONTCOPY vmas */
1261                 set_bit(MMF_RECALC_UPROBES, &newmm->flags);
1262         }
1263 }
1264
1265 /*
1266  *  - search for a free slot.
1267  */
1268 static unsigned long xol_take_insn_slot(struct xol_area *area)
1269 {
1270         unsigned long slot_addr;
1271         int slot_nr;
1272
1273         do {
1274                 slot_nr = find_first_zero_bit(area->bitmap, UINSNS_PER_PAGE);
1275                 if (slot_nr < UINSNS_PER_PAGE) {
1276                         if (!test_and_set_bit(slot_nr, area->bitmap))
1277                                 break;
1278
1279                         slot_nr = UINSNS_PER_PAGE;
1280                         continue;
1281                 }
1282                 wait_event(area->wq, (atomic_read(&area->slot_count) < UINSNS_PER_PAGE));
1283         } while (slot_nr >= UINSNS_PER_PAGE);
1284
1285         slot_addr = area->vaddr + (slot_nr * UPROBE_XOL_SLOT_BYTES);
1286         atomic_inc(&area->slot_count);
1287
1288         return slot_addr;
1289 }
1290
1291 /*
1292  * xol_get_insn_slot - allocate a slot for xol.
1293  * Returns the allocated slot address or 0.
1294  */
1295 static unsigned long xol_get_insn_slot(struct uprobe *uprobe)
1296 {
1297         struct xol_area *area;
1298         unsigned long xol_vaddr;
1299
1300         area = get_xol_area();
1301         if (!area)
1302                 return 0;
1303
1304         xol_vaddr = xol_take_insn_slot(area);
1305         if (unlikely(!xol_vaddr))
1306                 return 0;
1307
1308         arch_uprobe_copy_ixol(area->pages[0], xol_vaddr,
1309                               &uprobe->arch.ixol, sizeof(uprobe->arch.ixol));
1310
1311         return xol_vaddr;
1312 }
1313
1314 /*
1315  * xol_free_insn_slot - If slot was earlier allocated by
1316  * @xol_get_insn_slot(), make the slot available for
1317  * subsequent requests.
1318  */
1319 static void xol_free_insn_slot(struct task_struct *tsk)
1320 {
1321         struct xol_area *area;
1322         unsigned long vma_end;
1323         unsigned long slot_addr;
1324
1325         if (!tsk->mm || !tsk->mm->uprobes_state.xol_area || !tsk->utask)
1326                 return;
1327
1328         slot_addr = tsk->utask->xol_vaddr;
1329         if (unlikely(!slot_addr))
1330                 return;
1331
1332         area = tsk->mm->uprobes_state.xol_area;
1333         vma_end = area->vaddr + PAGE_SIZE;
1334         if (area->vaddr <= slot_addr && slot_addr < vma_end) {
1335                 unsigned long offset;
1336                 int slot_nr;
1337
1338                 offset = slot_addr - area->vaddr;
1339                 slot_nr = offset / UPROBE_XOL_SLOT_BYTES;
1340                 if (slot_nr >= UINSNS_PER_PAGE)
1341                         return;
1342
1343                 clear_bit(slot_nr, area->bitmap);
1344                 atomic_dec(&area->slot_count);
1345                 smp_mb__after_atomic(); /* pairs with prepare_to_wait() */
1346                 if (waitqueue_active(&area->wq))
1347                         wake_up(&area->wq);
1348
1349                 tsk->utask->xol_vaddr = 0;
1350         }
1351 }
1352
1353 void __weak arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr,
1354                                   void *src, unsigned long len)
1355 {
1356         /* Initialize the slot */
1357         copy_to_page(page, vaddr, src, len);
1358
1359         /*
1360          * We probably need flush_icache_user_range() but it needs a vma.
1361          * This should work on most architectures by default. If an
1362          * architecture needs to do something different it can define
1363          * its own version of the function.
1364          */
1365         flush_dcache_page(page);
1366 }
1367
1368 /**
1369  * uprobe_get_swbp_addr - compute address of swbp given post-swbp regs
1370  * @regs: Reflects the saved state of the task after it has hit a breakpoint
1371  * instruction.
1372  * Return the address of the breakpoint instruction.
1373  */
1374 unsigned long __weak uprobe_get_swbp_addr(struct pt_regs *regs)
1375 {
1376         return instruction_pointer(regs) - UPROBE_SWBP_INSN_SIZE;
1377 }
1378
1379 unsigned long uprobe_get_trap_addr(struct pt_regs *regs)
1380 {
1381         struct uprobe_task *utask = current->utask;
1382
1383         if (unlikely(utask && utask->active_uprobe))
1384                 return utask->vaddr;
1385
1386         return instruction_pointer(regs);
1387 }
1388
1389 static struct return_instance *free_ret_instance(struct return_instance *ri)
1390 {
1391         struct return_instance *next = ri->next;
1392         put_uprobe(ri->uprobe);
1393         kfree(ri);
1394         return next;
1395 }
1396
1397 /*
1398  * Called with no locks held.
1399  * Called in context of an exiting or an exec-ing thread.
1400  */
1401 void uprobe_free_utask(struct task_struct *t)
1402 {
1403         struct uprobe_task *utask = t->utask;
1404         struct return_instance *ri;
1405
1406         if (!utask)
1407                 return;
1408
1409         if (utask->active_uprobe)
1410                 put_uprobe(utask->active_uprobe);
1411
1412         ri = utask->return_instances;
1413         while (ri)
1414                 ri = free_ret_instance(ri);
1415
1416         xol_free_insn_slot(t);
1417         kfree(utask);
1418         t->utask = NULL;
1419 }
1420
1421 /*
1422  * Allocate a uprobe_task object for the task if necessary.
1423  * Called when the thread hits a breakpoint.
1424  *
1425  * Returns:
1426  * - pointer to new uprobe_task on success
1427  * - NULL otherwise
1428  */
1429 static struct uprobe_task *get_utask(void)
1430 {
1431         if (!current->utask)
1432                 current->utask = kzalloc(sizeof(struct uprobe_task), GFP_KERNEL);
1433         return current->utask;
1434 }
1435
1436 static int dup_utask(struct task_struct *t, struct uprobe_task *o_utask)
1437 {
1438         struct uprobe_task *n_utask;
1439         struct return_instance **p, *o, *n;
1440
1441         n_utask = kzalloc(sizeof(struct uprobe_task), GFP_KERNEL);
1442         if (!n_utask)
1443                 return -ENOMEM;
1444         t->utask = n_utask;
1445
1446         p = &n_utask->return_instances;
1447         for (o = o_utask->return_instances; o; o = o->next) {
1448                 n = kmalloc(sizeof(struct return_instance), GFP_KERNEL);
1449                 if (!n)
1450                         return -ENOMEM;
1451
1452                 *n = *o;
1453                 get_uprobe(n->uprobe);
1454                 n->next = NULL;
1455
1456                 *p = n;
1457                 p = &n->next;
1458                 n_utask->depth++;
1459         }
1460
1461         return 0;
1462 }
1463
1464 static void uprobe_warn(struct task_struct *t, const char *msg)
1465 {
1466         pr_warn("uprobe: %s:%d failed to %s\n",
1467                         current->comm, current->pid, msg);
1468 }
1469
1470 static void dup_xol_work(struct callback_head *work)
1471 {
1472         if (current->flags & PF_EXITING)
1473                 return;
1474
1475         if (!__create_xol_area(current->utask->dup_xol_addr) &&
1476                         !fatal_signal_pending(current))
1477                 uprobe_warn(current, "dup xol area");
1478 }
1479
1480 /*
1481  * Called in context of a new clone/fork from copy_process.
1482  */
1483 void uprobe_copy_process(struct task_struct *t, unsigned long flags)
1484 {
1485         struct uprobe_task *utask = current->utask;
1486         struct mm_struct *mm = current->mm;
1487         struct xol_area *area;
1488
1489         t->utask = NULL;
1490
1491         if (!utask || !utask->return_instances)
1492                 return;
1493
1494         if (mm == t->mm && !(flags & CLONE_VFORK))
1495                 return;
1496
1497         if (dup_utask(t, utask))
1498                 return uprobe_warn(t, "dup ret instances");
1499
1500         /* The task can fork() after dup_xol_work() fails */
1501         area = mm->uprobes_state.xol_area;
1502         if (!area)
1503                 return uprobe_warn(t, "dup xol area");
1504
1505         if (mm == t->mm)
1506                 return;
1507
1508         t->utask->dup_xol_addr = area->vaddr;
1509         init_task_work(&t->utask->dup_xol_work, dup_xol_work);
1510         task_work_add(t, &t->utask->dup_xol_work, true);
1511 }
1512
1513 /*
1514  * The current area->vaddr notion assumes the trampoline address is always
1515  * equal to area->vaddr.
1516  *
1517  * Returns -1 in case the xol_area is not allocated.
1518  */
1519 static unsigned long get_trampoline_vaddr(void)
1520 {
1521         struct xol_area *area;
1522         unsigned long trampoline_vaddr = -1;
1523
1524         area = current->mm->uprobes_state.xol_area;
1525         smp_read_barrier_depends();
1526         if (area)
1527                 trampoline_vaddr = area->vaddr;
1528
1529         return trampoline_vaddr;
1530 }
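/*
 * Note: the trampoline is simply slot 0 of the XOL page; __create_xol_area()
 * reserves that slot and fills it with UPROBE_SWBP_INSN, so a "return" to
 * area->vaddr lands on a breakpoint instruction.
 */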
1531
1532 static void cleanup_return_instances(struct uprobe_task *utask, bool chained,
1533                                         struct pt_regs *regs)
1534 {
1535         struct return_instance *ri = utask->return_instances;
1536         enum rp_check ctx = chained ? RP_CHECK_CHAIN_CALL : RP_CHECK_CALL;
1537
1538         while (ri && !arch_uretprobe_is_alive(ri, ctx, regs)) {
1539                 ri = free_ret_instance(ri);
1540                 utask->depth--;
1541         }
1542         utask->return_instances = ri;
1543 }
1544
1545 static void prepare_uretprobe(struct uprobe *uprobe, struct pt_regs *regs)
1546 {
1547         struct return_instance *ri;
1548         struct uprobe_task *utask;
1549         unsigned long orig_ret_vaddr, trampoline_vaddr;
1550         bool chained;
1551
1552         if (!get_xol_area())
1553                 return;
1554
1555         utask = get_utask();
1556         if (!utask)
1557                 return;
1558
1559         if (utask->depth >= MAX_URETPROBE_DEPTH) {
1560                 printk_ratelimited(KERN_INFO "uprobe: omit uretprobe due to"
1561                                 " nestedness limit pid/tgid=%d/%d\n",
1562                                 current->pid, current->tgid);
1563                 return;
1564         }
1565
1566         ri = kmalloc(sizeof(struct return_instance), GFP_KERNEL);
1567         if (!ri)
1568                 return;
1569
1570         trampoline_vaddr = get_trampoline_vaddr();
1571         orig_ret_vaddr = arch_uretprobe_hijack_return_addr(trampoline_vaddr, regs);
1572         if (orig_ret_vaddr == -1)
1573                 goto fail;
1574
1575         /* drop the entries invalidated by longjmp() */
1576         chained = (orig_ret_vaddr == trampoline_vaddr);
1577         cleanup_return_instances(utask, chained, regs);
1578
1579         /*
1580          * We don't want to keep the trampoline address on the stack; rather, keep
1581          * the original return address of the first caller through all the subsequent
1582          * instances. This also makes breakpoint unwrapping easier.
1583          */
1584         if (chained) {
1585                 if (!utask->return_instances) {
1586                         /*
1587                          * This situation is not possible. Likely we have an
1588                          * attack from user-space.
1589                          */
1590                         uprobe_warn(current, "handle tail call");
1591                         goto fail;
1592                 }
1593                 orig_ret_vaddr = utask->return_instances->orig_ret_vaddr;
1594         }
1595
1596         ri->uprobe = get_uprobe(uprobe);
1597         ri->func = instruction_pointer(regs);
1598         ri->stack = user_stack_pointer(regs);
1599         ri->orig_ret_vaddr = orig_ret_vaddr;
1600         ri->chained = chained;
1601
1602         utask->depth++;
1603         ri->next = utask->return_instances;
1604         utask->return_instances = ri;
1605
1606         return;
1607  fail:
1608         kfree(ri);
1609 }
1610
1611 /* Prepare to single-step probed instruction out of line. */
1612 static int
1613 pre_ssout(struct uprobe *uprobe, struct pt_regs *regs, unsigned long bp_vaddr)
1614 {
1615         struct uprobe_task *utask;
1616         unsigned long xol_vaddr;
1617         int err;
1618
1619         utask = get_utask();
1620         if (!utask)
1621                 return -ENOMEM;
1622
1623         xol_vaddr = xol_get_insn_slot(uprobe);
1624         if (!xol_vaddr)
1625                 return -ENOMEM;
1626
1627         utask->xol_vaddr = xol_vaddr;
1628         utask->vaddr = bp_vaddr;
1629
1630         err = arch_uprobe_pre_xol(&uprobe->arch, regs);
1631         if (unlikely(err)) {
1632                 xol_free_insn_slot(current);
1633                 return err;
1634         }
1635
1636         utask->active_uprobe = uprobe;
1637         utask->state = UTASK_SSTEP;
1638         return 0;
1639 }
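/*
 * Informal recap: xol_get_insn_slot() above hands out a slot in the per-mm
 * "execute out of line" area and copies the possibly fixed-up copy of the
 * probed instruction (uprobe->arch) into it.  The task then single-steps
 * that slot rather than the original address, and handle_singlestep() below
 * finishes or aborts the emulation via arch_uprobe_post_xol() or
 * arch_uprobe_abort_xol().
 */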
1640
1641 /*
1642  * If we are singlestepping, ensure this thread is not connected to
1643  * non-fatal signals until the singlestep completes.  When the xol insn
1644  * itself triggers the signal, restart the original insn even if the task
1645  * is already SIGKILL'ed (since the coredump should report the correct ip).
1646  * This matters even more if the task has a handler for SIGSEGV/etc: the
1647  * _same_ instruction must be repeated after return from the signal
1648  * handler, and SSTEP can never finish in that case.
1649  */
1650 bool uprobe_deny_signal(void)
1651 {
1652         struct task_struct *t = current;
1653         struct uprobe_task *utask = t->utask;
1654
1655         if (likely(!utask || !utask->active_uprobe))
1656                 return false;
1657
1658         WARN_ON_ONCE(utask->state != UTASK_SSTEP);
1659
1660         if (signal_pending(t)) {
1661                 spin_lock_irq(&t->sighand->siglock);
1662                 clear_tsk_thread_flag(t, TIF_SIGPENDING);
1663                 spin_unlock_irq(&t->sighand->siglock);
1664
1665                 if (__fatal_signal_pending(t) || arch_uprobe_xol_was_trapped(t)) {
1666                         utask->state = UTASK_SSTEP_TRAPPED;
1667                         set_tsk_thread_flag(t, TIF_UPROBE);
1668                 }
1669         }
1670
1671         return true;
1672 }
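/*
 * The TIF_SIGPENDING cleared above is only deferred, not lost: once the
 * single-step finishes, handle_singlestep() below calls recalc_sigpending()
 * under ->siglock so the postponed signals are delivered as usual.
 */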
1673
1674 static void mmf_recalc_uprobes(struct mm_struct *mm)
1675 {
1676         struct vm_area_struct *vma;
1677
1678         for (vma = mm->mmap; vma; vma = vma->vm_next) {
1679                 if (!valid_vma(vma, false))
1680                         continue;
1681                 /*
1682                  * This is not strictly accurate; we can race with
1683                  * uprobe_unregister() and see an already removed
1684                  * uprobe if delete_uprobe() was not yet called.
1685                  * Or this uprobe can be filtered out.
1686                  */
1687                 if (vma_has_uprobes(vma, vma->vm_start, vma->vm_end))
1688                         return;
1689         }
1690
1691         clear_bit(MMF_HAS_UPROBES, &mm->flags);
1692 }
1693
1694 static int is_trap_at_addr(struct mm_struct *mm, unsigned long vaddr)
1695 {
1696         struct page *page;
1697         uprobe_opcode_t opcode;
1698         int result;
1699
1700         pagefault_disable();
1701         result = __get_user(opcode, (uprobe_opcode_t __user *)vaddr);
1702         pagefault_enable();
1703
1704         if (likely(result == 0))
1705                 goto out;
1706
1707         /*
1708          * The NULL 'tsk' here ensures that any faults that occur here
1709          * will not be accounted to the task.  'mm' *is* current->mm,
1710          * but we treat this as a 'remote' access since it is
1711          * essentially a kernel access to the memory.
1712          */
1713         result = get_user_pages_remote(NULL, mm, vaddr, 1, 0, 1, &page, NULL);
1714         if (result < 0)
1715                 return result;
1716
1717         copy_from_page(page, vaddr, &opcode, UPROBE_SWBP_INSN_SIZE);
1718         put_page(page);
1719  out:
1720         /* This needs to return true for any variant of the trap insn */
1721         return is_trap_insn(&opcode);
1722 }
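/*
 * Note: is_trap_insn() is a __weak helper defined earlier in this file; the
 * generic version simply compares the opcode against UPROBE_SWBP_INSN
 * (e.g. the 0xcc/int3 byte on x86).  Architectures with more than one trap
 * encoding override it so that any variant of the trap instruction is
 * recognized here.
 */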
1723
1724 static struct uprobe *find_active_uprobe(unsigned long bp_vaddr, int *is_swbp)
1725 {
1726         struct mm_struct *mm = current->mm;
1727         struct uprobe *uprobe = NULL;
1728         struct vm_area_struct *vma;
1729
1730         down_read(&mm->mmap_sem);
1731         vma = find_vma(mm, bp_vaddr);
1732         if (vma && vma->vm_start <= bp_vaddr) {
1733                 if (valid_vma(vma, false)) {
1734                         struct inode *inode = file_inode(vma->vm_file);
1735                         loff_t offset = vaddr_to_offset(vma, bp_vaddr);
1736
1737                         uprobe = find_uprobe(inode, offset);
1738                 }
1739
1740                 if (!uprobe)
1741                         *is_swbp = is_trap_at_addr(mm, bp_vaddr);
1742         } else {
1743                 *is_swbp = -EFAULT;
1744         }
1745
1746         if (!uprobe && test_and_clear_bit(MMF_RECALC_UPROBES, &mm->flags))
1747                 mmf_recalc_uprobes(mm);
1748         up_read(&mm->mmap_sem);
1749
1750         return uprobe;
1751 }
1752
1753 static void handler_chain(struct uprobe *uprobe, struct pt_regs *regs)
1754 {
1755         struct uprobe_consumer *uc;
1756         int remove = UPROBE_HANDLER_REMOVE;
1757         bool need_prep = false; /* prepare return uprobe, when needed */
1758
1759         down_read(&uprobe->register_rwsem);
1760         for (uc = uprobe->consumers; uc; uc = uc->next) {
1761                 int rc = 0;
1762
1763                 if (uc->handler) {
1764                         rc = uc->handler(uc, regs);
1765                         WARN(rc & ~UPROBE_HANDLER_MASK,
1766                                 "bad rc=0x%x from %pf()\n", rc, uc->handler);
1767                 }
1768
1769                 if (uc->ret_handler)
1770                         need_prep = true;
1771
1772                 remove &= rc;
1773         }
1774
1775         if (need_prep && !remove)
1776                 prepare_uretprobe(uprobe, regs); /* put bp at return */
1777
1778         if (remove && uprobe->consumers) {
1779                 WARN_ON(!uprobe_is_active(uprobe));
1780                 unapply_uprobe(uprobe, current->mm);
1781         }
1782         up_read(&uprobe->register_rwsem);
1783 }
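/*
 * For illustration, a minimal consumer as handler_chain() expects it.  The
 * names below are made up; only the uprobe_consumer layout, the return
 * codes and uprobe_register()/uprobe_unregister() are real.  Returning
 * UPROBE_HANDLER_REMOVE from ->handler asks for the breakpoint to be
 * unapplied from the current mm, but only if every other consumer agrees
 * (see "remove &= rc" above); setting ->ret_handler is what makes
 * handler_chain() arm the uretprobe via prepare_uretprobe().
 *
 *	static int sample_handler(struct uprobe_consumer *self,
 *				  struct pt_regs *regs)
 *	{
 *		return 0;	// 0: keep; UPROBE_HANDLER_REMOVE: unapply
 *	}
 *
 *	static int sample_ret_handler(struct uprobe_consumer *self,
 *				      unsigned long func, struct pt_regs *regs)
 *	{
 *		return 0;	// invoked by handle_uretprobe_chain() below
 *	}
 *
 *	static struct uprobe_consumer sample_uc = {
 *		.handler	= sample_handler,
 *		.ret_handler	= sample_ret_handler,
 *	};
 *
 *	// ret = uprobe_register(inode, offset, &sample_uc);
 *	// ...
 *	// uprobe_unregister(inode, offset, &sample_uc);
 */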
1784
1785 static void
1786 handle_uretprobe_chain(struct return_instance *ri, struct pt_regs *regs)
1787 {
1788         struct uprobe *uprobe = ri->uprobe;
1789         struct uprobe_consumer *uc;
1790
1791         down_read(&uprobe->register_rwsem);
1792         for (uc = uprobe->consumers; uc; uc = uc->next) {
1793                 if (uc->ret_handler)
1794                         uc->ret_handler(uc, ri->func, regs);
1795         }
1796         up_read(&uprobe->register_rwsem);
1797 }
1798
1799 static struct return_instance *find_next_ret_chain(struct return_instance *ri)
1800 {
1801         bool chained;
1802
1803         do {
1804                 chained = ri->chained;
1805                 ri = ri->next;  /* can't be NULL if chained */
1806         } while (chained);
1807
1808         return ri;
1809 }
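/*
 * In other words: walk past the current frame's chain -- the group of
 * instances that all resolve to the same original return address, as built
 * by prepare_uretprobe() above -- and return the first instance of the next
 * logical frame, or NULL if this chain is the last one on the stack.
 */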
1810
1811 static void handle_trampoline(struct pt_regs *regs)
1812 {
1813         struct uprobe_task *utask;
1814         struct return_instance *ri, *next;
1815         bool valid;
1816
1817         utask = current->utask;
1818         if (!utask)
1819                 goto sigill;
1820
1821         ri = utask->return_instances;
1822         if (!ri)
1823                 goto sigill;
1824
1825         do {
1826                 /*
1827                  * We should throw out the frames invalidated by longjmp().
1828                  * If this chain is valid, then the next one should be alive
1829                  * or NULL; the latter case means that nobody but ri->func
1830                  * could hit this trampoline on return. TODO: sigaltstack().
1831                  */
1832                 next = find_next_ret_chain(ri);
1833                 valid = !next || arch_uretprobe_is_alive(next, RP_CHECK_RET, regs);
1834
1835                 instruction_pointer_set(regs, ri->orig_ret_vaddr);
1836                 do {
1837                         if (valid)
1838                                 handle_uretprobe_chain(ri, regs);
1839                         ri = free_ret_instance(ri);
1840                         utask->depth--;
1841                 } while (ri != next);
1842         } while (!valid);
1843
1844         utask->return_instances = ri;
1845         return;
1846
1847  sigill:
1848         uprobe_warn(current, "handle uretprobe, sending SIGILL.");
1849         force_sig_info(SIGILL, SEND_SIG_FORCED, current);
1850
1851 }
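/*
 * Note: the "trampoline" is simply the first slot of the per-mm xol area,
 * populated with a breakpoint instruction when the area is created.  When a
 * hijacked return lands there, the resulting trap goes through handle_swbp()
 * below, which recognizes the address via get_trampoline_vaddr() and diverts
 * to handle_trampoline() instead of looking up a uprobe.
 */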
1852
1853 bool __weak arch_uprobe_ignore(struct arch_uprobe *aup, struct pt_regs *regs)
1854 {
1855         return false;
1856 }
1857
1858 bool __weak arch_uretprobe_is_alive(struct return_instance *ret, enum rp_check ctx,
1859                                         struct pt_regs *regs)
1860 {
1861         return true;
1862 }
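/*
 * The __weak stub above never invalidates a frame.  Architectures that can
 * tell (e.g. by comparing ret->stack, saved in prepare_uretprobe(), against
 * the current user stack pointer) override it so that frames discarded by
 * longjmp() and friends are dropped in cleanup_return_instances() and
 * handle_trampoline().
 */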
1863
1864 /*
1865  * Run handler and ask thread to singlestep.
1866  * Ensure all non-fatal signals cannot interrupt thread while it singlesteps.
1867  */
1868 static void handle_swbp(struct pt_regs *regs)
1869 {
1870         struct uprobe *uprobe;
1871         unsigned long bp_vaddr;
1872         int uninitialized_var(is_swbp);
1873
1874         bp_vaddr = uprobe_get_swbp_addr(regs);
1875         if (bp_vaddr == get_trampoline_vaddr())
1876                 return handle_trampoline(regs);
1877
1878         uprobe = find_active_uprobe(bp_vaddr, &is_swbp);
1879         if (!uprobe) {
1880                 if (is_swbp > 0) {
1881                         /* No matching uprobe; signal SIGTRAP. */
1882                         send_sig(SIGTRAP, current, 0);
1883                 } else {
1884                         /*
1885                          * Either we raced with uprobe_unregister() or we can't
1886                          * access this memory. The latter is only possible if
1887                          * another thread plays with our ->mm. In both cases
1888                          * we can simply restart. If this vma was unmapped we
1889                          * can pretend this insn was not executed yet and get
1890                          * the (correct) SIGSEGV after restart.
1891                          */
1892                         instruction_pointer_set(regs, bp_vaddr);
1893                 }
1894                 return;
1895         }
1896
1897         /* change it in advance for ->handler() and restart */
1898         instruction_pointer_set(regs, bp_vaddr);
1899
1900         /*
1901          * TODO: move copy_insn/etc into _register and remove this hack.
1902          * After we hit the bp, _unregister + _register can install the
1903          * new and not-yet-analyzed uprobe at the same address, restart.
1904          */
1905         smp_rmb(); /* pairs with wmb() in install_breakpoint() */
1906         if (unlikely(!test_bit(UPROBE_COPY_INSN, &uprobe->flags)))
1907                 goto out;
1908
1909         /* Tracing handlers use ->utask to communicate with fetch methods */
1910         if (!get_utask())
1911                 goto out;
1912
1913         if (arch_uprobe_ignore(&uprobe->arch, regs))
1914                 goto out;
1915
1916         handler_chain(uprobe, regs);
1917
1918         if (arch_uprobe_skip_sstep(&uprobe->arch, regs))
1919                 goto out;
1920
1921         if (!pre_ssout(uprobe, regs, bp_vaddr))
1922                 return;
1923
1924         /* arch_uprobe_skip_sstep() succeeded, or restart if can't singlestep */
1925 out:
1926         put_uprobe(uprobe);
1927 }
1928
1929 /*
1930  * Perform required fix-ups and disable singlestep.
1931  * Allow pending signals to take effect.
1932  */
1933 static void handle_singlestep(struct uprobe_task *utask, struct pt_regs *regs)
1934 {
1935         struct uprobe *uprobe;
1936         int err = 0;
1937
1938         uprobe = utask->active_uprobe;
1939         if (utask->state == UTASK_SSTEP_ACK)
1940                 err = arch_uprobe_post_xol(&uprobe->arch, regs);
1941         else if (utask->state == UTASK_SSTEP_TRAPPED)
1942                 arch_uprobe_abort_xol(&uprobe->arch, regs);
1943         else
1944                 WARN_ON_ONCE(1);
1945
1946         put_uprobe(uprobe);
1947         utask->active_uprobe = NULL;
1948         utask->state = UTASK_RUNNING;
1949         xol_free_insn_slot(current);
1950
1951         spin_lock_irq(&current->sighand->siglock);
1952         recalc_sigpending(); /* see uprobe_deny_signal() */
1953         spin_unlock_irq(&current->sighand->siglock);
1954
1955         if (unlikely(err)) {
1956                 uprobe_warn(current, "execute the probed insn, sending SIGILL.");
1957                 force_sig_info(SIGILL, SEND_SIG_FORCED, current);
1958         }
1959 }
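/*
 * Recap of the utask->state transitions driven by the functions above (a
 * reading aid, not new behaviour):
 *
 *	UTASK_RUNNING        --pre_ssout()--------------------> UTASK_SSTEP
 *	UTASK_SSTEP          --uprobe_post_sstep_notifier()---> UTASK_SSTEP_ACK
 *	UTASK_SSTEP          --uprobe_deny_signal(), trapped--> UTASK_SSTEP_TRAPPED
 *	UTASK_SSTEP_ACK/
 *	UTASK_SSTEP_TRAPPED  --handle_singlestep()------------> UTASK_RUNNING
 */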
1960
1961 /*
1962  * On breakpoint hit, breakpoint notifier sets the TIF_UPROBE flag and
1963  * allows the thread to return from interrupt. After that handle_swbp()
1964  * sets utask->active_uprobe.
1965  *
1966  * On singlestep exception, singlestep notifier sets the TIF_UPROBE flag
1967  * and allows the thread to return from interrupt.
1968  *
1969  * While returning to userspace, thread notices the TIF_UPROBE flag and calls
1970  * uprobe_notify_resume().
1971  */
1972 void uprobe_notify_resume(struct pt_regs *regs)
1973 {
1974         struct uprobe_task *utask;
1975
1976         clear_thread_flag(TIF_UPROBE);
1977
1978         utask = current->utask;
1979         if (utask && utask->active_uprobe)
1980                 handle_singlestep(utask, regs);
1981         else
1982                 handle_swbp(regs);
1983 }
1984
1985 /*
1986  * uprobe_pre_sstep_notifier gets called from interrupt context as part of
1987  * the notifier mechanism. Set the TIF_UPROBE flag and indicate a breakpoint hit.
1988  */
1989 int uprobe_pre_sstep_notifier(struct pt_regs *regs)
1990 {
1991         if (!current->mm)
1992                 return 0;
1993
1994         if (!test_bit(MMF_HAS_UPROBES, &current->mm->flags) &&
1995             (!current->utask || !current->utask->return_instances))
1996                 return 0;
1997
1998         set_thread_flag(TIF_UPROBE);
1999         return 1;
2000 }
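/*
 * Returning 1 tells the architecture breakpoint handler that this trap
 * belongs to uprobes; the actual work is postponed to uprobe_notify_resume()
 * on the way back to userspace, which is why only TIF_UPROBE is set here.
 */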
2001
2002 /*
2003  * uprobe_post_sstep_notifier gets called in interrupt context as part of the
2004  * notifier mechanism.  Set the TIF_UPROBE flag to signal singlestep completion.
2005  */
2006 int uprobe_post_sstep_notifier(struct pt_regs *regs)
2007 {
2008         struct uprobe_task *utask = current->utask;
2009
2010         if (!current->mm || !utask || !utask->active_uprobe)
2011                 /* task is currently not uprobed */
2012                 return 0;
2013
2014         utask->state = UTASK_SSTEP_ACK;
2015         set_thread_flag(TIF_UPROBE);
2016         return 1;
2017 }
2018
2019 static struct notifier_block uprobe_exception_nb = {
2020         .notifier_call          = arch_uprobe_exception_notify,
2021         .priority               = INT_MAX-1,    /* notified after kprobes, kgdb */
2022 };
2023
2024 static int __init init_uprobes(void)
2025 {
2026         int i;
2027
2028         for (i = 0; i < UPROBES_HASH_SZ; i++)
2029                 mutex_init(&uprobes_mmap_mutex[i]);
2030
2031         if (percpu_init_rwsem(&dup_mmap_sem))
2032                 return -ENOMEM;
2033
2034         return register_die_notifier(&uprobe_exception_nb);
2035 }
2036 __initcall(init_uprobes);