kernel/events/uprobes.c
/*
 * User-space Probes (UProbes)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2008-2012
 * Authors:
 *      Srikar Dronamraju
 *      Jim Keniston
 * Copyright (C) 2011-2012 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 */

#include <linux/kernel.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>      /* read_mapping_page */
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/export.h>
#include <linux/rmap.h>         /* anon_vma_prepare */
#include <linux/mmu_notifier.h> /* set_pte_at_notify */
#include <linux/swap.h>         /* try_to_free_swap */
#include <linux/ptrace.h>       /* user_enable_single_step */
#include <linux/kdebug.h>       /* notifier mechanism */
#include "../../mm/internal.h"  /* munlock_vma_page */
#include <linux/percpu-rwsem.h>
#include <linux/task_work.h>
#include <linux/shmem_fs.h>

#include <linux/uprobes.h>

#define UINSNS_PER_PAGE                 (PAGE_SIZE/UPROBE_XOL_SLOT_BYTES)
#define MAX_UPROBE_XOL_SLOTS            UINSNS_PER_PAGE

static struct rb_root uprobes_tree = RB_ROOT;
/*
 * Allows us to skip uprobe_mmap() if there are no uprobe events active
 * at this time.  A fine-grained per-inode count would probably be better?
 */
#define no_uprobe_events()      RB_EMPTY_ROOT(&uprobes_tree)

static DEFINE_SPINLOCK(uprobes_treelock);       /* serialize rbtree access */

#define UPROBES_HASH_SZ 13
/* serialize uprobe->pending_list */
static struct mutex uprobes_mmap_mutex[UPROBES_HASH_SZ];
#define uprobes_mmap_hash(v)    (&uprobes_mmap_mutex[((unsigned long)(v)) % UPROBES_HASH_SZ])

static struct percpu_rw_semaphore dup_mmap_sem;

/* Have a copy of original instruction */
#define UPROBE_COPY_INSN        0

struct uprobe {
        struct rb_node          rb_node;        /* node in the rb tree */
        atomic_t                ref;
        struct rw_semaphore     register_rwsem;
        struct rw_semaphore     consumer_rwsem;
        struct list_head        pending_list;
        struct uprobe_consumer  *consumers;
        struct inode            *inode;         /* Also hold a ref to inode */
        loff_t                  offset;
        unsigned long           flags;

        /*
         * The generic code assumes that it has two members of unknown type
         * owned by the arch-specific code:
         *
         *      insn -  copy_insn() saves the original instruction here for
         *              arch_uprobe_analyze_insn().
         *
         *      ixol -  potentially modified instruction to execute out of
         *              line, copied to xol_area by xol_get_insn_slot().
         */
        struct arch_uprobe      arch;
};

/*
 * Execute out of line area: anonymous executable mapping installed
 * by the probed task to execute the copy of the original instruction
 * mangled by set_swbp().
 *
 * On a breakpoint hit, the thread contends for a slot.  It frees the
 * slot after the single-step. Currently a fixed number of slots are
 * allocated.
 */
struct xol_area {
        wait_queue_head_t               wq;             /* if all slots are busy */
        atomic_t                        slot_count;     /* number of in-use slots */
        unsigned long                   *bitmap;        /* 0 = free slot */

        struct vm_special_mapping       xol_mapping;
        struct page                     *pages[2];
        /*
         * We keep the vma's vm_start rather than a pointer to the vma
         * itself.  The probed process or a naughty kernel module could make
         * the vma go away, and we must handle that reasonably gracefully.
         */
        unsigned long                   vaddr;          /* Page(s) of instruction slots */
};

/*
 * valid_vma: Verify if the specified vma is an executable vma.
 * Relax the restrictions while unregistering: vm_flags might have
 * changed after the breakpoint was inserted.
 *      - is_register: indicates whether we are in register context.
 *      - Return true if the specified virtual address is in an
 *        executable vma.
 */
static bool valid_vma(struct vm_area_struct *vma, bool is_register)
{
        vm_flags_t flags = VM_HUGETLB | VM_MAYEXEC | VM_MAYSHARE;

        if (is_register)
                flags |= VM_WRITE;

        return vma->vm_file && (vma->vm_flags & flags) == VM_MAYEXEC;
}

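/*
 * Translate between a file offset and the user virtual address within
 * @vma, using the vma's file page offset (vm_pgoff) as the anchor.
 */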
static unsigned long offset_to_vaddr(struct vm_area_struct *vma, loff_t offset)
{
        return vma->vm_start + offset - ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
}

static loff_t vaddr_to_offset(struct vm_area_struct *vma, unsigned long vaddr)
{
        return ((loff_t)vma->vm_pgoff << PAGE_SHIFT) + (vaddr - vma->vm_start);
}

/**
 * __replace_page - replace page in vma by new page.
 * Based on replace_page() in mm/ksm.c.
 *
 * @vma:      vma that holds the pte pointing to page
 * @addr:     address the old @page is mapped at
 * @page:     the COWed page we are replacing by kpage
 * @kpage:    the modified page we replace page by
 *
 * Returns 0 on success, or a negative errno on failure.
 */
static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
                                struct page *page, struct page *kpage)
{
        struct mm_struct *mm = vma->vm_mm;
        spinlock_t *ptl;
        pte_t *ptep;
        int err;
        /* For mmu_notifiers */
        const unsigned long mmun_start = addr;
        const unsigned long mmun_end   = addr + PAGE_SIZE;
        struct mem_cgroup *memcg;

        err = mem_cgroup_try_charge(kpage, vma->vm_mm, GFP_KERNEL, &memcg);
        if (err)
                return err;

        /* For try_to_free_swap() and munlock_vma_page() below */
        lock_page(page);

        mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
        err = -EAGAIN;
        ptep = page_check_address(page, mm, addr, &ptl, 0);
        if (!ptep)
                goto unlock;

        get_page(kpage);
        page_add_new_anon_rmap(kpage, vma, addr);
        mem_cgroup_commit_charge(kpage, memcg, false);
        lru_cache_add_active_or_unevictable(kpage, vma);

        if (!PageAnon(page)) {
                dec_mm_counter(mm, MM_FILEPAGES);
                inc_mm_counter(mm, MM_ANONPAGES);
        }

        flush_cache_page(vma, addr, pte_pfn(*ptep));
        ptep_clear_flush_notify(vma, addr, ptep);
        set_pte_at_notify(mm, addr, ptep, mk_pte(kpage, vma->vm_page_prot));

        page_remove_rmap(page);
        if (!page_mapped(page))
                try_to_free_swap(page);
        pte_unmap_unlock(ptep, ptl);

        if (vma->vm_flags & VM_LOCKED)
                munlock_vma_page(page);
        put_page(page);

        err = 0;
 unlock:
        mem_cgroup_cancel_charge(kpage, memcg);
        mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
        unlock_page(page);
        return err;
}

/**
 * is_swbp_insn - check if instruction is breakpoint instruction.
 * @insn: instruction to be checked.
 * Default implementation of is_swbp_insn
 * Returns true if @insn is a breakpoint instruction.
 */
bool __weak is_swbp_insn(uprobe_opcode_t *insn)
{
        return *insn == UPROBE_SWBP_INSN;
}

/**
 * is_trap_insn - check if instruction is a trap instruction.
 * @insn: instruction to be checked.
 * Default implementation of is_trap_insn
 * Returns true if @insn is a trap instruction.
 *
 * This function is needed for the case where an architecture has multiple
 * trap instructions (like powerpc).
 */
bool __weak is_trap_insn(uprobe_opcode_t *insn)
{
        return is_swbp_insn(insn);
}

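/*
 * Copy @len bytes between @page and a kernel buffer at the page offset
 * of @vaddr, via a temporary atomic kmap.  The range must not cross a
 * page boundary.
 */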
static void copy_from_page(struct page *page, unsigned long vaddr, void *dst, int len)
{
        void *kaddr = kmap_atomic(page);
        memcpy(dst, kaddr + (vaddr & ~PAGE_MASK), len);
        kunmap_atomic(kaddr);
}

static void copy_to_page(struct page *page, unsigned long vaddr, const void *src, int len)
{
        void *kaddr = kmap_atomic(page);
        memcpy(kaddr + (vaddr & ~PAGE_MASK), src, len);
        kunmap_atomic(kaddr);
}

static int verify_opcode(struct page *page, unsigned long vaddr, uprobe_opcode_t *new_opcode)
{
        uprobe_opcode_t old_opcode;
        bool is_swbp;

        /*
         * Note: We only check if the old_opcode is UPROBE_SWBP_INSN here.
         * We do not check if it is any other 'trap variant' which could
         * be a conditional trap instruction such as the one powerpc supports.
         *
         * The logic is that we do not care if the underlying instruction
         * is a trap variant; uprobes always win over any other (gdb)
         * breakpoint.
         */
        copy_from_page(page, vaddr, &old_opcode, UPROBE_SWBP_INSN_SIZE);
        is_swbp = is_swbp_insn(&old_opcode);

        if (is_swbp_insn(new_opcode)) {
                if (is_swbp)            /* register: already installed? */
                        return 0;
        } else {
                if (!is_swbp)           /* unregister: was it changed by us? */
                        return 0;
        }

        return 1;
}

/*
 * NOTE:
 * Expect the breakpoint instruction to be the smallest size instruction for
 * the architecture. If an arch has variable-length instructions and the
 * breakpoint instruction is not of the smallest length supported by that
 * architecture, then we need to modify is_trap_at_addr() and
 * uprobe_write_opcode() accordingly. This would never be a problem for archs
 * that have fixed-length instructions.
 *
 * uprobe_write_opcode - write the opcode at a given virtual address.
 * @mm: the probed process address space.
 * @vaddr: the virtual address to store the opcode.
 * @opcode: opcode to be written at @vaddr.
 *
 * Called with mm->mmap_sem held for write.
 * Return 0 (success) or a negative errno.
 */
int uprobe_write_opcode(struct mm_struct *mm, unsigned long vaddr,
                        uprobe_opcode_t opcode)
{
        struct page *old_page, *new_page;
        struct vm_area_struct *vma;
        int ret;

retry:
        /* Read the page with vaddr into memory */
        ret = get_user_pages(NULL, mm, vaddr, 1, 0, 1, &old_page, &vma);
        if (ret <= 0)
                return ret;

        ret = verify_opcode(old_page, vaddr, &opcode);
        if (ret <= 0)
                goto put_old;

        ret = anon_vma_prepare(vma);
        if (ret)
                goto put_old;

        ret = -ENOMEM;
        new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vaddr);
        if (!new_page)
                goto put_old;

        __SetPageUptodate(new_page);
        copy_highpage(new_page, old_page);
        copy_to_page(new_page, vaddr, &opcode, UPROBE_SWBP_INSN_SIZE);

        ret = __replace_page(vma, vaddr, old_page, new_page);
        page_cache_release(new_page);
put_old:
        put_page(old_page);

        if (unlikely(ret == -EAGAIN))
                goto retry;
        return ret;
}

/**
 * set_swbp - store breakpoint at a given address.
 * @auprobe: arch specific probepoint information.
 * @mm: the probed process address space.
 * @vaddr: the virtual address to insert the opcode.
 *
 * For mm @mm, store the breakpoint instruction at @vaddr.
 * Return 0 (success) or a negative errno.
 */
int __weak set_swbp(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long vaddr)
{
        return uprobe_write_opcode(mm, vaddr, UPROBE_SWBP_INSN);
}

/**
 * set_orig_insn - Restore the original instruction.
 * @mm: the probed process address space.
 * @auprobe: arch specific probepoint information.
 * @vaddr: the virtual address to insert the opcode.
 *
 * For mm @mm, restore the original instruction at @vaddr.
 * Return 0 (success) or a negative errno.
 */
int __weak
set_orig_insn(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long vaddr)
{
        return uprobe_write_opcode(mm, vaddr, *(uprobe_opcode_t *)&auprobe->insn);
}

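/*
 * Refcount helpers.  A uprobe is created with two references (see
 * __insert_uprobe()); get_uprobe() takes an access reference, and
 * put_uprobe() frees the uprobe when the last reference is dropped.
 */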
static struct uprobe *get_uprobe(struct uprobe *uprobe)
{
        atomic_inc(&uprobe->ref);
        return uprobe;
}

static void put_uprobe(struct uprobe *uprobe)
{
        if (atomic_dec_and_test(&uprobe->ref))
                kfree(uprobe);
}

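/*
 * Total order on uprobes: compare by inode first, then by offset.
 * Used as the key for uprobes_tree.
 */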
static int match_uprobe(struct uprobe *l, struct uprobe *r)
{
        if (l->inode < r->inode)
                return -1;

        if (l->inode > r->inode)
                return 1;

        if (l->offset < r->offset)
                return -1;

        if (l->offset > r->offset)
                return 1;

        return 0;
}

static struct uprobe *__find_uprobe(struct inode *inode, loff_t offset)
{
        struct uprobe u = { .inode = inode, .offset = offset };
        struct rb_node *n = uprobes_tree.rb_node;
        struct uprobe *uprobe;
        int match;

        while (n) {
                uprobe = rb_entry(n, struct uprobe, rb_node);
                match = match_uprobe(&u, uprobe);
                if (!match)
                        return get_uprobe(uprobe);

                if (match < 0)
                        n = n->rb_left;
                else
                        n = n->rb_right;
        }
        return NULL;
}

/*
 * Find a uprobe corresponding to a given inode:offset
 * Acquires uprobes_treelock
 */
static struct uprobe *find_uprobe(struct inode *inode, loff_t offset)
{
        struct uprobe *uprobe;

        spin_lock(&uprobes_treelock);
        uprobe = __find_uprobe(inode, offset);
        spin_unlock(&uprobes_treelock);

        return uprobe;
}

static struct uprobe *__insert_uprobe(struct uprobe *uprobe)
{
        struct rb_node **p = &uprobes_tree.rb_node;
        struct rb_node *parent = NULL;
        struct uprobe *u;
        int match;

        while (*p) {
                parent = *p;
                u = rb_entry(parent, struct uprobe, rb_node);
                match = match_uprobe(uprobe, u);
                if (!match)
                        return get_uprobe(u);

                if (match < 0)
                        p = &parent->rb_left;
                else
                        p = &parent->rb_right;
        }

        u = NULL;
        rb_link_node(&uprobe->rb_node, parent, p);
        rb_insert_color(&uprobe->rb_node, &uprobes_tree);
        /* get access + creation ref */
        atomic_set(&uprobe->ref, 2);

        return u;
}

/*
 * Acquire uprobes_treelock.
 * Matching uprobe already exists in rbtree;
 *      increment (access refcount) and return the matching uprobe.
 *
 * No matching uprobe; insert the uprobe in rb_tree;
 *      get a double refcount (access + creation) and return NULL.
 */
static struct uprobe *insert_uprobe(struct uprobe *uprobe)
{
        struct uprobe *u;

        spin_lock(&uprobes_treelock);
        u = __insert_uprobe(uprobe);
        spin_unlock(&uprobes_treelock);

        return u;
}

static struct uprobe *alloc_uprobe(struct inode *inode, loff_t offset)
{
        struct uprobe *uprobe, *cur_uprobe;

        uprobe = kzalloc(sizeof(struct uprobe), GFP_KERNEL);
        if (!uprobe)
                return NULL;

        uprobe->inode = igrab(inode);
        uprobe->offset = offset;
        init_rwsem(&uprobe->register_rwsem);
        init_rwsem(&uprobe->consumer_rwsem);

        /* add to uprobes_tree, sorted on inode:offset */
        cur_uprobe = insert_uprobe(uprobe);
        /* a uprobe exists for this inode:offset combination */
        if (cur_uprobe) {
                kfree(uprobe);
                uprobe = cur_uprobe;
                iput(inode);
        }

        return uprobe;
}

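/* Add consumer @uc to @uprobe's consumer list; serialized by consumer_rwsem. */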
static void consumer_add(struct uprobe *uprobe, struct uprobe_consumer *uc)
{
        down_write(&uprobe->consumer_rwsem);
        uc->next = uprobe->consumers;
        uprobe->consumers = uc;
        up_write(&uprobe->consumer_rwsem);
}

/*
 * For uprobe @uprobe, delete the consumer @uc.
 * Return true if @uc was deleted successfully, false otherwise.
 */
static bool consumer_del(struct uprobe *uprobe, struct uprobe_consumer *uc)
{
        struct uprobe_consumer **con;
        bool ret = false;

        down_write(&uprobe->consumer_rwsem);
        for (con = &uprobe->consumers; *con; con = &(*con)->next) {
                if (*con == uc) {
                        *con = uc->next;
                        ret = true;
                        break;
                }
        }
        up_write(&uprobe->consumer_rwsem);

        return ret;
}

static int __copy_insn(struct address_space *mapping, struct file *filp,
                        void *insn, int nbytes, loff_t offset)
{
        struct page *page;
        /*
         * Ensure that the page that has the original instruction is populated
         * and in page-cache. If ->readpage == NULL it must be shmem_mapping(),
         * see uprobe_register().
         */
        if (mapping->a_ops->readpage)
                page = read_mapping_page(mapping, offset >> PAGE_CACHE_SHIFT, filp);
        else
                page = shmem_read_mapping_page(mapping, offset >> PAGE_CACHE_SHIFT);
        if (IS_ERR(page))
                return PTR_ERR(page);

        copy_from_page(page, offset, insn, nbytes);
        page_cache_release(page);

        return 0;
}

static int copy_insn(struct uprobe *uprobe, struct file *filp)
{
        struct address_space *mapping = uprobe->inode->i_mapping;
        loff_t offs = uprobe->offset;
        void *insn = &uprobe->arch.insn;
        int size = sizeof(uprobe->arch.insn);
        int len, err = -EIO;

        /* Copy only the available bytes, -EIO if nothing was read */
        do {
                if (offs >= i_size_read(uprobe->inode))
                        break;

                len = min_t(int, size, PAGE_SIZE - (offs & ~PAGE_MASK));
                err = __copy_insn(mapping, filp, insn, len, offs);
                if (err)
                        break;

                insn += len;
                offs += len;
                size -= len;
        } while (size);

        return err;
}

static int prepare_uprobe(struct uprobe *uprobe, struct file *file,
                                struct mm_struct *mm, unsigned long vaddr)
{
        int ret = 0;

        if (test_bit(UPROBE_COPY_INSN, &uprobe->flags))
                return ret;

        /* TODO: move this into _register, until then we abuse this sem. */
        down_write(&uprobe->consumer_rwsem);
        if (test_bit(UPROBE_COPY_INSN, &uprobe->flags))
                goto out;

        ret = copy_insn(uprobe, file);
        if (ret)
                goto out;

        ret = -ENOTSUPP;
        if (is_trap_insn((uprobe_opcode_t *)&uprobe->arch.insn))
                goto out;

        ret = arch_uprobe_analyze_insn(&uprobe->arch, mm, vaddr);
        if (ret)
                goto out;

        /* uprobe_write_opcode() assumes we don't cross page boundary */
        BUG_ON((uprobe->offset & ~PAGE_MASK) +
                        UPROBE_SWBP_INSN_SIZE > PAGE_SIZE);

        smp_wmb(); /* pairs with rmb() in find_active_uprobe() */
        set_bit(UPROBE_COPY_INSN, &uprobe->flags);

 out:
        up_write(&uprobe->consumer_rwsem);

        return ret;
}

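/*
 * A consumer with no ->filter callback matches every mm; otherwise ask
 * the consumer.  filter_chain() returns true as soon as any consumer on
 * the list matches.
 */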
static inline bool consumer_filter(struct uprobe_consumer *uc,
                                   enum uprobe_filter_ctx ctx, struct mm_struct *mm)
{
        return !uc->filter || uc->filter(uc, ctx, mm);
}

static bool filter_chain(struct uprobe *uprobe,
                         enum uprobe_filter_ctx ctx, struct mm_struct *mm)
{
        struct uprobe_consumer *uc;
        bool ret = false;

        down_read(&uprobe->consumer_rwsem);
        for (uc = uprobe->consumers; uc; uc = uc->next) {
                ret = consumer_filter(uc, ctx, mm);
                if (ret)
                        break;
        }
        up_read(&uprobe->consumer_rwsem);

        return ret;
}

static int
install_breakpoint(struct uprobe *uprobe, struct mm_struct *mm,
                        struct vm_area_struct *vma, unsigned long vaddr)
{
        bool first_uprobe;
        int ret;

        ret = prepare_uprobe(uprobe, vma->vm_file, mm, vaddr);
        if (ret)
                return ret;

        /*
         * set MMF_HAS_UPROBES in advance for uprobe_pre_sstep_notifier(),
         * the task can hit this breakpoint right after __replace_page().
         */
        first_uprobe = !test_bit(MMF_HAS_UPROBES, &mm->flags);
        if (first_uprobe)
                set_bit(MMF_HAS_UPROBES, &mm->flags);

        ret = set_swbp(&uprobe->arch, mm, vaddr);
        if (!ret)
                clear_bit(MMF_RECALC_UPROBES, &mm->flags);
        else if (first_uprobe)
                clear_bit(MMF_HAS_UPROBES, &mm->flags);

        return ret;
}

static int
remove_breakpoint(struct uprobe *uprobe, struct mm_struct *mm, unsigned long vaddr)
{
        set_bit(MMF_RECALC_UPROBES, &mm->flags);
        return set_orig_insn(&uprobe->arch, mm, vaddr);
}

static inline bool uprobe_is_active(struct uprobe *uprobe)
{
        return !RB_EMPTY_NODE(&uprobe->rb_node);
}
/*
 * There could be threads that have already hit the breakpoint. They
 * will recheck the current insn and restart if find_uprobe() fails.
 * See find_active_uprobe().
 */
static void delete_uprobe(struct uprobe *uprobe)
{
        if (WARN_ON(!uprobe_is_active(uprobe)))
                return;

        spin_lock(&uprobes_treelock);
        rb_erase(&uprobe->rb_node, &uprobes_tree);
        spin_unlock(&uprobes_treelock);
        RB_CLEAR_NODE(&uprobe->rb_node); /* for uprobe_is_active() */
        iput(uprobe->inode);
        put_uprobe(uprobe);
}

struct map_info {
        struct map_info *next;
        struct mm_struct *mm;
        unsigned long vaddr;
};

static inline struct map_info *free_map_info(struct map_info *info)
{
        struct map_info *next = info->next;
        kfree(info);
        return next;
}

static struct map_info *
build_map_info(struct address_space *mapping, loff_t offset, bool is_register)
{
        unsigned long pgoff = offset >> PAGE_SHIFT;
        struct vm_area_struct *vma;
        struct map_info *curr = NULL;
        struct map_info *prev = NULL;
        struct map_info *info;
        int more = 0;

 again:
        i_mmap_lock_read(mapping);
        vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
                if (!valid_vma(vma, is_register))
                        continue;

                if (!prev && !more) {
                        /*
                         * Needs GFP_NOWAIT to avoid i_mmap_rwsem recursion through
                         * reclaim. This is optimistic, no harm done if it fails.
                         */
                        prev = kmalloc(sizeof(struct map_info),
                                        GFP_NOWAIT | __GFP_NOMEMALLOC | __GFP_NOWARN);
                        if (prev)
                                prev->next = NULL;
                }
                if (!prev) {
                        more++;
                        continue;
                }

                if (!atomic_inc_not_zero(&vma->vm_mm->mm_users))
                        continue;

                info = prev;
                prev = prev->next;
                info->next = curr;
                curr = info;

                info->mm = vma->vm_mm;
                info->vaddr = offset_to_vaddr(vma, offset);
        }
        i_mmap_unlock_read(mapping);

        if (!more)
                goto out;

        prev = curr;
        while (curr) {
                mmput(curr->mm);
                curr = curr->next;
        }

        do {
                info = kmalloc(sizeof(struct map_info), GFP_KERNEL);
                if (!info) {
                        curr = ERR_PTR(-ENOMEM);
                        goto out;
                }
                info->next = prev;
                prev = info;
        } while (--more);

        goto again;
 out:
        while (prev)
                prev = free_map_info(prev);
        return curr;
}

static int
register_for_each_vma(struct uprobe *uprobe, struct uprobe_consumer *new)
{
        bool is_register = !!new;
        struct map_info *info;
        int err = 0;

        percpu_down_write(&dup_mmap_sem);
        info = build_map_info(uprobe->inode->i_mapping,
                                        uprobe->offset, is_register);
        if (IS_ERR(info)) {
                err = PTR_ERR(info);
                goto out;
        }

        while (info) {
                struct mm_struct *mm = info->mm;
                struct vm_area_struct *vma;

                if (err && is_register)
                        goto free;

                down_write(&mm->mmap_sem);
                vma = find_vma(mm, info->vaddr);
                if (!vma || !valid_vma(vma, is_register) ||
                    file_inode(vma->vm_file) != uprobe->inode)
                        goto unlock;

                if (vma->vm_start > info->vaddr ||
                    vaddr_to_offset(vma, info->vaddr) != uprobe->offset)
                        goto unlock;

                if (is_register) {
                        /* consult only the "caller", the new consumer. */
                        if (consumer_filter(new,
                                        UPROBE_FILTER_REGISTER, mm))
                                err = install_breakpoint(uprobe, mm, vma, info->vaddr);
                } else if (test_bit(MMF_HAS_UPROBES, &mm->flags)) {
                        if (!filter_chain(uprobe,
                                        UPROBE_FILTER_UNREGISTER, mm))
                                err |= remove_breakpoint(uprobe, mm, info->vaddr);
                }

 unlock:
                up_write(&mm->mmap_sem);
 free:
                mmput(mm);
                info = free_map_info(info);
        }
 out:
        percpu_up_write(&dup_mmap_sem);
        return err;
}

static int __uprobe_register(struct uprobe *uprobe, struct uprobe_consumer *uc)
{
        consumer_add(uprobe, uc);
        return register_for_each_vma(uprobe, uc);
}

static void __uprobe_unregister(struct uprobe *uprobe, struct uprobe_consumer *uc)
{
        int err;

        if (WARN_ON(!consumer_del(uprobe, uc)))
                return;

        err = register_for_each_vma(uprobe, NULL);
        /* TODO: can't unregister? schedule a worker thread */
        if (!uprobe->consumers && !err)
                delete_uprobe(uprobe);
}

/*
 * uprobe_register - register a probe
 * @inode: the file in which the probe has to be placed.
 * @offset: offset from the start of the file.
 * @uc: information on how to handle the probe.
 *
 * Apart from the access refcount, uprobe_register() takes a creation
 * refcount (through alloc_uprobe) if and only if this @uprobe is getting
 * inserted into the rbtree (i.e. the first consumer for a @inode:@offset
 * tuple).  The creation refcount stops uprobe_unregister from freeing the
 * @uprobe even before the register operation is complete. The creation
 * refcount is released when the last @uc for the @uprobe
 * unregisters.
 *
 * Return errno if it cannot successfully install probes,
 * else return 0 (success).
 */
int uprobe_register(struct inode *inode, loff_t offset, struct uprobe_consumer *uc)
{
        struct uprobe *uprobe;
        int ret;

        /* Uprobe must have at least one set consumer */
        if (!uc->handler && !uc->ret_handler)
                return -EINVAL;

        /* copy_insn() uses read_mapping_page() or shmem_read_mapping_page() */
        if (!inode->i_mapping->a_ops->readpage && !shmem_mapping(inode->i_mapping))
                return -EIO;
        /* Racy, just to catch the obvious mistakes */
        if (offset > i_size_read(inode))
                return -EINVAL;

 retry:
        uprobe = alloc_uprobe(inode, offset);
        if (!uprobe)
                return -ENOMEM;
        /*
         * We can race with uprobe_unregister()->delete_uprobe().
         * Check uprobe_is_active() and retry if it is false.
         */
        down_write(&uprobe->register_rwsem);
        ret = -EAGAIN;
        if (likely(uprobe_is_active(uprobe))) {
                ret = __uprobe_register(uprobe, uc);
                if (ret)
                        __uprobe_unregister(uprobe, uc);
        }
        up_write(&uprobe->register_rwsem);
        put_uprobe(uprobe);

        if (unlikely(ret == -EAGAIN))
                goto retry;
        return ret;
}
EXPORT_SYMBOL_GPL(uprobe_register);

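/*
 * Typical consumer usage (a sketch, not part of this file): supply a
 * handler in a uprobe_consumer and register it against an inode:offset
 * pair, e.g.
 *
 *      static int my_handler(struct uprobe_consumer *uc, struct pt_regs *regs)
 *      {
 *              return 0;
 *      }
 *      static struct uprobe_consumer my_uc = { .handler = my_handler };
 *
 *      err = uprobe_register(inode, offset, &my_uc);
 *      ...
 *      uprobe_unregister(inode, offset, &my_uc);
 */
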
/*
 * uprobe_apply - add or remove the breakpoints of an already registered probe.
 * @inode: the file in which the probe resides.
 * @offset: offset from the start of the file.
 * @uc: consumer which wants to add more or remove some breakpoints
 * @add: add or remove the breakpoints
 */
int uprobe_apply(struct inode *inode, loff_t offset,
                        struct uprobe_consumer *uc, bool add)
{
        struct uprobe *uprobe;
        struct uprobe_consumer *con;
        int ret = -ENOENT;

        uprobe = find_uprobe(inode, offset);
        if (WARN_ON(!uprobe))
                return ret;

        down_write(&uprobe->register_rwsem);
        for (con = uprobe->consumers; con && con != uc ; con = con->next)
                ;
        if (con)
                ret = register_for_each_vma(uprobe, add ? uc : NULL);
        up_write(&uprobe->register_rwsem);
        put_uprobe(uprobe);

        return ret;
}

/*
 * uprobe_unregister - unregister an already registered probe.
 * @inode: the file in which the probe has to be removed.
 * @offset: offset from the start of the file.
 * @uc: identify which probe if multiple probes are colocated.
 */
void uprobe_unregister(struct inode *inode, loff_t offset, struct uprobe_consumer *uc)
{
        struct uprobe *uprobe;

        uprobe = find_uprobe(inode, offset);
        if (WARN_ON(!uprobe))
                return;

        down_write(&uprobe->register_rwsem);
        __uprobe_unregister(uprobe, uc);
        up_write(&uprobe->register_rwsem);
        put_uprobe(uprobe);
}
EXPORT_SYMBOL_GPL(uprobe_unregister);

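/*
 * Walk @mm and remove this uprobe's breakpoint from every vma that maps
 * uprobe->inode at the probed offset.
 */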
static int unapply_uprobe(struct uprobe *uprobe, struct mm_struct *mm)
{
        struct vm_area_struct *vma;
        int err = 0;

        down_read(&mm->mmap_sem);
        for (vma = mm->mmap; vma; vma = vma->vm_next) {
                unsigned long vaddr;
                loff_t offset;

                if (!valid_vma(vma, false) ||
                    file_inode(vma->vm_file) != uprobe->inode)
                        continue;

                offset = (loff_t)vma->vm_pgoff << PAGE_SHIFT;
                if (uprobe->offset <  offset ||
                    uprobe->offset >= offset + vma->vm_end - vma->vm_start)
                        continue;

                vaddr = offset_to_vaddr(vma, uprobe->offset);
                err |= remove_breakpoint(uprobe, mm, vaddr);
        }
        up_read(&mm->mmap_sem);

        return err;
}

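/*
 * Find any rbtree node for @inode whose offset falls in [min, max].
 * The caller must hold uprobes_treelock.
 */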
static struct rb_node *
find_node_in_range(struct inode *inode, loff_t min, loff_t max)
{
        struct rb_node *n = uprobes_tree.rb_node;

        while (n) {
                struct uprobe *u = rb_entry(n, struct uprobe, rb_node);

                if (inode < u->inode) {
                        n = n->rb_left;
                } else if (inode > u->inode) {
                        n = n->rb_right;
                } else {
                        if (max < u->offset)
                                n = n->rb_left;
                        else if (min > u->offset)
                                n = n->rb_right;
                        else
                                break;
                }
        }

        return n;
}

/*
 * For a given range in vma, build a list of probes that need to be inserted.
 */
static void build_probe_list(struct inode *inode,
                                struct vm_area_struct *vma,
                                unsigned long start, unsigned long end,
                                struct list_head *head)
{
        loff_t min, max;
        struct rb_node *n, *t;
        struct uprobe *u;

        INIT_LIST_HEAD(head);
        min = vaddr_to_offset(vma, start);
        max = min + (end - start) - 1;

        spin_lock(&uprobes_treelock);
        n = find_node_in_range(inode, min, max);
        if (n) {
                for (t = n; t; t = rb_prev(t)) {
                        u = rb_entry(t, struct uprobe, rb_node);
                        if (u->inode != inode || u->offset < min)
                                break;
                        list_add(&u->pending_list, head);
                        get_uprobe(u);
                }
                for (t = n; (t = rb_next(t)); ) {
                        u = rb_entry(t, struct uprobe, rb_node);
                        if (u->inode != inode || u->offset > max)
                                break;
                        list_add(&u->pending_list, head);
                        get_uprobe(u);
                }
        }
        spin_unlock(&uprobes_treelock);
}

/*
 * Called from mmap_region/vma_adjust with mm->mmap_sem acquired.
 *
 * Currently we ignore all errors and always return 0; the callers
 * can't handle the failure anyway.
 */
int uprobe_mmap(struct vm_area_struct *vma)
{
        struct list_head tmp_list;
        struct uprobe *uprobe, *u;
        struct inode *inode;

        if (no_uprobe_events() || !valid_vma(vma, true))
                return 0;

        inode = file_inode(vma->vm_file);
        if (!inode)
                return 0;

        mutex_lock(uprobes_mmap_hash(inode));
        build_probe_list(inode, vma, vma->vm_start, vma->vm_end, &tmp_list);
        /*
         * We can race with uprobe_unregister(); this uprobe can already be
         * removed. But in that case filter_chain() must return false: all
         * consumers have gone away.
         */
        list_for_each_entry_safe(uprobe, u, &tmp_list, pending_list) {
                if (!fatal_signal_pending(current) &&
                    filter_chain(uprobe, UPROBE_FILTER_MMAP, vma->vm_mm)) {
                        unsigned long vaddr = offset_to_vaddr(vma, uprobe->offset);
                        install_breakpoint(uprobe, vma->vm_mm, vma, vaddr);
                }
                put_uprobe(uprobe);
        }
        mutex_unlock(uprobes_mmap_hash(inode));

        return 0;
}

static bool
vma_has_uprobes(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
        loff_t min, max;
        struct inode *inode;
        struct rb_node *n;

        inode = file_inode(vma->vm_file);

        min = vaddr_to_offset(vma, start);
        max = min + (end - start) - 1;

        spin_lock(&uprobes_treelock);
        n = find_node_in_range(inode, min, max);
        spin_unlock(&uprobes_treelock);

        return !!n;
}

/*
 * Called in context of a munmap of a vma.
 */
void uprobe_munmap(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
        if (no_uprobe_events() || !valid_vma(vma, false))
                return;

        if (!atomic_read(&vma->vm_mm->mm_users)) /* called by mmput() ? */
                return;

        if (!test_bit(MMF_HAS_UPROBES, &vma->vm_mm->flags) ||
             test_bit(MMF_RECALC_UPROBES, &vma->vm_mm->flags))
                return;

        if (vma_has_uprobes(vma, start, end))
                set_bit(MMF_RECALC_UPROBES, &vma->vm_mm->flags);
}

/* Slot allocation for XOL */
static int xol_add_vma(struct mm_struct *mm, struct xol_area *area)
{
        struct vm_area_struct *vma;
        int ret;

        down_write(&mm->mmap_sem);
        if (mm->uprobes_state.xol_area) {
                ret = -EALREADY;
                goto fail;
        }

        if (!area->vaddr) {
                /* Try to map as high as possible; this is only a hint. */
                area->vaddr = get_unmapped_area(NULL, TASK_SIZE - PAGE_SIZE,
                                                PAGE_SIZE, 0, 0);
                if (area->vaddr & ~PAGE_MASK) {
                        ret = area->vaddr;
                        goto fail;
                }
        }

        vma = _install_special_mapping(mm, area->vaddr, PAGE_SIZE,
                                VM_EXEC|VM_MAYEXEC|VM_DONTCOPY|VM_IO,
                                &area->xol_mapping);
        if (IS_ERR(vma)) {
                ret = PTR_ERR(vma);
                goto fail;
        }

        ret = 0;
        smp_wmb();      /* pairs with get_xol_area() */
        mm->uprobes_state.xol_area = area;
 fail:
        up_write(&mm->mmap_sem);

        return ret;
}

static struct xol_area *__create_xol_area(unsigned long vaddr)
{
        struct mm_struct *mm = current->mm;
        uprobe_opcode_t insn = UPROBE_SWBP_INSN;
        struct xol_area *area;

        area = kmalloc(sizeof(*area), GFP_KERNEL);
        if (unlikely(!area))
                goto out;

        area->bitmap = kzalloc(BITS_TO_LONGS(UINSNS_PER_PAGE) * sizeof(long), GFP_KERNEL);
        if (!area->bitmap)
                goto free_area;

        area->xol_mapping.name = "[uprobes]";
        area->xol_mapping.pages = area->pages;
        area->pages[0] = alloc_page(GFP_HIGHUSER);
        if (!area->pages[0])
                goto free_bitmap;
        area->pages[1] = NULL;

        area->vaddr = vaddr;
        init_waitqueue_head(&area->wq);
        /* Reserve the 1st slot for get_trampoline_vaddr() */
        set_bit(0, area->bitmap);
        atomic_set(&area->slot_count, 1);
        copy_to_page(area->pages[0], 0, &insn, UPROBE_SWBP_INSN_SIZE);

        if (!xol_add_vma(mm, area))
                return area;

        __free_page(area->pages[0]);
 free_bitmap:
        kfree(area->bitmap);
 free_area:
        kfree(area);
 out:
        return NULL;
}

/*
 * get_xol_area - Allocate the process's xol_area if necessary.
 * This area will be used for storing instructions for execution out of line.
 *
 * Returns the allocated area or NULL.
 */
static struct xol_area *get_xol_area(void)
{
        struct mm_struct *mm = current->mm;
        struct xol_area *area;

        if (!mm->uprobes_state.xol_area)
                __create_xol_area(0);

        area = mm->uprobes_state.xol_area;
        smp_read_barrier_depends();     /* pairs with wmb in xol_add_vma() */
        return area;
}

/*
 * uprobe_clear_state - Free the area allocated for slots.
 */
void uprobe_clear_state(struct mm_struct *mm)
{
        struct xol_area *area = mm->uprobes_state.xol_area;

        if (!area)
                return;

        put_page(area->pages[0]);
        kfree(area->bitmap);
        kfree(area);
}

void uprobe_start_dup_mmap(void)
{
        percpu_down_read(&dup_mmap_sem);
}

void uprobe_end_dup_mmap(void)
{
        percpu_up_read(&dup_mmap_sem);
}

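/*
 * Called on fork: the child starts without an xol_area and inherits
 * the parent's MMF_HAS_UPROBES state.
 */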
void uprobe_dup_mmap(struct mm_struct *oldmm, struct mm_struct *newmm)
{
        newmm->uprobes_state.xol_area = NULL;

        if (test_bit(MMF_HAS_UPROBES, &oldmm->flags)) {
                set_bit(MMF_HAS_UPROBES, &newmm->flags);
                /* unconditionally, dup_mmap() skips VM_DONTCOPY vmas */
                set_bit(MMF_RECALC_UPROBES, &newmm->flags);
        }
}

/*
 * xol_take_insn_slot - search for a free slot.
 */
static unsigned long xol_take_insn_slot(struct xol_area *area)
{
        unsigned long slot_addr;
        int slot_nr;

        do {
                slot_nr = find_first_zero_bit(area->bitmap, UINSNS_PER_PAGE);
                if (slot_nr < UINSNS_PER_PAGE) {
                        if (!test_and_set_bit(slot_nr, area->bitmap))
                                break;

                        slot_nr = UINSNS_PER_PAGE;
                        continue;
                }
                wait_event(area->wq, (atomic_read(&area->slot_count) < UINSNS_PER_PAGE));
        } while (slot_nr >= UINSNS_PER_PAGE);

        slot_addr = area->vaddr + (slot_nr * UPROBE_XOL_SLOT_BYTES);
        atomic_inc(&area->slot_count);

        return slot_addr;
}

/*
 * xol_get_insn_slot - allocate a slot for xol.
 * Returns the allocated slot address or 0.
 */
static unsigned long xol_get_insn_slot(struct uprobe *uprobe)
{
        struct xol_area *area;
        unsigned long xol_vaddr;

        area = get_xol_area();
        if (!area)
                return 0;

        xol_vaddr = xol_take_insn_slot(area);
        if (unlikely(!xol_vaddr))
                return 0;

        arch_uprobe_copy_ixol(area->pages[0], xol_vaddr,
                              &uprobe->arch.ixol, sizeof(uprobe->arch.ixol));

        return xol_vaddr;
}

/*
 * xol_free_insn_slot - If the slot was earlier allocated by
 * xol_get_insn_slot(), make the slot available for
 * subsequent requests.
 */
static void xol_free_insn_slot(struct task_struct *tsk)
{
        struct xol_area *area;
        unsigned long vma_end;
        unsigned long slot_addr;

        if (!tsk->mm || !tsk->mm->uprobes_state.xol_area || !tsk->utask)
                return;

        slot_addr = tsk->utask->xol_vaddr;
        if (unlikely(!slot_addr))
                return;

        area = tsk->mm->uprobes_state.xol_area;
        vma_end = area->vaddr + PAGE_SIZE;
        if (area->vaddr <= slot_addr && slot_addr < vma_end) {
                unsigned long offset;
                int slot_nr;

                offset = slot_addr - area->vaddr;
                slot_nr = offset / UPROBE_XOL_SLOT_BYTES;
                if (slot_nr >= UINSNS_PER_PAGE)
                        return;

                clear_bit(slot_nr, area->bitmap);
                atomic_dec(&area->slot_count);
                smp_mb__after_atomic(); /* pairs with prepare_to_wait() */
                if (waitqueue_active(&area->wq))
                        wake_up(&area->wq);

                tsk->utask->xol_vaddr = 0;
        }
}

void __weak arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr,
                                  void *src, unsigned long len)
{
        /* Initialize the slot */
        copy_to_page(page, vaddr, src, len);

        /*
         * We probably need flush_icache_user_range() but it needs vma.
         * This should work on most architectures by default. If an
         * architecture needs to do something different it can define
         * its own version of the function.
         */
        flush_dcache_page(page);
}

/**
 * uprobe_get_swbp_addr - compute address of swbp given post-swbp regs
 * @regs: Reflects the saved state of the task after it has hit a breakpoint
 * instruction.
 * Return the address of the breakpoint instruction.
 */
unsigned long __weak uprobe_get_swbp_addr(struct pt_regs *regs)
{
        return instruction_pointer(regs) - UPROBE_SWBP_INSN_SIZE;
}

unsigned long uprobe_get_trap_addr(struct pt_regs *regs)
{
        struct uprobe_task *utask = current->utask;

        if (unlikely(utask && utask->active_uprobe))
                return utask->vaddr;

        return instruction_pointer(regs);
}

static struct return_instance *free_ret_instance(struct return_instance *ri)
{
        struct return_instance *next = ri->next;
        put_uprobe(ri->uprobe);
        kfree(ri);
        return next;
}

/*
 * Called with no locks held.
 * Called in context of an exiting or an exec-ing thread.
 */
void uprobe_free_utask(struct task_struct *t)
{
        struct uprobe_task *utask = t->utask;
        struct return_instance *ri;

        if (!utask)
                return;

        if (utask->active_uprobe)
                put_uprobe(utask->active_uprobe);

        ri = utask->return_instances;
        while (ri)
                ri = free_ret_instance(ri);

        xol_free_insn_slot(t);
        kfree(utask);
        t->utask = NULL;
}

/*
 * Allocate a uprobe_task object for the task if necessary.
 * Called when the thread hits a breakpoint.
 *
 * Returns:
 * - pointer to new uprobe_task on success
 * - NULL otherwise
 */
static struct uprobe_task *get_utask(void)
{
        if (!current->utask)
                current->utask = kzalloc(sizeof(struct uprobe_task), GFP_KERNEL);
        return current->utask;
}

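/*
 * Duplicate the parent's return_instance chain into the child's new
 * uprobe_task, taking a reference on each uprobe.  Used on fork.
 */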
static int dup_utask(struct task_struct *t, struct uprobe_task *o_utask)
{
        struct uprobe_task *n_utask;
        struct return_instance **p, *o, *n;

        n_utask = kzalloc(sizeof(struct uprobe_task), GFP_KERNEL);
        if (!n_utask)
                return -ENOMEM;
        t->utask = n_utask;

        p = &n_utask->return_instances;
        for (o = o_utask->return_instances; o; o = o->next) {
                n = kmalloc(sizeof(struct return_instance), GFP_KERNEL);
                if (!n)
                        return -ENOMEM;

                *n = *o;
                get_uprobe(n->uprobe);
                n->next = NULL;

                *p = n;
                p = &n->next;
                n_utask->depth++;
        }

        return 0;
}

static void uprobe_warn(struct task_struct *t, const char *msg)
{
        pr_warn("uprobe: %s:%d failed to %s\n",
                        current->comm, current->pid, msg);
}

static void dup_xol_work(struct callback_head *work)
{
        if (current->flags & PF_EXITING)
                return;

        if (!__create_xol_area(current->utask->dup_xol_addr))
                uprobe_warn(current, "dup xol area");
}

/*
 * Called in the context of a new clone/fork from copy_process().
 */
void uprobe_copy_process(struct task_struct *t, unsigned long flags)
{
        struct uprobe_task *utask = current->utask;
        struct mm_struct *mm = current->mm;
        struct xol_area *area;

        t->utask = NULL;

        if (!utask || !utask->return_instances)
                return;

        if (mm == t->mm && !(flags & CLONE_VFORK))
                return;

        if (dup_utask(t, utask))
                return uprobe_warn(t, "dup ret instances");

        /* The task can fork() after dup_xol_work() fails */
        area = mm->uprobes_state.xol_area;
        if (!area)
                return uprobe_warn(t, "dup xol area");

        if (mm == t->mm)
                return;

        t->utask->dup_xol_addr = area->vaddr;
        init_task_work(&t->utask->dup_xol_work, dup_xol_work);
        task_work_add(t, &t->utask->dup_xol_work, true);
}

/*
 * The current area->vaddr scheme assumes that the trampoline address is
 * always equal to area->vaddr.
 *
 * Returns -1 in case the xol_area is not allocated.
 */
static unsigned long get_trampoline_vaddr(void)
{
        struct xol_area *area;
        unsigned long trampoline_vaddr = -1;

        area = current->mm->uprobes_state.xol_area;
        smp_read_barrier_depends();
        if (area)
                trampoline_vaddr = area->vaddr;

        return trampoline_vaddr;
}

1526 static void cleanup_return_instances(struct uprobe_task *utask, bool chained,
1527                                         struct pt_regs *regs)
1528 {
1529         struct return_instance *ri = utask->return_instances;
1530         enum rp_check ctx = chained ? RP_CHECK_CHAIN_CALL : RP_CHECK_CALL;
1531
1532         while (ri && !arch_uretprobe_is_alive(ri, ctx, regs)) {
1533                 ri = free_ret_instance(ri);
1534                 utask->depth--;
1535         }
1536         utask->return_instances = ri;
1537 }
1538
1539 static void prepare_uretprobe(struct uprobe *uprobe, struct pt_regs *regs)
1540 {
1541         struct return_instance *ri;
1542         struct uprobe_task *utask;
1543         unsigned long orig_ret_vaddr, trampoline_vaddr;
1544         bool chained;
1545
1546         if (!get_xol_area())
1547                 return;
1548
1549         utask = get_utask();
1550         if (!utask)
1551                 return;
1552
1553         if (utask->depth >= MAX_URETPROBE_DEPTH) {
1554                 printk_ratelimited(KERN_INFO "uprobe: omit uretprobe due to"
1555                                 " nestedness limit pid/tgid=%d/%d\n",
1556                                 current->pid, current->tgid);
1557                 return;
1558         }
1559
1560         ri = kmalloc(sizeof(struct return_instance), GFP_KERNEL);
1561         if (!ri)
1562                 return;
1563
1564         trampoline_vaddr = get_trampoline_vaddr();
1565         orig_ret_vaddr = arch_uretprobe_hijack_return_addr(trampoline_vaddr, regs);
1566         if (orig_ret_vaddr == -1)
1567                 goto fail;
1568
1569         /* drop the entries invalidated by longjmp() */
1570         chained = (orig_ret_vaddr == trampoline_vaddr);
1571         cleanup_return_instances(utask, chained, regs);
1572
1573         /*
1574          * We don't want to keep trampoline address in stack, rather keep the
1575          * original return address of first caller thru all the consequent
1576          * instances. This also makes breakpoint unwrapping easier.
1577          */
1578         if (chained) {
1579                 if (!utask->return_instances) {
1580                         /*
1581                          * This situation is not possible. Likely we have an
1582                          * attack from user-space.
1583                          */
1584                         uprobe_warn(current, "handle tail call");
1585                         goto fail;
1586                 }
1587                 orig_ret_vaddr = utask->return_instances->orig_ret_vaddr;
1588         }
1589
1590         ri->uprobe = get_uprobe(uprobe);
1591         ri->func = instruction_pointer(regs);
1592         ri->stack = user_stack_pointer(regs);
1593         ri->orig_ret_vaddr = orig_ret_vaddr;
1594         ri->chained = chained;
1595
1596         utask->depth++;
1597         ri->next = utask->return_instances;
1598         utask->return_instances = ri;
1599
1600         return;
1601  fail:
1602         kfree(ri);
1603 }

/* Prepare to single-step probed instruction out of line. */
static int
pre_ssout(struct uprobe *uprobe, struct pt_regs *regs, unsigned long bp_vaddr)
{
        struct uprobe_task *utask;
        unsigned long xol_vaddr;
        int err;

        utask = get_utask();
        if (!utask)
                return -ENOMEM;

        xol_vaddr = xol_get_insn_slot(uprobe);
        if (!xol_vaddr)
                return -ENOMEM;

        utask->xol_vaddr = xol_vaddr;
        utask->vaddr = bp_vaddr;

        err = arch_uprobe_pre_xol(&uprobe->arch, regs);
        if (unlikely(err)) {
                xol_free_insn_slot(current);
                return err;
        }

        utask->active_uprobe = uprobe;
        utask->state = UTASK_SSTEP;
        return 0;
}

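/*
 * Illustrative summary (added commentary) of the XOL round trip:
 * handle_swbp() -> pre_ssout() arms a single-step on the copied insn;
 * the completed step lands in uprobe_post_sstep_notifier(), and
 * handle_singlestep() -> arch_uprobe_post_xol() then fixes up the
 * registers and frees the slot.
 */
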
/*
 * If we are single-stepping, ensure this thread does not receive
 * non-fatal signals until the single-step completes.  When the xol insn
 * itself triggers a signal, restart the original insn even if the task
 * is already SIGKILL'ed (since the coredump should report the correct
 * ip).  This matters even more if the task has a handler for SIGSEGV or
 * the like: the _same_ instruction would have to be repeated after
 * returning from the signal handler, and SSTEP could never finish.
 */
bool uprobe_deny_signal(void)
{
        struct task_struct *t = current;
        struct uprobe_task *utask = t->utask;

        if (likely(!utask || !utask->active_uprobe))
                return false;

        WARN_ON_ONCE(utask->state != UTASK_SSTEP);

        if (signal_pending(t)) {
                spin_lock_irq(&t->sighand->siglock);
                clear_tsk_thread_flag(t, TIF_SIGPENDING);
                spin_unlock_irq(&t->sighand->siglock);

                if (__fatal_signal_pending(t) || arch_uprobe_xol_was_trapped(t)) {
                        utask->state = UTASK_SSTEP_TRAPPED;
                        set_tsk_thread_flag(t, TIF_UPROBE);
                }
        }

        return true;
}

static void mmf_recalc_uprobes(struct mm_struct *mm)
{
        struct vm_area_struct *vma;

        for (vma = mm->mmap; vma; vma = vma->vm_next) {
                if (!valid_vma(vma, false))
                        continue;
                /*
                 * This is not strictly accurate: we can race with
                 * uprobe_unregister() and see an already-removed
                 * uprobe if delete_uprobe() has not been called yet,
                 * or this uprobe may have been filtered out.
                 */
                if (vma_has_uprobes(vma, vma->vm_start, vma->vm_end))
                        return;
        }

        clear_bit(MMF_HAS_UPROBES, &mm->flags);
}

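/*
 * Note (added commentary): the function below first tries a fast,
 * non-faulting read of the opcode; only when that fails does it fall
 * back to the slower get_user_pages() path before checking for a
 * breakpoint instruction.
 */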
static int is_trap_at_addr(struct mm_struct *mm, unsigned long vaddr)
{
        struct page *page;
        uprobe_opcode_t opcode;
        int result;

        pagefault_disable();
        result = __copy_from_user_inatomic(&opcode, (void __user *)vaddr,
                                                        sizeof(opcode));
        pagefault_enable();

        if (likely(result == 0))
                goto out;

        result = get_user_pages(NULL, mm, vaddr, 1, 0, 1, &page, NULL);
        if (result < 0)
                return result;

        copy_from_page(page, vaddr, &opcode, UPROBE_SWBP_INSN_SIZE);
        put_page(page);
 out:
        /* This needs to return true for any variant of the trap insn */
        return is_trap_insn(&opcode);
}

static struct uprobe *find_active_uprobe(unsigned long bp_vaddr, int *is_swbp)
{
        struct mm_struct *mm = current->mm;
        struct uprobe *uprobe = NULL;
        struct vm_area_struct *vma;

        down_read(&mm->mmap_sem);
        vma = find_vma(mm, bp_vaddr);
        if (vma && vma->vm_start <= bp_vaddr) {
                if (valid_vma(vma, false)) {
                        struct inode *inode = file_inode(vma->vm_file);
                        loff_t offset = vaddr_to_offset(vma, bp_vaddr);

                        uprobe = find_uprobe(inode, offset);
                }

                if (!uprobe)
                        *is_swbp = is_trap_at_addr(mm, bp_vaddr);
        } else {
                *is_swbp = -EFAULT;
        }

        if (!uprobe && test_and_clear_bit(MMF_RECALC_UPROBES, &mm->flags))
                mmf_recalc_uprobes(mm);
        up_read(&mm->mmap_sem);

        return uprobe;
}

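/*
 * Illustrative consumer sketch (hypothetical, not part of this file):
 * handler_chain() below walks the registered consumers and invokes
 * these callbacks.
 *
 *      static int my_handler(struct uprobe_consumer *uc,
 *                            struct pt_regs *regs)
 *      {
 *              return 0;               // or UPROBE_HANDLER_REMOVE to
 *                                      // unapply the probe for this mm
 *      }
 *
 *      static struct uprobe_consumer my_consumer = {
 *              .handler     = my_handler,
 *              .ret_handler = NULL,    // non-NULL arms a uretprobe
 *      };
 */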
static void handler_chain(struct uprobe *uprobe, struct pt_regs *regs)
{
        struct uprobe_consumer *uc;
        int remove = UPROBE_HANDLER_REMOVE;
        bool need_prep = false; /* prepare return uprobe, when needed */

        down_read(&uprobe->register_rwsem);
        for (uc = uprobe->consumers; uc; uc = uc->next) {
                int rc = 0;

                if (uc->handler) {
                        rc = uc->handler(uc, regs);
                        WARN(rc & ~UPROBE_HANDLER_MASK,
                                "bad rc=0x%x from %pf()\n", rc, uc->handler);
                }

                if (uc->ret_handler)
                        need_prep = true;

                remove &= rc;
        }

        if (need_prep && !remove)
                prepare_uretprobe(uprobe, regs); /* put bp at return */

        if (remove && uprobe->consumers) {
                WARN_ON(!uprobe_is_active(uprobe));
                unapply_uprobe(uprobe, current->mm);
        }
        up_read(&uprobe->register_rwsem);
}

static void
handle_uretprobe_chain(struct return_instance *ri, struct pt_regs *regs)
{
        struct uprobe *uprobe = ri->uprobe;
        struct uprobe_consumer *uc;

        down_read(&uprobe->register_rwsem);
        for (uc = uprobe->consumers; uc; uc = uc->next) {
                if (uc->ret_handler)
                        uc->ret_handler(uc, ri->func, regs);
        }
        up_read(&uprobe->register_rwsem);
}

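/*
 * Illustrative example (added commentary): for instances whose .chained
 * flags read true, true, false from the top, the loop below reads each
 * flag and advances, stopping only after it has stepped past the
 * non-chained entry; it thus returns the fourth node, the head of the
 * next chain (or NULL).
 */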
static struct return_instance *find_next_ret_chain(struct return_instance *ri)
{
        bool chained;

        do {
                chained = ri->chained;
                ri = ri->next;  /* can't be NULL if chained */
        } while (chained);

        return ri;
}

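/*
 * Added commentary: a uretprobed function "returns" into the XOL-area
 * trampoline; the breakpoint there routes us here via handle_swbp().
 * We pop the recorded return_instances, run the ret_handlers, and
 * resume at the original return address saved in orig_ret_vaddr.
 */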
static void handle_trampoline(struct pt_regs *regs)
{
        struct uprobe_task *utask;
        struct return_instance *ri, *next;
        bool valid;

        utask = current->utask;
        if (!utask)
                goto sigill;

        ri = utask->return_instances;
        if (!ri)
                goto sigill;

        do {
                /*
                 * We should throw out the frames invalidated by longjmp().
                 * If this chain is valid, then the next one should be alive
                 * or NULL; the latter case means that nobody but ri->func
                 * could hit this trampoline on return. TODO: sigaltstack().
                 */
                next = find_next_ret_chain(ri);
                valid = !next || arch_uretprobe_is_alive(next, RP_CHECK_RET, regs);

                instruction_pointer_set(regs, ri->orig_ret_vaddr);
                do {
                        if (valid)
                                handle_uretprobe_chain(ri, regs);
                        ri = free_ret_instance(ri);
                        utask->depth--;
                } while (ri != next);
        } while (!valid);

        utask->return_instances = ri;
        return;

 sigill:
        uprobe_warn(current, "handle uretprobe, sending SIGILL.");
        force_sig_info(SIGILL, SEND_SIG_FORCED, current);
}

bool __weak arch_uprobe_ignore(struct arch_uprobe *aup, struct pt_regs *regs)
{
        return false;
}

bool __weak arch_uretprobe_is_alive(struct return_instance *ret, enum rp_check ctx,
                                        struct pt_regs *regs)
{
        return true;
}

/*
 * Run the handlers and ask the thread to single-step.
 * Ensure that no non-fatal signal can interrupt the thread while it
 * single-steps.
 */
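/*
 * Rough decision ladder below (added commentary): a trampoline hit goes
 * to handle_trampoline(); with no matching uprobe we send SIGTRAP or
 * silently restart; otherwise we run handler_chain() and then either
 * emulate the insn (arch_uprobe_skip_sstep) or single-step it out of
 * line via pre_ssout().
 */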
static void handle_swbp(struct pt_regs *regs)
{
        struct uprobe *uprobe;
        unsigned long bp_vaddr;
        int uninitialized_var(is_swbp);

        bp_vaddr = uprobe_get_swbp_addr(regs);
        if (bp_vaddr == get_trampoline_vaddr())
                return handle_trampoline(regs);

        uprobe = find_active_uprobe(bp_vaddr, &is_swbp);
        if (!uprobe) {
                if (is_swbp > 0) {
                        /* No matching uprobe; signal SIGTRAP. */
                        send_sig(SIGTRAP, current, 0);
                } else {
                        /*
                         * Either we raced with uprobe_unregister() or we can't
                         * access this memory. The latter is only possible if
                         * another thread plays with our ->mm. In both cases
                         * we can simply restart. If this vma was unmapped we
                         * can pretend this insn was not executed yet and get
                         * the (correct) SIGSEGV after restart.
                         */
                        instruction_pointer_set(regs, bp_vaddr);
                }
                return;
        }

        /* change it in advance for ->handler() and restart */
        instruction_pointer_set(regs, bp_vaddr);

        /*
         * TODO: move copy_insn/etc into _register and remove this hack.
         * After we hit the bp, _unregister + _register can install the
         * new and not-yet-analyzed uprobe at the same address, restart.
         */
        smp_rmb(); /* pairs with wmb() in install_breakpoint() */
        if (unlikely(!test_bit(UPROBE_COPY_INSN, &uprobe->flags)))
                goto out;

        /* Tracing handlers use ->utask to communicate with fetch methods */
        if (!get_utask())
                goto out;

        if (arch_uprobe_ignore(&uprobe->arch, regs))
                goto out;

        handler_chain(uprobe, regs);

        if (arch_uprobe_skip_sstep(&uprobe->arch, regs))
                goto out;

        if (!pre_ssout(uprobe, regs, bp_vaddr))
                return;

        /* arch_uprobe_skip_sstep() succeeded, or restart if we can't single-step */
out:
        put_uprobe(uprobe);
}

/*
 * Perform required fix-ups and disable singlestep.
 * Allow pending signals to take effect.
 */
static void handle_singlestep(struct uprobe_task *utask, struct pt_regs *regs)
{
        struct uprobe *uprobe;
        int err = 0;

        uprobe = utask->active_uprobe;
        if (utask->state == UTASK_SSTEP_ACK)
                err = arch_uprobe_post_xol(&uprobe->arch, regs);
        else if (utask->state == UTASK_SSTEP_TRAPPED)
                arch_uprobe_abort_xol(&uprobe->arch, regs);
        else
                WARN_ON_ONCE(1);

        put_uprobe(uprobe);
        utask->active_uprobe = NULL;
        utask->state = UTASK_RUNNING;
        xol_free_insn_slot(current);

        spin_lock_irq(&current->sighand->siglock);
        recalc_sigpending(); /* see uprobe_deny_signal() */
        spin_unlock_irq(&current->sighand->siglock);

        if (unlikely(err)) {
                uprobe_warn(current, "execute the probed insn, sending SIGILL.");
                force_sig_info(SIGILL, SEND_SIG_FORCED, current);
        }
}

/*
 * On breakpoint hit, the breakpoint notifier sets the TIF_UPROBE flag and
 * allows the thread to return from interrupt. After that handle_swbp()
 * sets utask->active_uprobe.
 *
 * On singlestep exception, the singlestep notifier sets the TIF_UPROBE flag
 * and allows the thread to return from interrupt.
 *
 * While returning to userspace, the thread notices the TIF_UPROBE flag and
 * calls uprobe_notify_resume().
 */
void uprobe_notify_resume(struct pt_regs *regs)
{
        struct uprobe_task *utask;

        clear_thread_flag(TIF_UPROBE);

        utask = current->utask;
        if (utask && utask->active_uprobe)
                handle_singlestep(utask, regs);
        else
                handle_swbp(regs);
}

/*
 * uprobe_pre_sstep_notifier gets called from interrupt context as part of
 * the notifier mechanism. Set TIF_UPROBE flag and indicate breakpoint hit.
 */
int uprobe_pre_sstep_notifier(struct pt_regs *regs)
{
        if (!current->mm)
                return 0;

        if (!test_bit(MMF_HAS_UPROBES, &current->mm->flags) &&
            (!current->utask || !current->utask->return_instances))
                return 0;

        set_thread_flag(TIF_UPROBE);
        return 1;
}

/*
 * uprobe_post_sstep_notifier gets called in interrupt context as part of
 * the notifier mechanism. Set TIF_UPROBE flag and indicate completion of
 * singlestep.
 */
int uprobe_post_sstep_notifier(struct pt_regs *regs)
{
        struct uprobe_task *utask = current->utask;

        if (!current->mm || !utask || !utask->active_uprobe)
                /* task is currently not uprobed */
                return 0;

        utask->state = UTASK_SSTEP_ACK;
        set_thread_flag(TIF_UPROBE);
        return 1;
}

static struct notifier_block uprobe_exception_nb = {
        .notifier_call          = arch_uprobe_exception_notify,
        .priority               = INT_MAX-1,    /* notified after kprobes, kgdb */
};

static int __init init_uprobes(void)
{
        int i;

        for (i = 0; i < UPROBES_HASH_SZ; i++)
                mutex_init(&uprobes_mmap_mutex[i]);

        if (percpu_init_rwsem(&dup_mmap_sem))
                return -ENOMEM;

        return register_die_notifier(&uprobe_exception_nb);
}
__initcall(init_uprobes);