kprobes: Unpoison stack in jprobe_return() for KASAN
[cascardo/linux.git] mm/kasan/kasan.c
/*
 * This file contains shadow memory manipulation code.
 *
 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
 *
 * Some code borrowed from https://github.com/xairy/kasan-prototype by
 *        Andrey Konovalov <adech.fo@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#define DISABLE_BRANCH_PROFILING

#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/kmemleak.h>
#include <linux/linkage.h>
#include <linux/memblock.h>
#include <linux/memory.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/stacktrace.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/vmalloc.h>
#include <linux/bug.h>

#include "kasan.h"
#include "../slab.h"

/*
 * Poisons the shadow memory for 'size' bytes starting from 'addr'.
 * Memory addresses should be aligned to KASAN_SHADOW_SCALE_SIZE.
 */
static void kasan_poison_shadow(const void *address, size_t size, u8 value)
{
        void *shadow_start, *shadow_end;

        shadow_start = kasan_mem_to_shadow(address);
        shadow_end = kasan_mem_to_shadow(address + size);

        memset(shadow_start, value, shadow_end - shadow_start);
}

void kasan_unpoison_shadow(const void *address, size_t size)
{
        kasan_poison_shadow(address, size, 0);

        if (size & KASAN_SHADOW_MASK) {
                u8 *shadow = (u8 *)kasan_mem_to_shadow(address + size);
                *shadow = size & KASAN_SHADOW_MASK;
        }
}
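
/*
 * Worked example (assuming KASAN_SHADOW_SCALE_SIZE == 8, as on x86_64
 * and arm64): kasan_unpoison_shadow(p, 13) zeroes the shadow byte for
 * p[0..7] (the whole granule is accessible) and writes 5 into the
 * shadow byte for p[8..15], meaning only the first 5 bytes of that
 * granule may be touched; a subsequent access to p[13] is reported.
 */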

static void __kasan_unpoison_stack(struct task_struct *task, const void *sp)
{
        void *base = task_stack_page(task);
        size_t size = sp - base;

        kasan_unpoison_shadow(base, size);
}

/* Unpoison the entire stack for a task. */
void kasan_unpoison_task_stack(struct task_struct *task)
{
        __kasan_unpoison_stack(task, task_stack_page(task) + THREAD_SIZE);
}

/* Unpoison the stack for the current task beyond a watermark sp value. */
asmlinkage void kasan_unpoison_task_stack_below(const void *watermark)
{
        __kasan_unpoison_stack(current, watermark);
}

/*
 * Clear all poison for the region between the current SP and a provided
 * watermark value, as is sometimes required prior to hand-crafted asm function
 * returns in the middle of functions.
 */
void kasan_unpoison_stack_above_sp_to(const void *watermark)
{
        const void *sp = __builtin_frame_address(0);
        size_t size = watermark - sp;

        if (WARN_ON(sp > watermark))
                return;
        kasan_unpoison_shadow(sp, size);
}

/*
 * All functions below are always inlined so the compiler can perform
 * better optimizations in each of __asan_loadX/__asan_storeX
 * depending on the memory access size X.
 */

static __always_inline bool memory_is_poisoned_1(unsigned long addr)
{
        s8 shadow_value = *(s8 *)kasan_mem_to_shadow((void *)addr);

        if (unlikely(shadow_value)) {
                s8 last_accessible_byte = addr & KASAN_SHADOW_MASK;
                return unlikely(last_accessible_byte >= shadow_value);
        }

        return false;
}
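
/*
 * Example for the helper above: a shadow value of 5 means the first 5
 * bytes of the granule are accessible. An access at offset 4 passes
 * (4 >= 5 is false) while one at offset 5 is reported. The negative
 * KASAN_* poison codes compare as poisoned for every offset.
 */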

static __always_inline bool memory_is_poisoned_2(unsigned long addr)
{
        u16 *shadow_addr = (u16 *)kasan_mem_to_shadow((void *)addr);

        if (unlikely(*shadow_addr)) {
                if (memory_is_poisoned_1(addr + 1))
                        return true;

                /*
                 * If a single shadow byte covers the 2-byte access, we don't
                 * need to do anything more. Otherwise, test the first
                 * shadow byte.
                 */
                if (likely(((addr + 1) & KASAN_SHADOW_MASK) != 0))
                        return false;

                return unlikely(*(u8 *)shadow_addr);
        }

        return false;
}

static __always_inline bool memory_is_poisoned_4(unsigned long addr)
{
        u16 *shadow_addr = (u16 *)kasan_mem_to_shadow((void *)addr);

        if (unlikely(*shadow_addr)) {
                if (memory_is_poisoned_1(addr + 3))
                        return true;

                /*
                 * If a single shadow byte covers the 4-byte access, we don't
                 * need to do anything more. Otherwise, test the first
                 * shadow byte.
                 */
                if (likely(((addr + 3) & KASAN_SHADOW_MASK) >= 3))
                        return false;

                return unlikely(*(u8 *)shadow_addr);
        }

        return false;
}

static __always_inline bool memory_is_poisoned_8(unsigned long addr)
{
        u16 *shadow_addr = (u16 *)kasan_mem_to_shadow((void *)addr);

        if (unlikely(*shadow_addr)) {
                if (memory_is_poisoned_1(addr + 7))
                        return true;

                /*
                 * If a single shadow byte covers the 8-byte access, we don't
                 * need to do anything more. Otherwise, test the first
                 * shadow byte.
                 */
                if (likely(IS_ALIGNED(addr, KASAN_SHADOW_SCALE_SIZE)))
                        return false;

                return unlikely(*(u8 *)shadow_addr);
        }

        return false;
}

static __always_inline bool memory_is_poisoned_16(unsigned long addr)
{
        u32 *shadow_addr = (u32 *)kasan_mem_to_shadow((void *)addr);

        if (unlikely(*shadow_addr)) {
                u16 shadow_first_bytes = *(u16 *)shadow_addr;

                if (unlikely(shadow_first_bytes))
                        return true;

                /*
                 * If two shadow bytes cover the 16-byte access, we don't
                 * need to do anything more. Otherwise, test the last
                 * shadow byte.
                 */
                if (likely(IS_ALIGNED(addr, KASAN_SHADOW_SCALE_SIZE)))
                        return false;

                return memory_is_poisoned_1(addr + 15);
        }

        return false;
}
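
/*
 * When the 16-byte access is 8-byte aligned, it is fully described by
 * the first two shadow bytes, which were just tested as a u16. An
 * unaligned access spills into a third granule, so the last byte
 * (addr + 15) has to be checked separately.
 */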

static __always_inline unsigned long bytes_is_zero(const u8 *start,
                                        size_t size)
{
        while (size) {
                if (unlikely(*start))
                        return (unsigned long)start;
                start++;
                size--;
        }

        return 0;
}

static __always_inline unsigned long memory_is_zero(const void *start,
                                                const void *end)
{
        unsigned int words;
        unsigned long ret;
        unsigned int prefix = (unsigned long)start % 8;

        if (end - start <= 16)
                return bytes_is_zero(start, end - start);

        if (prefix) {
                prefix = 8 - prefix;
                ret = bytes_is_zero(start, prefix);
                if (unlikely(ret))
                        return ret;
                start += prefix;
        }

        words = (end - start) / 8;
        while (words) {
                if (unlikely(*(u64 *)start))
                        return bytes_is_zero(start, 8);
                start += 8;
                words--;
        }

        return bytes_is_zero(start, (end - start) % 8);
}
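
/*
 * memory_is_zero() scans the shadow word-at-a-time: leading bytes up
 * to an 8-byte boundary, then whole u64 words, then the tail. A
 * nonzero word is rescanned bytewise so the address of the first
 * nonzero shadow byte is returned to the caller for reporting.
 */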

static __always_inline bool memory_is_poisoned_n(unsigned long addr,
                                                size_t size)
{
        unsigned long ret;

        ret = memory_is_zero(kasan_mem_to_shadow((void *)addr),
                        kasan_mem_to_shadow((void *)addr + size - 1) + 1);

        if (unlikely(ret)) {
                unsigned long last_byte = addr + size - 1;
                s8 *last_shadow = (s8 *)kasan_mem_to_shadow((void *)last_byte);

                if (unlikely(ret != (unsigned long)last_shadow ||
                        ((long)(last_byte & KASAN_SHADOW_MASK) >= *last_shadow)))
                        return true;
        }
        return false;
}

static __always_inline bool memory_is_poisoned(unsigned long addr, size_t size)
{
        if (__builtin_constant_p(size)) {
                switch (size) {
                case 1:
                        return memory_is_poisoned_1(addr);
                case 2:
                        return memory_is_poisoned_2(addr);
                case 4:
                        return memory_is_poisoned_4(addr);
                case 8:
                        return memory_is_poisoned_8(addr);
                case 16:
                        return memory_is_poisoned_16(addr);
                default:
                        BUILD_BUG();
                }
        }

        return memory_is_poisoned_n(addr, size);
}

static __always_inline void check_memory_region_inline(unsigned long addr,
                                                size_t size, bool write,
                                                unsigned long ret_ip)
{
        if (unlikely(size == 0))
                return;

        if (unlikely((void *)addr <
                kasan_shadow_to_mem((void *)KASAN_SHADOW_START))) {
                kasan_report(addr, size, write, ret_ip);
                return;
        }

        if (likely(!memory_is_poisoned(addr, size)))
                return;

        kasan_report(addr, size, write, ret_ip);
}
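
/*
 * Addresses below the shadow-mapped range (NULL and other non-kernel
 * pointers) have no shadow bytes to consult, so any access to them is
 * reported unconditionally.
 */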

static void check_memory_region(unsigned long addr,
                                size_t size, bool write,
                                unsigned long ret_ip)
{
        check_memory_region_inline(addr, size, write, ret_ip);
}

void kasan_check_read(const void *p, unsigned int size)
{
        check_memory_region((unsigned long)p, size, false, _RET_IP_);
}
EXPORT_SYMBOL(kasan_check_read);

void kasan_check_write(const void *p, unsigned int size)
{
        check_memory_region((unsigned long)p, size, true, _RET_IP_);
}
EXPORT_SYMBOL(kasan_check_write);

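/*
 * String functions are not instrumented by the compiler, so KASAN
 * wraps memset/memmove/memcpy with explicit checks and forwards to
 * the real __memset/__memmove/__memcpy implementations. The #undef
 * is needed where the architecture defines these as macros.
 */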
#undef memset
void *memset(void *addr, int c, size_t len)
{
        check_memory_region((unsigned long)addr, len, true, _RET_IP_);

        return __memset(addr, c, len);
}

#undef memmove
void *memmove(void *dest, const void *src, size_t len)
{
        check_memory_region((unsigned long)src, len, false, _RET_IP_);
        check_memory_region((unsigned long)dest, len, true, _RET_IP_);

        return __memmove(dest, src, len);
}

#undef memcpy
void *memcpy(void *dest, const void *src, size_t len)
{
        check_memory_region((unsigned long)src, len, false, _RET_IP_);
        check_memory_region((unsigned long)dest, len, true, _RET_IP_);

        return __memcpy(dest, src, len);
}

void kasan_alloc_pages(struct page *page, unsigned int order)
{
        if (likely(!PageHighMem(page)))
                kasan_unpoison_shadow(page_address(page), PAGE_SIZE << order);
}

void kasan_free_pages(struct page *page, unsigned int order)
{
        if (likely(!PageHighMem(page)))
                kasan_poison_shadow(page_address(page),
                                PAGE_SIZE << order,
                                KASAN_FREE_PAGE);
}

/*
 * Adaptive redzone policy, taken from the userspace AddressSanitizer
 * runtime: larger allocations get larger redzones.
 */
static size_t optimal_redzone(size_t object_size)
{
        int rz =
                object_size <= 64        - 16   ? 16 :
                object_size <= 128       - 32   ? 32 :
                object_size <= 512       - 64   ? 64 :
                object_size <= 4096      - 128  ? 128 :
                object_size <= (1 << 14) - 256  ? 256 :
                object_size <= (1 << 15) - 512  ? 512 :
                object_size <= (1 << 16) - 1024 ? 1024 : 2048;
        return rz;
}
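
/*
 * For example, a 100-byte object falls into the "<= 512 - 64" bucket
 * and gets a 64-byte redzone, while a 1024-byte object falls into the
 * "<= 4096 - 128" bucket and gets 128 bytes.
 */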

void kasan_cache_create(struct kmem_cache *cache, size_t *size,
                        unsigned long *flags)
{
        int redzone_adjust;
        int orig_size = *size;

        /* Add alloc meta. */
        cache->kasan_info.alloc_meta_offset = *size;
        *size += sizeof(struct kasan_alloc_meta);

        /* Add free meta. */
        if (cache->flags & SLAB_DESTROY_BY_RCU || cache->ctor ||
            cache->object_size < sizeof(struct kasan_free_meta)) {
                cache->kasan_info.free_meta_offset = *size;
                *size += sizeof(struct kasan_free_meta);
        }
        redzone_adjust = optimal_redzone(cache->object_size) -
                (*size - cache->object_size);

        if (redzone_adjust > 0)
                *size += redzone_adjust;

        *size = min(KMALLOC_MAX_SIZE, max(*size, cache->object_size +
                                        optimal_redzone(cache->object_size)));

        /*
         * If the metadata doesn't fit, don't enable KASAN at all.
         */
        if (*size <= cache->kasan_info.alloc_meta_offset ||
                        *size <= cache->kasan_info.free_meta_offset) {
                cache->kasan_info.alloc_meta_offset = 0;
                cache->kasan_info.free_meta_offset = 0;
                *size = orig_size;
                return;
        }

        *flags |= SLAB_KASAN;
}
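
/*
 * The resulting slab object layout is roughly:
 *
 *   | object_size | alloc meta | (free meta) | rest of redzone |
 *
 * Free metadata is placed in the redzone only when it cannot live in
 * the object itself (RCU-freed caches, caches with constructors, or
 * objects smaller than struct kasan_free_meta); otherwise it reuses
 * the freed object's payload.
 */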

void kasan_cache_shrink(struct kmem_cache *cache)
{
        quarantine_remove_cache(cache);
}

void kasan_cache_destroy(struct kmem_cache *cache)
{
        quarantine_remove_cache(cache);
}

size_t kasan_metadata_size(struct kmem_cache *cache)
{
        return (cache->kasan_info.alloc_meta_offset ?
                sizeof(struct kasan_alloc_meta) : 0) +
                (cache->kasan_info.free_meta_offset ?
                sizeof(struct kasan_free_meta) : 0);
}

void kasan_poison_slab(struct page *page)
{
        kasan_poison_shadow(page_address(page),
                        PAGE_SIZE << compound_order(page),
                        KASAN_KMALLOC_REDZONE);
}

void kasan_unpoison_object_data(struct kmem_cache *cache, void *object)
{
        kasan_unpoison_shadow(object, cache->object_size);
}

void kasan_poison_object_data(struct kmem_cache *cache, void *object)
{
        kasan_poison_shadow(object,
                        round_up(cache->object_size, KASAN_SHADOW_SCALE_SIZE),
                        KASAN_KMALLOC_REDZONE);
}

static inline int in_irqentry_text(unsigned long ptr)
{
        return (ptr >= (unsigned long)&__irqentry_text_start &&
                ptr < (unsigned long)&__irqentry_text_end) ||
                (ptr >= (unsigned long)&__softirqentry_text_start &&
                 ptr < (unsigned long)&__softirqentry_text_end);
}

static inline void filter_irq_stacks(struct stack_trace *trace)
{
        int i;

        if (!trace->nr_entries)
                return;
        for (i = 0; i < trace->nr_entries; i++)
                if (in_irqentry_text(trace->entries[i])) {
                        /* Include the irqentry function in the stack trace. */
                        trace->nr_entries = i + 1;
                        break;
                }
}

static inline depot_stack_handle_t save_stack(gfp_t flags)
{
        unsigned long entries[KASAN_STACK_DEPTH];
        struct stack_trace trace = {
                .nr_entries = 0,
                .entries = entries,
                .max_entries = KASAN_STACK_DEPTH,
                .skip = 0
        };

        save_stack_trace(&trace);
        filter_irq_stacks(&trace);
        if (trace.nr_entries != 0 &&
            trace.entries[trace.nr_entries-1] == ULONG_MAX)
                trace.nr_entries--;

        return depot_save_stack(&trace, flags);
}
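
/*
 * The trace is stored in the stack depot, which deduplicates identical
 * traces and hands back a compact handle, so each tracked object costs
 * one depot_stack_handle_t rather than a full copy of the trace.
 * Frames below the irq entry point are trimmed first so the frames of
 * the interrupted task don't make otherwise identical traces distinct.
 */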

static inline void set_track(struct kasan_track *track, gfp_t flags)
{
        track->pid = current->pid;
        track->stack = save_stack(flags);
}

struct kasan_alloc_meta *get_alloc_info(struct kmem_cache *cache,
                                        const void *object)
{
        BUILD_BUG_ON(sizeof(struct kasan_alloc_meta) > 32);
        return (void *)object + cache->kasan_info.alloc_meta_offset;
}

struct kasan_free_meta *get_free_info(struct kmem_cache *cache,
                                      const void *object)
{
        BUILD_BUG_ON(sizeof(struct kasan_free_meta) > 32);
        return (void *)object + cache->kasan_info.free_meta_offset;
}

void kasan_init_slab_obj(struct kmem_cache *cache, const void *object)
{
        struct kasan_alloc_meta *alloc_info;

        if (!(cache->flags & SLAB_KASAN))
                return;

        alloc_info = get_alloc_info(cache, object);
        __memset(alloc_info, 0, sizeof(*alloc_info));
}

void kasan_slab_alloc(struct kmem_cache *cache, void *object, gfp_t flags)
{
        kasan_kmalloc(cache, object, cache->object_size, flags);
}

static void kasan_poison_slab_free(struct kmem_cache *cache, void *object)
{
        unsigned long size = cache->object_size;
        unsigned long rounded_up_size = round_up(size, KASAN_SHADOW_SCALE_SIZE);

        /* RCU slabs could legally be used after free within the RCU grace period */
        if (unlikely(cache->flags & SLAB_DESTROY_BY_RCU))
                return;

        kasan_poison_shadow(object, rounded_up_size, KASAN_KMALLOC_FREE);
}

bool kasan_slab_free(struct kmem_cache *cache, void *object)
{
        s8 shadow_byte;

        /* RCU slabs could legally be used after free within the RCU grace period */
        if (unlikely(cache->flags & SLAB_DESTROY_BY_RCU))
                return false;

        shadow_byte = READ_ONCE(*(s8 *)kasan_mem_to_shadow(object));
        if (shadow_byte < 0 || shadow_byte >= KASAN_SHADOW_SCALE_SIZE) {
                kasan_report_double_free(cache, object, shadow_byte);
                return true;
        }

        kasan_poison_slab_free(cache, object);

        if (unlikely(!(cache->flags & SLAB_KASAN)))
                return false;

        set_track(&get_alloc_info(cache, object)->free_track, GFP_NOWAIT);
        quarantine_put(get_free_info(cache, object), cache);
        return true;
}
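
/*
 * A true return tells the slab allocator to skip the actual free: the
 * object has been poisoned as KASAN_KMALLOC_FREE and parked in the
 * quarantine, which keeps recently freed memory out of circulation so
 * use-after-free accesses are likely to hit still-poisoned shadow.
 */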

void kasan_kmalloc(struct kmem_cache *cache, const void *object, size_t size,
                   gfp_t flags)
{
        unsigned long redzone_start;
        unsigned long redzone_end;

        if (gfpflags_allow_blocking(flags))
                quarantine_reduce();

        if (unlikely(object == NULL))
                return;

        redzone_start = round_up((unsigned long)(object + size),
                                KASAN_SHADOW_SCALE_SIZE);
        redzone_end = round_up((unsigned long)object + cache->object_size,
                                KASAN_SHADOW_SCALE_SIZE);

        kasan_unpoison_shadow(object, size);
        kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start,
                KASAN_KMALLOC_REDZONE);

        if (cache->flags & SLAB_KASAN)
                set_track(&get_alloc_info(cache, object)->alloc_track, flags);
}
EXPORT_SYMBOL(kasan_kmalloc);
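
/*
 * Worked example (assuming KASAN_SHADOW_SCALE_SIZE == 8 and an
 * 8-byte-aligned object): kmalloc(100, ...) served from a cache with
 * object_size 128 unpoisons bytes 0..99, encodes "4 bytes accessible"
 * in the shadow of the partial granule covering bytes 96..103, and
 * poisons bytes 104..127 as KASAN_KMALLOC_REDZONE, so reads past the
 * requested size are caught even though they stay inside the object.
 */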

void kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags)
{
        struct page *page;
        unsigned long redzone_start;
        unsigned long redzone_end;

        if (gfpflags_allow_blocking(flags))
                quarantine_reduce();

        if (unlikely(ptr == NULL))
                return;

        page = virt_to_page(ptr);
        redzone_start = round_up((unsigned long)(ptr + size),
                                KASAN_SHADOW_SCALE_SIZE);
        redzone_end = (unsigned long)ptr + (PAGE_SIZE << compound_order(page));

        kasan_unpoison_shadow(ptr, size);
        kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start,
                KASAN_PAGE_REDZONE);
}

void kasan_krealloc(const void *object, size_t size, gfp_t flags)
{
        struct page *page;

        if (unlikely(object == ZERO_SIZE_PTR))
                return;

        page = virt_to_head_page(object);

        if (unlikely(!PageSlab(page)))
                kasan_kmalloc_large(object, size, flags);
        else
                kasan_kmalloc(page->slab_cache, object, size, flags);
}

void kasan_poison_kfree(void *ptr)
{
        struct page *page;

        page = virt_to_head_page(ptr);

        if (unlikely(!PageSlab(page)))
                kasan_poison_shadow(ptr, PAGE_SIZE << compound_order(page),
                                KASAN_FREE_PAGE);
        else
                kasan_poison_slab_free(page->slab_cache, ptr);
}

void kasan_kfree_large(const void *ptr)
{
        struct page *page = virt_to_page(ptr);

        kasan_poison_shadow(ptr, PAGE_SIZE << compound_order(page),
                        KASAN_FREE_PAGE);
}

int kasan_module_alloc(void *addr, size_t size)
{
        void *ret;
        size_t shadow_size;
        unsigned long shadow_start;

        shadow_start = (unsigned long)kasan_mem_to_shadow(addr);
        shadow_size = round_up(size >> KASAN_SHADOW_SCALE_SHIFT,
                        PAGE_SIZE);

        if (WARN_ON(!PAGE_ALIGNED(shadow_start)))
                return -EINVAL;

        ret = __vmalloc_node_range(shadow_size, 1, shadow_start,
                        shadow_start + shadow_size,
                        GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
                        PAGE_KERNEL, VM_NO_GUARD, NUMA_NO_NODE,
                        __builtin_return_address(0));

        if (ret) {
                find_vm_area(addr)->flags |= VM_KASAN;
                kmemleak_ignore(ret);
                return 0;
        }

        return -ENOMEM;
}
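
/*
 * Module mappings live in a dedicated address range, so their shadow
 * is not covered by the shadow mapping set up at boot. It is allocated
 * here on demand, page-aligned, and the vm_struct is tagged VM_KASAN
 * so kasan_free_shadow() below can release the shadow together with
 * the module area.
 */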

void kasan_free_shadow(const struct vm_struct *vm)
{
        if (vm->flags & VM_KASAN)
                vfree(kasan_mem_to_shadow(vm->addr));
}

static void register_global(struct kasan_global *global)
{
        size_t aligned_size = round_up(global->size, KASAN_SHADOW_SCALE_SIZE);

        kasan_unpoison_shadow(global->beg, global->size);

        kasan_poison_shadow(global->beg + aligned_size,
                global->size_with_redzone - aligned_size,
                KASAN_GLOBAL_REDZONE);
}

void __asan_register_globals(struct kasan_global *globals, size_t size)
{
        int i;

        for (i = 0; i < size; i++)
                register_global(&globals[i]);
}
EXPORT_SYMBOL(__asan_register_globals);

void __asan_unregister_globals(struct kasan_global *globals, size_t size)
{
}
EXPORT_SYMBOL(__asan_unregister_globals);

#define DEFINE_ASAN_LOAD_STORE(size)                                    \
        void __asan_load##size(unsigned long addr)                      \
        {                                                               \
                check_memory_region_inline(addr, size, false, _RET_IP_);\
        }                                                               \
        EXPORT_SYMBOL(__asan_load##size);                               \
        __alias(__asan_load##size)                                      \
        void __asan_load##size##_noabort(unsigned long);                \
        EXPORT_SYMBOL(__asan_load##size##_noabort);                     \
        void __asan_store##size(unsigned long addr)                     \
        {                                                               \
                check_memory_region_inline(addr, size, true, _RET_IP_); \
        }                                                               \
        EXPORT_SYMBOL(__asan_store##size);                              \
        __alias(__asan_store##size)                                     \
        void __asan_store##size##_noabort(unsigned long);               \
        EXPORT_SYMBOL(__asan_store##size##_noabort)

DEFINE_ASAN_LOAD_STORE(1);
DEFINE_ASAN_LOAD_STORE(2);
DEFINE_ASAN_LOAD_STORE(4);
DEFINE_ASAN_LOAD_STORE(8);
DEFINE_ASAN_LOAD_STORE(16);
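
/*
 * Each DEFINE_ASAN_LOAD_STORE(N) above expands to the entry points the
 * compiler emits for N-byte accesses, e.g. for N == 8:
 *
 *   void __asan_load8(unsigned long addr)
 *   {
 *           check_memory_region_inline(addr, 8, false, _RET_IP_);
 *   }
 *
 * plus an exported __asan_load8_noabort alias; newer compilers call
 * the _noabort variants in the kernel, where a bad access is reported
 * but execution continues.
 */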

void __asan_loadN(unsigned long addr, size_t size)
{
        check_memory_region(addr, size, false, _RET_IP_);
}
EXPORT_SYMBOL(__asan_loadN);

__alias(__asan_loadN)
void __asan_loadN_noabort(unsigned long, size_t);
EXPORT_SYMBOL(__asan_loadN_noabort);

void __asan_storeN(unsigned long addr, size_t size)
{
        check_memory_region(addr, size, true, _RET_IP_);
}
EXPORT_SYMBOL(__asan_storeN);

__alias(__asan_storeN)
void __asan_storeN_noabort(unsigned long, size_t);
EXPORT_SYMBOL(__asan_storeN_noabort);

/* to shut up compiler complaints */
void __asan_handle_no_return(void) {}
EXPORT_SYMBOL(__asan_handle_no_return);

#ifdef CONFIG_MEMORY_HOTPLUG
static int kasan_mem_notifier(struct notifier_block *nb,
                        unsigned long action, void *data)
{
        return (action == MEM_GOING_ONLINE) ? NOTIFY_BAD : NOTIFY_OK;
}

static int __init kasan_memhotplug_init(void)
{
        pr_info("WARNING: KASAN doesn't support memory hot-add\n");
        pr_info("Memory hot-add will be disabled\n");

        hotplug_memory_notifier(kasan_mem_notifier, 0);

        return 0;
}

module_init(kasan_memhotplug_init);
#endif