mm/kasan/kasan.c
/*
 * This file contains shadow memory manipulation code.
 *
 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
 *
 * Some code borrowed from https://github.com/xairy/kasan-prototype by
 *        Andrey Konovalov <adech.fo@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#define DISABLE_BRANCH_PROFILING

#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/kmemleak.h>
#include <linux/linkage.h>
#include <linux/memblock.h>
#include <linux/memory.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/stacktrace.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/vmalloc.h>

#include "kasan.h"
#include "../slab.h"

/*
 * Poisons the shadow memory for 'size' bytes starting from 'addr'.
 * Memory addresses should be aligned to KASAN_SHADOW_SCALE_SIZE.
 */
static void kasan_poison_shadow(const void *address, size_t size, u8 value)
{
        void *shadow_start, *shadow_end;

        shadow_start = kasan_mem_to_shadow(address);
        shadow_end = kasan_mem_to_shadow(address + size);

        memset(shadow_start, value, shadow_end - shadow_start);
}

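/*
 * Unpoison the shadow for the first 'size' bytes at 'address'. When 'size'
 * is not a multiple of KASAN_SHADOW_SCALE_SIZE, the last shadow byte records
 * how many bytes of the final granule are accessible.
 */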
void kasan_unpoison_shadow(const void *address, size_t size)
{
        kasan_poison_shadow(address, size, 0);

        if (size & KASAN_SHADOW_MASK) {
                u8 *shadow = (u8 *)kasan_mem_to_shadow(address + size);
                *shadow = size & KASAN_SHADOW_MASK;
        }
}

static void __kasan_unpoison_stack(struct task_struct *task, void *sp)
{
        void *base = task_stack_page(task);
        size_t size = sp - base;

        kasan_unpoison_shadow(base, size);
}

/* Unpoison the entire stack for a task. */
void kasan_unpoison_task_stack(struct task_struct *task)
{
        __kasan_unpoison_stack(task, task_stack_page(task) + THREAD_SIZE);
}

/* Unpoison the stack for the current task beyond a watermark sp value. */
asmlinkage void kasan_unpoison_remaining_stack(void *sp)
{
        __kasan_unpoison_stack(current, sp);
}

/*
 * All functions below are always inlined so that the compiler can perform
 * better optimizations in each of __asan_loadX/__asan_storeX depending on
 * the memory access size X.
 */

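/*
 * Shadow byte semantics: 0 means the whole 8-byte granule is accessible,
 * a value of 1..7 means only that many leading bytes are accessible, and
 * negative values mark redzones and freed memory.
 */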
static __always_inline bool memory_is_poisoned_1(unsigned long addr)
{
        s8 shadow_value = *(s8 *)kasan_mem_to_shadow((void *)addr);

        if (unlikely(shadow_value)) {
                s8 last_accessible_byte = addr & KASAN_SHADOW_MASK;
                return unlikely(last_accessible_byte >= shadow_value);
        }

        return false;
}

static __always_inline bool memory_is_poisoned_2(unsigned long addr)
{
        u16 *shadow_addr = (u16 *)kasan_mem_to_shadow((void *)addr);

        if (unlikely(*shadow_addr)) {
                if (memory_is_poisoned_1(addr + 1))
                        return true;

                /*
                 * If a single shadow byte covers the 2-byte access, we don't
                 * need to do anything more. Otherwise, test the first
                 * shadow byte.
                 */
                if (likely(((addr + 1) & KASAN_SHADOW_MASK) != 0))
                        return false;

                return unlikely(*(u8 *)shadow_addr);
        }

        return false;
}

static __always_inline bool memory_is_poisoned_4(unsigned long addr)
{
        u16 *shadow_addr = (u16 *)kasan_mem_to_shadow((void *)addr);

        if (unlikely(*shadow_addr)) {
                if (memory_is_poisoned_1(addr + 3))
                        return true;

                /*
                 * If a single shadow byte covers the 4-byte access, we don't
                 * need to do anything more. Otherwise, test the first
                 * shadow byte.
                 */
                if (likely(((addr + 3) & KASAN_SHADOW_MASK) >= 3))
                        return false;

                return unlikely(*(u8 *)shadow_addr);
        }

        return false;
}

static __always_inline bool memory_is_poisoned_8(unsigned long addr)
{
        u16 *shadow_addr = (u16 *)kasan_mem_to_shadow((void *)addr);

        if (unlikely(*shadow_addr)) {
                if (memory_is_poisoned_1(addr + 7))
                        return true;

                /*
                 * If a single shadow byte covers the 8-byte access, we don't
                 * need to do anything more. Otherwise, test the first
                 * shadow byte.
                 */
                if (likely(IS_ALIGNED(addr, KASAN_SHADOW_SCALE_SIZE)))
                        return false;

                return unlikely(*(u8 *)shadow_addr);
        }

        return false;
}

static __always_inline bool memory_is_poisoned_16(unsigned long addr)
{
        u32 *shadow_addr = (u32 *)kasan_mem_to_shadow((void *)addr);

        if (unlikely(*shadow_addr)) {
                u16 shadow_first_bytes = *(u16 *)shadow_addr;

                if (unlikely(shadow_first_bytes))
                        return true;

                /*
                 * If two shadow bytes cover the 16-byte access, we don't
                 * need to do anything more. Otherwise, test the last
                 * shadow byte.
                 */
                if (likely(IS_ALIGNED(addr, KASAN_SHADOW_SCALE_SIZE)))
                        return false;

                return memory_is_poisoned_1(addr + 15);
        }

        return false;
}

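/*
 * Scan a shadow range for a non-zero byte. bytes_is_zero() walks byte by
 * byte; memory_is_zero() handles an unaligned prefix, checks the aligned
 * middle a word at a time, and finishes the tail byte by byte. Both return
 * the address of the first non-zero byte, or 0 if the range is clean.
 */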
static __always_inline unsigned long bytes_is_zero(const u8 *start,
                                        size_t size)
{
        while (size) {
                if (unlikely(*start))
                        return (unsigned long)start;
                start++;
                size--;
        }

        return 0;
}

static __always_inline unsigned long memory_is_zero(const void *start,
                                                const void *end)
{
        unsigned int words;
        unsigned long ret;
        unsigned int prefix = (unsigned long)start % 8;

        if (end - start <= 16)
                return bytes_is_zero(start, end - start);

        if (prefix) {
                prefix = 8 - prefix;
                ret = bytes_is_zero(start, prefix);
                if (unlikely(ret))
                        return ret;
                start += prefix;
        }

        words = (end - start) / 8;
        while (words) {
                if (unlikely(*(u64 *)start))
                        return bytes_is_zero(start, 8);
                start += 8;
                words--;
        }

        return bytes_is_zero(start, (end - start) % 8);
}

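/*
 * Generic check for an access of arbitrary size: every shadow byte covering
 * the access must be zero, except that the last one may describe a partially
 * accessible granule that still covers the final byte of the access.
 */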
static __always_inline bool memory_is_poisoned_n(unsigned long addr,
                                                size_t size)
{
        unsigned long ret;

        ret = memory_is_zero(kasan_mem_to_shadow((void *)addr),
                        kasan_mem_to_shadow((void *)addr + size - 1) + 1);

        if (unlikely(ret)) {
                unsigned long last_byte = addr + size - 1;
                s8 *last_shadow = (s8 *)kasan_mem_to_shadow((void *)last_byte);

                if (unlikely(ret != (unsigned long)last_shadow ||
                        ((long)(last_byte & KASAN_SHADOW_MASK) >= *last_shadow)))
                        return true;
        }
        return false;
}

static __always_inline bool memory_is_poisoned(unsigned long addr, size_t size)
{
        if (__builtin_constant_p(size)) {
                switch (size) {
                case 1:
                        return memory_is_poisoned_1(addr);
                case 2:
                        return memory_is_poisoned_2(addr);
                case 4:
                        return memory_is_poisoned_4(addr);
                case 8:
                        return memory_is_poisoned_8(addr);
                case 16:
                        return memory_is_poisoned_16(addr);
                default:
                        BUILD_BUG();
                }
        }

        return memory_is_poisoned_n(addr, size);
}

static __always_inline void check_memory_region_inline(unsigned long addr,
                                                size_t size, bool write,
                                                unsigned long ret_ip)
{
        if (unlikely(size == 0))
                return;

        if (unlikely((void *)addr <
                kasan_shadow_to_mem((void *)KASAN_SHADOW_START))) {
                kasan_report(addr, size, write, ret_ip);
                return;
        }

        if (likely(!memory_is_poisoned(addr, size)))
                return;

        kasan_report(addr, size, write, ret_ip);
}

static void check_memory_region(unsigned long addr,
                                size_t size, bool write,
                                unsigned long ret_ip)
{
        check_memory_region_inline(addr, size, write, ret_ip);
}

void kasan_check_read(const void *p, unsigned int size)
{
        check_memory_region((unsigned long)p, size, false, _RET_IP_);
}
EXPORT_SYMBOL(kasan_check_read);

void kasan_check_write(const void *p, unsigned int size)
{
        check_memory_region((unsigned long)p, size, true, _RET_IP_);
}
EXPORT_SYMBOL(kasan_check_write);

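/*
 * Interceptors for the string functions: check the whole source and/or
 * destination range before delegating to the uninstrumented __mem*()
 * implementations.
 */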
#undef memset
void *memset(void *addr, int c, size_t len)
{
        check_memory_region((unsigned long)addr, len, true, _RET_IP_);

        return __memset(addr, c, len);
}

#undef memmove
void *memmove(void *dest, const void *src, size_t len)
{
        check_memory_region((unsigned long)src, len, false, _RET_IP_);
        check_memory_region((unsigned long)dest, len, true, _RET_IP_);

        return __memmove(dest, src, len);
}

#undef memcpy
void *memcpy(void *dest, const void *src, size_t len)
{
        check_memory_region((unsigned long)src, len, false, _RET_IP_);
        check_memory_region((unsigned long)dest, len, true, _RET_IP_);

        return __memcpy(dest, src, len);
}

void kasan_alloc_pages(struct page *page, unsigned int order)
{
        if (likely(!PageHighMem(page)))
                kasan_unpoison_shadow(page_address(page), PAGE_SIZE << order);
}

void kasan_free_pages(struct page *page, unsigned int order)
{
        if (likely(!PageHighMem(page)))
                kasan_poison_shadow(page_address(page),
                                PAGE_SIZE << order,
                                KASAN_FREE_PAGE);
}

/*
 * Adaptive redzone policy taken from the userspace AddressSanitizer runtime.
 * For larger allocations larger redzones are used.
 */
static size_t optimal_redzone(size_t object_size)
{
        int rz =
                object_size <= 64        - 16   ? 16 :
                object_size <= 128       - 32   ? 32 :
                object_size <= 512       - 64   ? 64 :
                object_size <= 4096      - 128  ? 128 :
                object_size <= (1 << 14) - 256  ? 256 :
                object_size <= (1 << 15) - 512  ? 512 :
                object_size <= (1 << 16) - 1024 ? 1024 : 2048;
        return rz;
}

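/*
 * Lay out the slab object: object data, then allocation metadata, optional
 * free metadata, and enough trailing space to serve as a redzone. If the
 * metadata would not fit, the layout is reverted and SLAB_KASAN is not set.
 */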
void kasan_cache_create(struct kmem_cache *cache, size_t *size,
                        unsigned long *flags)
{
        int redzone_adjust;
        int orig_size = *size;

        /* Add alloc meta. */
        cache->kasan_info.alloc_meta_offset = *size;
        *size += sizeof(struct kasan_alloc_meta);

        /* Add free meta. */
        if (cache->flags & SLAB_DESTROY_BY_RCU || cache->ctor ||
            cache->object_size < sizeof(struct kasan_free_meta)) {
                cache->kasan_info.free_meta_offset = *size;
                *size += sizeof(struct kasan_free_meta);
        }
        redzone_adjust = optimal_redzone(cache->object_size) -
                (*size - cache->object_size);

        if (redzone_adjust > 0)
                *size += redzone_adjust;

        *size = min(KMALLOC_MAX_SIZE, max(*size, cache->object_size +
                                        optimal_redzone(cache->object_size)));

        /*
         * If the metadata doesn't fit, don't enable KASAN at all.
         */
        if (*size <= cache->kasan_info.alloc_meta_offset ||
                        *size <= cache->kasan_info.free_meta_offset) {
                cache->kasan_info.alloc_meta_offset = 0;
                cache->kasan_info.free_meta_offset = 0;
                *size = orig_size;
                return;
        }

        *flags |= SLAB_KASAN;
}

void kasan_cache_shrink(struct kmem_cache *cache)
{
        quarantine_remove_cache(cache);
}

void kasan_cache_destroy(struct kmem_cache *cache)
{
        quarantine_remove_cache(cache);
}

size_t kasan_metadata_size(struct kmem_cache *cache)
{
        return (cache->kasan_info.alloc_meta_offset ?
                sizeof(struct kasan_alloc_meta) : 0) +
                (cache->kasan_info.free_meta_offset ?
                sizeof(struct kasan_free_meta) : 0);
}

void kasan_poison_slab(struct page *page)
{
        kasan_poison_shadow(page_address(page),
                        PAGE_SIZE << compound_order(page),
                        KASAN_KMALLOC_REDZONE);
}

void kasan_unpoison_object_data(struct kmem_cache *cache, void *object)
{
        kasan_unpoison_shadow(object, cache->object_size);
}

void kasan_poison_object_data(struct kmem_cache *cache, void *object)
{
        kasan_poison_shadow(object,
                        round_up(cache->object_size, KASAN_SHADOW_SCALE_SIZE),
                        KASAN_KMALLOC_REDZONE);
        if (cache->flags & SLAB_KASAN) {
                struct kasan_alloc_meta *alloc_info =
                        get_alloc_info(cache, object);
                alloc_info->state = KASAN_STATE_INIT;
        }
}

static inline int in_irqentry_text(unsigned long ptr)
{
        return (ptr >= (unsigned long)&__irqentry_text_start &&
                ptr < (unsigned long)&__irqentry_text_end) ||
                (ptr >= (unsigned long)&__softirqentry_text_start &&
                 ptr < (unsigned long)&__softirqentry_text_end);
}

static inline void filter_irq_stacks(struct stack_trace *trace)
{
        int i;

        if (!trace->nr_entries)
                return;
        for (i = 0; i < trace->nr_entries; i++)
                if (in_irqentry_text(trace->entries[i])) {
                        /* Include the irqentry function into the stack. */
                        trace->nr_entries = i + 1;
                        break;
                }
}

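/*
 * Capture the current stack trace, trim everything past the IRQ entry
 * point, and store it deduplicated in the stack depot.
 */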
static inline depot_stack_handle_t save_stack(gfp_t flags)
{
        unsigned long entries[KASAN_STACK_DEPTH];
        struct stack_trace trace = {
                .nr_entries = 0,
                .entries = entries,
                .max_entries = KASAN_STACK_DEPTH,
                .skip = 0
        };

        save_stack_trace(&trace);
        filter_irq_stacks(&trace);
        if (trace.nr_entries != 0 &&
            trace.entries[trace.nr_entries-1] == ULONG_MAX)
                trace.nr_entries--;

        return depot_save_stack(&trace, flags);
}

static inline void set_track(struct kasan_track *track, gfp_t flags)
{
        track->pid = current->pid;
        track->stack = save_stack(flags);
}

struct kasan_alloc_meta *get_alloc_info(struct kmem_cache *cache,
                                        const void *object)
{
        BUILD_BUG_ON(sizeof(struct kasan_alloc_meta) > 32);
        return (void *)object + cache->kasan_info.alloc_meta_offset;
}

struct kasan_free_meta *get_free_info(struct kmem_cache *cache,
                                      const void *object)
{
        BUILD_BUG_ON(sizeof(struct kasan_free_meta) > 32);
        return (void *)object + cache->kasan_info.free_meta_offset;
}

void kasan_slab_alloc(struct kmem_cache *cache, void *object, gfp_t flags)
{
        kasan_kmalloc(cache, object, cache->object_size, flags);
}

static void kasan_poison_slab_free(struct kmem_cache *cache, void *object)
{
        unsigned long size = cache->object_size;
        unsigned long rounded_up_size = round_up(size, KASAN_SHADOW_SCALE_SIZE);

        /* RCU slabs could be legally used after free within the RCU period */
        if (unlikely(cache->flags & SLAB_DESTROY_BY_RCU))
                return;

        kasan_poison_shadow(object, rounded_up_size, KASAN_KMALLOC_FREE);
}

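/*
 * On free, poison the object and put it into the quarantine instead of
 * returning it to the allocator right away (returning true tells the slab
 * allocator to skip the actual free). Report a double free if the object
 * is already quarantined or freed.
 */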
bool kasan_slab_free(struct kmem_cache *cache, void *object)
{
        /* RCU slabs could be legally used after free within the RCU period */
        if (unlikely(cache->flags & SLAB_DESTROY_BY_RCU))
                return false;

        if (likely(cache->flags & SLAB_KASAN)) {
                struct kasan_alloc_meta *alloc_info;
                struct kasan_free_meta *free_info;

                alloc_info = get_alloc_info(cache, object);
                free_info = get_free_info(cache, object);

                switch (alloc_info->state) {
                case KASAN_STATE_ALLOC:
                        alloc_info->state = KASAN_STATE_QUARANTINE;
                        quarantine_put(free_info, cache);
                        set_track(&free_info->track, GFP_NOWAIT);
                        kasan_poison_slab_free(cache, object);
                        return true;
                case KASAN_STATE_QUARANTINE:
                case KASAN_STATE_FREE:
                        pr_err("Double free\n");
                        dump_stack();
                        break;
                default:
                        break;
                }
        }
        return false;
}

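/*
 * Mark 'size' bytes of the object as accessible, poison the rest of the
 * allocated slot as a right redzone, and record the allocation stack.
 */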
void kasan_kmalloc(struct kmem_cache *cache, const void *object, size_t size,
                   gfp_t flags)
{
        unsigned long redzone_start;
        unsigned long redzone_end;

        if (flags & __GFP_RECLAIM)
                quarantine_reduce();

        if (unlikely(object == NULL))
                return;

        redzone_start = round_up((unsigned long)(object + size),
                                KASAN_SHADOW_SCALE_SIZE);
        redzone_end = round_up((unsigned long)object + cache->object_size,
                                KASAN_SHADOW_SCALE_SIZE);

        kasan_unpoison_shadow(object, size);
        kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start,
                KASAN_KMALLOC_REDZONE);
        if (cache->flags & SLAB_KASAN) {
                struct kasan_alloc_meta *alloc_info =
                        get_alloc_info(cache, object);

                alloc_info->state = KASAN_STATE_ALLOC;
                alloc_info->alloc_size = size;
                set_track(&alloc_info->track, flags);
        }
}
EXPORT_SYMBOL(kasan_kmalloc);

void kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags)
{
        struct page *page;
        unsigned long redzone_start;
        unsigned long redzone_end;

        if (flags & __GFP_RECLAIM)
                quarantine_reduce();

        if (unlikely(ptr == NULL))
                return;

        page = virt_to_page(ptr);
        redzone_start = round_up((unsigned long)(ptr + size),
                                KASAN_SHADOW_SCALE_SIZE);
        redzone_end = (unsigned long)ptr + (PAGE_SIZE << compound_order(page));

        kasan_unpoison_shadow(ptr, size);
        kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start,
                KASAN_PAGE_REDZONE);
}

void kasan_krealloc(const void *object, size_t size, gfp_t flags)
{
        struct page *page;

        if (unlikely(object == ZERO_SIZE_PTR))
                return;

        page = virt_to_head_page(object);

        if (unlikely(!PageSlab(page)))
                kasan_kmalloc_large(object, size, flags);
        else
                kasan_kmalloc(page->slab_cache, object, size, flags);
}

void kasan_poison_kfree(void *ptr)
{
        struct page *page;

        page = virt_to_head_page(ptr);

        if (unlikely(!PageSlab(page)))
                kasan_poison_shadow(ptr, PAGE_SIZE << compound_order(page),
                                KASAN_FREE_PAGE);
        else
                kasan_poison_slab_free(page->slab_cache, ptr);
}

void kasan_kfree_large(const void *ptr)
{
        struct page *page = virt_to_page(ptr);

        kasan_poison_shadow(ptr, PAGE_SIZE << compound_order(page),
                        KASAN_FREE_PAGE);
}

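/*
 * Allocate shadow memory for a module mapping. The shadow is placed at the
 * address computed by kasan_mem_to_shadow() and is freed together with the
 * module's vm area via kasan_free_shadow().
 */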
int kasan_module_alloc(void *addr, size_t size)
{
        void *ret;
        size_t shadow_size;
        unsigned long shadow_start;

        shadow_start = (unsigned long)kasan_mem_to_shadow(addr);
        shadow_size = round_up(size >> KASAN_SHADOW_SCALE_SHIFT,
                        PAGE_SIZE);

        if (WARN_ON(!PAGE_ALIGNED(shadow_start)))
                return -EINVAL;

        ret = __vmalloc_node_range(shadow_size, 1, shadow_start,
                        shadow_start + shadow_size,
                        GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
                        PAGE_KERNEL, VM_NO_GUARD, NUMA_NO_NODE,
                        __builtin_return_address(0));

        if (ret) {
                find_vm_area(addr)->flags |= VM_KASAN;
                kmemleak_ignore(ret);
                return 0;
        }

        return -ENOMEM;
}

void kasan_free_shadow(const struct vm_struct *vm)
{
        if (vm->flags & VM_KASAN)
                vfree(kasan_mem_to_shadow(vm->addr));
}

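/*
 * Constructor hooks emitted by the compiler for instrumented global
 * variables: unpoison the variable itself and poison its redzone.
 */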
static void register_global(struct kasan_global *global)
{
        size_t aligned_size = round_up(global->size, KASAN_SHADOW_SCALE_SIZE);

        kasan_unpoison_shadow(global->beg, global->size);

        kasan_poison_shadow(global->beg + aligned_size,
                global->size_with_redzone - aligned_size,
                KASAN_GLOBAL_REDZONE);
}

void __asan_register_globals(struct kasan_global *globals, size_t size)
{
        int i;

        for (i = 0; i < size; i++)
                register_global(&globals[i]);
}
EXPORT_SYMBOL(__asan_register_globals);

void __asan_unregister_globals(struct kasan_global *globals, size_t size)
{
}
EXPORT_SYMBOL(__asan_unregister_globals);

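/*
 * Out-of-line instrumentation entry points called from compiler-generated
 * code for fixed-size and variable-size accesses. The *_noabort variants
 * are plain aliases of the same checks.
 */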
#define DEFINE_ASAN_LOAD_STORE(size)                                    \
        void __asan_load##size(unsigned long addr)                      \
        {                                                               \
                check_memory_region_inline(addr, size, false, _RET_IP_);\
        }                                                               \
        EXPORT_SYMBOL(__asan_load##size);                               \
        __alias(__asan_load##size)                                      \
        void __asan_load##size##_noabort(unsigned long);                \
        EXPORT_SYMBOL(__asan_load##size##_noabort);                     \
        void __asan_store##size(unsigned long addr)                     \
        {                                                               \
                check_memory_region_inline(addr, size, true, _RET_IP_); \
        }                                                               \
        EXPORT_SYMBOL(__asan_store##size);                              \
        __alias(__asan_store##size)                                     \
        void __asan_store##size##_noabort(unsigned long);               \
        EXPORT_SYMBOL(__asan_store##size##_noabort)

DEFINE_ASAN_LOAD_STORE(1);
DEFINE_ASAN_LOAD_STORE(2);
DEFINE_ASAN_LOAD_STORE(4);
DEFINE_ASAN_LOAD_STORE(8);
DEFINE_ASAN_LOAD_STORE(16);

void __asan_loadN(unsigned long addr, size_t size)
{
        check_memory_region(addr, size, false, _RET_IP_);
}
EXPORT_SYMBOL(__asan_loadN);

__alias(__asan_loadN)
void __asan_loadN_noabort(unsigned long, size_t);
EXPORT_SYMBOL(__asan_loadN_noabort);

void __asan_storeN(unsigned long addr, size_t size)
{
        check_memory_region(addr, size, true, _RET_IP_);
}
EXPORT_SYMBOL(__asan_storeN);

__alias(__asan_storeN)
void __asan_storeN_noabort(unsigned long, size_t);
EXPORT_SYMBOL(__asan_storeN_noabort);

/* to shut up compiler complaints */
void __asan_handle_no_return(void) {}
EXPORT_SYMBOL(__asan_handle_no_return);

#ifdef CONFIG_MEMORY_HOTPLUG
static int kasan_mem_notifier(struct notifier_block *nb,
                        unsigned long action, void *data)
{
        return (action == MEM_GOING_ONLINE) ? NOTIFY_BAD : NOTIFY_OK;
}

static int __init kasan_memhotplug_init(void)
{
        pr_info("WARNING: KASAN doesn't support memory hot-add\n");
        pr_info("Memory hot-add will be disabled\n");

        hotplug_memory_notifier(kasan_mem_notifier, 0);

        return 0;
}

module_init(kasan_memhotplug_init);
#endif