Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mszeredi...
diff --git a/mm/kasan/kasan.c b/mm/kasan/kasan.c
index 6845f92..88af13c 100644
--- a/mm/kasan/kasan.c
+++ b/mm/kasan/kasan.c
@@ -351,7 +351,6 @@ void kasan_free_pages(struct page *page, unsigned int order)
                                KASAN_FREE_PAGE);
 }
 
-#ifdef CONFIG_SLAB
 /*
  * Adaptive redzone policy taken from the userspace AddressSanitizer runtime.
  * For larger allocations larger redzones are used.
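
The policy this comment refers to is implemented by the optimal_redzone() helper used later in kasan_cache_create(); it is not part of this hunk. As a rough sketch of the idea only (the helper name and the cut-offs below are illustrative, not taken from this patch):

        /* Illustrative only -- the real cut-offs live in optimal_redzone():
         * small objects get a ~16 byte redzone, and the redzone grows with
         * object_size up to a 2048 byte cap for very large objects.
         */
        static size_t example_redzone(size_t object_size)
        {
                if (object_size <= 64)
                        return 16;
                if (object_size <= 4096)
                        return 128;
                return 2048;
        }
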
@@ -373,16 +372,8 @@ void kasan_cache_create(struct kmem_cache *cache, size_t *size,
                        unsigned long *flags)
 {
        int redzone_adjust;
-       /* Make sure the adjusted size is still less than
-        * KMALLOC_MAX_CACHE_SIZE.
-        * TODO: this check is only useful for SLAB, but not SLUB. We'll need
-        * to skip it for SLUB when it starts using kasan_cache_create().
-        */
-       if (*size > KMALLOC_MAX_CACHE_SIZE -
-           sizeof(struct kasan_alloc_meta) -
-           sizeof(struct kasan_free_meta))
-               return;
-       *flags |= SLAB_KASAN;
+       int orig_size = *size;
+
        /* Add alloc meta. */
        cache->kasan_info.alloc_meta_offset = *size;
        *size += sizeof(struct kasan_alloc_meta);
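
Together with the free-meta reservation (the unchanged conditional falls between this hunk and the next), these two statements grow each slab object so that KASAN's metadata lives inline, after the payload. A sketch of the resulting layout, assuming both metadata structs get reserved (the free meta slot is only added for some caches, roughly: RCU caches, caches with constructors, and objects too small to hold the free metadata in place):

        /*
         * [ payload: cache->object_size bytes                      ]
         * [ struct kasan_alloc_meta   <- kasan_info.alloc_meta_offset ]
         * [ struct kasan_free_meta    <- kasan_info.free_meta_offset (optional) ]
         * [ redzone padding up to optimal_redzone()                ]
         */
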
@@ -395,14 +386,26 @@ void kasan_cache_create(struct kmem_cache *cache, size_t *size,
        }
        redzone_adjust = optimal_redzone(cache->object_size) -
                (*size - cache->object_size);
+
        if (redzone_adjust > 0)
                *size += redzone_adjust;
-       *size = min(KMALLOC_MAX_CACHE_SIZE,
-                   max(*size,
-                       cache->object_size +
-                       optimal_redzone(cache->object_size)));
+
+       *size = min(KMALLOC_MAX_SIZE, max(*size, cache->object_size +
+                                       optimal_redzone(cache->object_size)));
+
+       /*
+        * If the metadata doesn't fit, don't enable KASAN at all.
+        */
+       if (*size <= cache->kasan_info.alloc_meta_offset ||
+                       *size <= cache->kasan_info.free_meta_offset) {
+               cache->kasan_info.alloc_meta_offset = 0;
+               cache->kasan_info.free_meta_offset = 0;
+               *size = orig_size;
+               return;
+       }
+
+       *flags |= SLAB_KASAN;
 }
-#endif
 
 void kasan_cache_shrink(struct kmem_cache *cache)
 {
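
To make the new clamping and fallback concrete, a worked example with assumed numbers (the struct sizes and redzone value are illustrative):

        /*
         * Assume object_size = 256, sizeof(struct kasan_alloc_meta) = 32,
         * no free meta slot, optimal_redzone(256) = 64:
         *
         *   alloc_meta_offset = 256, *size = 256 + 32 = 288
         *   redzone_adjust    = 64 - (288 - 256) = 32 > 0  ->  *size = 320
         *   *size = min(KMALLOC_MAX_SIZE, max(320, 256 + 64)) = 320
         *
         * 320 is larger than both metadata offsets, so SLAB_KASAN is set.
         * Only when the KMALLOC_MAX_SIZE clamp squeezes *size down to (or
         * below) one of the offsets does the new fallback clear the offsets,
         * restore orig_size and leave SLAB_KASAN unset.
         */
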
@@ -414,6 +417,14 @@ void kasan_cache_destroy(struct kmem_cache *cache)
        quarantine_remove_cache(cache);
 }
 
+size_t kasan_metadata_size(struct kmem_cache *cache)
+{
+       return (cache->kasan_info.alloc_meta_offset ?
+               sizeof(struct kasan_alloc_meta) : 0) +
+               (cache->kasan_info.free_meta_offset ?
+               sizeof(struct kasan_free_meta) : 0);
+}
+
 void kasan_poison_slab(struct page *page)
 {
        kasan_poison_shadow(page_address(page),
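
kasan_metadata_size() is new: it reports how many bytes of each object are taken up by KASAN's inline metadata, presumably so the slab allocator can account for that overhead when laying out or dumping objects. A minimal, hypothetical caller just to show the intended use:

        static void report_kasan_overhead(struct kmem_cache *cache)
        {
                pr_debug("cache %s: %zu bytes of KASAN metadata per object\n",
                         cache->name, kasan_metadata_size(cache));
        }
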
@@ -431,16 +442,8 @@ void kasan_poison_object_data(struct kmem_cache *cache, void *object)
        kasan_poison_shadow(object,
                        round_up(cache->object_size, KASAN_SHADOW_SCALE_SIZE),
                        KASAN_KMALLOC_REDZONE);
-#ifdef CONFIG_SLAB
-       if (cache->flags & SLAB_KASAN) {
-               struct kasan_alloc_meta *alloc_info =
-                       get_alloc_info(cache, object);
-               alloc_info->state = KASAN_STATE_INIT;
-       }
-#endif
 }
 
-#ifdef CONFIG_SLAB
 static inline int in_irqentry_text(unsigned long ptr)
 {
        return (ptr >= (unsigned long)&__irqentry_text_start &&
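
For context on the poisoning in the kasan_poison_object_data() hunk above: shadow memory is tracked at an 8-byte granularity (KASAN_SHADOW_SCALE_SIZE, assuming the usual value of 8), which is why object_size is rounded up before poisoning.

        /*
         * Example: a 20-byte object poisons round_up(20, 8) = 24 bytes,
         * i.e. three shadow bytes are set to KASAN_KMALLOC_REDZONE, so any
         * access to the object is reported as invalid until
         * kasan_unpoison_object_data() or kasan_kmalloc() clears the shadow.
         */
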
@@ -501,7 +504,17 @@ struct kasan_free_meta *get_free_info(struct kmem_cache *cache,
        BUILD_BUG_ON(sizeof(struct kasan_free_meta) > 32);
        return (void *)object + cache->kasan_info.free_meta_offset;
 }
-#endif
+
+void kasan_init_slab_obj(struct kmem_cache *cache, const void *object)
+{
+       struct kasan_alloc_meta *alloc_info;
+
+       if (!(cache->flags & SLAB_KASAN))
+               return;
+
+       alloc_info = get_alloc_info(cache, object);
+       __memset(alloc_info, 0, sizeof(*alloc_info));
+}
 
 void kasan_slab_alloc(struct kmem_cache *cache, void *object, gfp_t flags)
 {
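
kasan_init_slab_obj() replaces the KASAN_STATE_INIT assignment removed from kasan_poison_object_data() earlier in this patch: instead of maintaining a state field, the per-object metadata is zeroed once when the object is set up. __memset() is presumably the uninstrumented variant, so clearing the metadata does not itself trip KASAN's memset interceptor. A simplified sketch of where an allocator would call it (the call site below is hypothetical, loosely modelled on the slab allocators' object-setup path):

        static void setup_object_example(struct kmem_cache *s, void *object)
        {
                kasan_init_slab_obj(s, object);
                /* ... debug setup, constructor, poisoning ... */
        }
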
@@ -522,38 +535,26 @@ static void kasan_poison_slab_free(struct kmem_cache *cache, void *object)
 
 bool kasan_slab_free(struct kmem_cache *cache, void *object)
 {
-#ifdef CONFIG_SLAB
+       s8 shadow_byte;
+
        /* RCU slabs could be legally used after free within the RCU period */
        if (unlikely(cache->flags & SLAB_DESTROY_BY_RCU))
                return false;
 
-       if (likely(cache->flags & SLAB_KASAN)) {
-               struct kasan_alloc_meta *alloc_info =
-                       get_alloc_info(cache, object);
-               struct kasan_free_meta *free_info =
-                       get_free_info(cache, object);
-
-               switch (alloc_info->state) {
-               case KASAN_STATE_ALLOC:
-                       alloc_info->state = KASAN_STATE_QUARANTINE;
-                       quarantine_put(free_info, cache);
-                       set_track(&free_info->track, GFP_NOWAIT);
-                       kasan_poison_slab_free(cache, object);
-                       return true;
-               case KASAN_STATE_QUARANTINE:
-               case KASAN_STATE_FREE:
-                       pr_err("Double free");
-                       dump_stack();
-                       break;
-               default:
-                       break;
-               }
+       shadow_byte = READ_ONCE(*(s8 *)kasan_mem_to_shadow(object));
+       if (shadow_byte < 0 || shadow_byte >= KASAN_SHADOW_SCALE_SIZE) {
+               kasan_report_double_free(cache, object, shadow_byte);
+               return true;
        }
-       return false;
-#else
+
        kasan_poison_slab_free(cache, object);
-       return false;
-#endif
+
+       if (unlikely(!(cache->flags & SLAB_KASAN)))
+               return false;
+
+       set_track(&get_alloc_info(cache, object)->free_track, GFP_NOWAIT);
+       quarantine_put(get_free_info(cache, object), cache);
+       return true;
 }
 
 void kasan_kmalloc(struct kmem_cache *cache, const void *object, size_t size,
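
The rewritten kasan_slab_free() above is the core of the change: instead of a per-object state machine, the object's state is read back from its shadow memory. A live object's first shadow byte is 0 (fully accessible) or 1..KASAN_SHADOW_SCALE_SIZE-1 (partially accessible last granule); a freed or red-zoned object carries a negative poison value, so seeing one on the free path is reported as a double-free (or an otherwise invalid free). For reference, the address-to-shadow translation is the usual KASAN mapping; a sketch with a placeholder name (the real helper is kasan_mem_to_shadow(), and the constants are configuration dependent):

        static inline void *example_mem_to_shadow(const void *addr)
        {
                return (void *)((unsigned long)addr >> KASAN_SHADOW_SCALE_SHIFT)
                        + KASAN_SHADOW_OFFSET;
        }
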
@@ -562,7 +563,7 @@ void kasan_kmalloc(struct kmem_cache *cache, const void *object, size_t size,
        unsigned long redzone_start;
        unsigned long redzone_end;
 
-       if (flags & __GFP_RECLAIM)
+       if (gfpflags_allow_blocking(flags))
                quarantine_reduce();
 
        if (unlikely(object == NULL))
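
The __GFP_RECLAIM to gfpflags_allow_blocking() change narrows when the quarantine is drained: __GFP_RECLAIM also covers the kswapd-reclaim bit (so even GFP_NOWAIT allocations matched the old test), while gfpflags_allow_blocking() checks only the direct-reclaim bit, i.e. whether the caller may sleep, which is when it is reasonable to do the extra freeing work in quarantine_reduce(). For reference, the helper in include/linux/gfp.h is essentially:

        static inline bool gfpflags_allow_blocking(const gfp_t gfp_flags)
        {
                return !!(gfp_flags & __GFP_DIRECT_RECLAIM);
        }
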
@@ -576,16 +577,9 @@ void kasan_kmalloc(struct kmem_cache *cache, const void *object, size_t size,
        kasan_unpoison_shadow(object, size);
        kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start,
                KASAN_KMALLOC_REDZONE);
-#ifdef CONFIG_SLAB
-       if (cache->flags & SLAB_KASAN) {
-               struct kasan_alloc_meta *alloc_info =
-                       get_alloc_info(cache, object);
-
-               alloc_info->state = KASAN_STATE_ALLOC;
-               alloc_info->alloc_size = size;
-               set_track(&alloc_info->track, flags);
-       }
-#endif
+
+       if (cache->flags & SLAB_KASAN)
+               set_track(&get_alloc_info(cache, object)->alloc_track, flags);
 }
 EXPORT_SYMBOL(kasan_kmalloc);
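
With the state machine and alloc_size gone, kasan_kmalloc() now only records the allocation stack in the metadata's alloc_track; the matching free_track is filled in by kasan_slab_free() above. A hedged sketch of the slimmed-down metadata, assuming the stackdepot-based layout (the authoritative definition is in mm/kasan/kasan.h and may differ):

        struct kasan_track {
                u32 pid;
                depot_stack_handle_t stack;
        };

        struct kasan_alloc_meta {
                struct kasan_track alloc_track;
                struct kasan_track free_track;
        };
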
 
@@ -595,7 +589,7 @@ void kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags)
        unsigned long redzone_start;
        unsigned long redzone_end;
 
-       if (flags & __GFP_RECLAIM)
+       if (gfpflags_allow_blocking(flags))
                quarantine_reduce();
 
        if (unlikely(ptr == NULL))