{
void *ret = slab_alloc(cachep, flags, _RET_IP_);
- kasan_slab_alloc(cachep, ret);
+ kasan_slab_alloc(cachep, ret, flags);
trace_kmem_cache_alloc(_RET_IP_, ret,
cachep->object_size, cachep->size, flags);
ret = slab_alloc(cachep, flags, _RET_IP_);
- kasan_kmalloc(cachep, ret, size);
+ kasan_kmalloc(cachep, ret, size, flags);
trace_kmalloc(_RET_IP_, ret,
size, cachep->size, flags);
return ret;
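The two hunks above already show the whole shape of the change: each allocation path keeps its existing slab_alloc()/trace pair and simply forwards its gfp_t flags into the KASAN hook along with the cache and object, and the hunks that follow repeat the same one-line change for the node-aware and kmalloc variants. A minimal stand-alone sketch of that calling convention is below; only the three KASAN hook signatures mirror the diff, while the types, stubs, flag values, and the alloc_traced() helper are hypothetical stand-ins for illustration.

/*
 * Hypothetical stand-alone sketch of the flag-threading pattern above.
 * Only the three KASAN hook signatures mirror the diff; the types,
 * stubs and main() are illustration-only.
 */
#include <stddef.h>
#include <stdlib.h>

typedef unsigned int gfp_t;		/* stand-in for the kernel's gfp_t */
#define GFP_KERNEL	1u		/* illustrative flag values        */
#define GFP_NOWAIT	0u

struct kmem_cache {
	size_t object_size;
	size_t size;
};

/* Post-patch hook signatures: every annotation now carries the gfp
 * flags of the allocation it describes. */
static void kasan_slab_alloc(struct kmem_cache *c, void *obj, gfp_t flags)
{ (void)c; (void)obj; (void)flags; }

static void kasan_kmalloc(struct kmem_cache *c, void *obj, size_t size,
			  gfp_t flags)
{ (void)c; (void)obj; (void)size; (void)flags; }

static void kasan_krealloc(void *obj, size_t size, gfp_t flags)
{ (void)obj; (void)size; (void)flags; }

/* Mirrors the shape of the kmem_cache_alloc_trace() hunk: allocate,
 * then hand the caller's flags straight through to the KASAN hook. */
static void *alloc_traced(struct kmem_cache *cachep, gfp_t flags, size_t size)
{
	void *ret = malloc(size);	/* stands in for slab_alloc() */

	kasan_kmalloc(cachep, ret, size, flags);
	return ret;
}

int main(void)
{
	struct kmem_cache cache = { .object_size = 32, .size = 32 };
	void *p = alloc_traced(&cache, GFP_KERNEL, 32);

	kasan_slab_alloc(&cache, p, GFP_KERNEL);  /* object-path hook */
	kasan_krealloc(p, 32, GFP_NOWAIT);        /* ksize()-style unpoison */
	free(p);
	return 0;
}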
{
void *ret = slab_alloc_node(cachep, flags, nodeid, _RET_IP_);
- kasan_slab_alloc(cachep, ret);
+ kasan_slab_alloc(cachep, ret, flags);
trace_kmem_cache_alloc_node(_RET_IP_, ret,
cachep->object_size, cachep->size,
flags, nodeid);
void *ret;
ret = slab_alloc_node(cachep, flags, nodeid, _RET_IP_);
- kasan_kmalloc(cachep, ret, size);
+ kasan_kmalloc(cachep, ret, size, flags);
trace_kmalloc_node(_RET_IP_, ret,
size, cachep->size,
flags, nodeid);
if (unlikely(ZERO_OR_NULL_PTR(cachep)))
return cachep;
ret = kmem_cache_alloc_node_trace(cachep, flags, node, size);
- kasan_kmalloc(cachep, ret, size);
+ kasan_kmalloc(cachep, ret, size, flags);
return ret;
}
return cachep;
ret = slab_alloc(cachep, flags, caller);
- kasan_kmalloc(cachep, ret, size);
+ kasan_kmalloc(cachep, ret, size, flags);
trace_kmalloc(caller, ret,
size, cachep->size, flags);
/* We assume that ksize callers could use the whole allocated area,
* so we need to unpoison this area.
*/
- kasan_krealloc(objp, size);
+ kasan_krealloc(objp, size, GFP_NOWAIT);
return size;
}
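Note the one outlier: ksize() is not an allocation site, so there are no caller-supplied flags to forward, and the unpoisoning call passes GFP_NOWAIT instead, presumably the conservative choice, since ksize() may be reached from contexts where a KASAN-internal allocation would not be allowed to sleep. In terms of the sketch above, this corresponds to the kasan_krealloc(p, 32, GFP_NOWAIT) call.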