.name = "kmem_cache",
};
+#define BAD_ALIEN_MAGIC 0x01020304ul
+
static DEFINE_PER_CPU(struct delayed_work, slab_reap_work);
static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
static struct array_cache *alloc_arraycache(int node, int entries,
int batchcount, gfp_t gfp)
{
- int memsize = sizeof(void *) * entries + sizeof(struct array_cache);
+ size_t memsize = sizeof(void *) * entries + sizeof(struct array_cache);
struct array_cache *ac = NULL;
ac = kmalloc_node(memsize, gfp, node);
static inline struct alien_cache **alloc_alien_cache(int node,
int limit, gfp_t gfp)
{
- return NULL;
+ /*
+  * !CONFIG_NUMA stub: alien caches must never actually be used here,
+  * so hand back a poison pointer instead of NULL.  Any accidental
+  * dereference then faults immediately, rather than being masked by
+  * a NULL check on a path that should be unreachable.
+  * NOTE(review): callers must only compare/free this value, never
+  * dereference it — confirm against free_alien_cache() and the NUMA
+  * setup paths, which are outside this hunk.
+  */
+ return (struct alien_cache **)BAD_ALIEN_MAGIC;
}
static inline void free_alien_cache(struct alien_cache **ac_ptr)
static struct alien_cache *__alloc_alien_cache(int node, int entries,
int batch, gfp_t gfp)
{
- int memsize = sizeof(void *) * entries + sizeof(struct alien_cache);
+ size_t memsize = sizeof(void *) * entries + sizeof(struct alien_cache);
struct alien_cache *alc = NULL;
alc = kmalloc_node(memsize, gfp, node);
static struct alien_cache **alloc_alien_cache(int node, int limit, gfp_t gfp)
{
struct alien_cache **alc_ptr;
- int memsize = sizeof(void *) * nr_node_ids;
+ size_t memsize = sizeof(void *) * nr_node_ids;
int i;
if (limit > 1)
{
struct kmem_cache *cachep;
struct kmem_cache_node *n;
- const int memsize = sizeof(struct kmem_cache_node);
+ const size_t memsize = sizeof(struct kmem_cache_node);
list_for_each_entry(cachep, &slab_caches, list) {
/*
}
/*
- * Interface to system's page allocator. No need to hold the cache-lock.
+ * Interface to system's page allocator. No need to hold the
+ * kmem_cache_node ->list_lock.
*
* If we requested dmaable memory, we will get it. Even if we
* did not request dmaable memory, we might get it, but that
* @cachep: cache pointer being destroyed
* @page: page pointer being destroyed
*
- * Destroy all the objs in a slab, and release the mem back to the system.
- * Before calling the slab must have been unlinked from the cache. The
- * cache-lock is not held/needed.
+ * Destroy all the objs in a slab page, and release the mem back to the system.
+ * Before calling the slab page must have been unlinked from the cache. The
+ * kmem_cache_node ->list_lock is not held/needed.
*/
static void slab_destroy(struct kmem_cache *cachep, struct page *page)
{