diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 8e3c9c5..fb42a5b 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -441,8 +441,7 @@ nocache:
                if (list_is_last(&first->list, &vmap_area_list))
                        goto found;
 
-               first = list_entry(first->list.next,
-                               struct vmap_area, list);
+               first = list_next_entry(first, list);
        }
 
 found:
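
For reference, list_next_entry() is a thin wrapper around list_entry()
(include/linux/list.h), so this is a pure readability cleanup with no
behavioural change:

    #define list_next_entry(pos, member) \
        list_entry((pos)->member.next, typeof(*(pos)), member)
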
@@ -456,7 +455,7 @@ found:
        free_vmap_cache = &va->rb_node;
        spin_unlock(&vmap_area_lock);
 
-       BUG_ON(va->va_start & (align-1));
+       BUG_ON(!IS_ALIGNED(va->va_start, align));
        BUG_ON(va->va_start < vstart);
        BUG_ON(va->va_end > vend);
 
@@ -1087,7 +1086,7 @@ void vm_unmap_ram(const void *mem, unsigned int count)
        BUG_ON(!addr);
        BUG_ON(addr < VMALLOC_START);
        BUG_ON(addr > VMALLOC_END);
-       BUG_ON(addr & (PAGE_SIZE-1));
+       BUG_ON(!IS_ALIGNED(addr, PAGE_SIZE));
 
        debug_check_no_locks_freed(mem, size);
        vmap_debug_free_range(addr, addr+size);
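
Both of these checks keep their old meaning: IS_ALIGNED()
(include/linux/kernel.h) is exactly the open-coded mask test, so with the
added negation the BUG_ON() still fires precisely when the address is
misaligned:

    #define IS_ALIGNED(x, a)    (((x) & ((typeof(x))(a) - 1)) == 0)
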
@@ -1477,13 +1476,10 @@ static void __vunmap(const void *addr, int deallocate_pages)
                        struct page *page = area->pages[i];
 
                        BUG_ON(!page);
-                       __free_page(page);
+                       __free_kmem_pages(page, 0);
                }
 
-               if (area->flags & VM_VPAGES)
-                       vfree(area->pages);
-               else
-                       kfree(area->pages);
+               kvfree(area->pages);
        }
 
        kfree(area);
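
kvfree() (mm/util.c) keys off the pointer itself instead of a flag, which
is what lets VM_VPAGES go away; it amounts to:

    void kvfree(const void *addr)
    {
        if (is_vmalloc_addr(addr))
            vfree(addr);
        else
            kfree(addr);
    }
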
@@ -1593,7 +1589,6 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
        if (array_size > PAGE_SIZE) {
                pages = __vmalloc_node(array_size, 1, nested_gfp|__GFP_HIGHMEM,
                                PAGE_KERNEL, node, area->caller);
-               area->flags |= VM_VPAGES;
        } else {
                pages = kmalloc_node(array_size, nested_gfp, node);
        }
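
With __vunmap() calling kvfree() above and s_show() switching to
is_vmalloc_addr(v->pages) below, nothing reads VM_VPAGES any more: whether
the pages array came from vmalloc is recoverable from its address, so both
the flag and this store can be dropped.
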
@@ -1608,9 +1603,9 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
                struct page *page;
 
                if (node == NUMA_NO_NODE)
-                       page = alloc_page(alloc_mask);
+                       page = alloc_kmem_pages(alloc_mask, order);
                else
-                       page = alloc_pages_node(node, alloc_mask, order);
+                       page = alloc_kmem_pages_node(node, alloc_mask, order);
 
                if (unlikely(!page)) {
                        /* Successfully allocated i pages, free them in __vunmap() */
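
The alloc_kmem_pages*() variants pair with the __free_kmem_pages() call
added in __vunmap() above: on kernels of this era they go through the
normal page allocator but additionally charge the pages to the kmem cgroup
when kmem accounting is active, so frees must go through the matching
helper to uncharge. Their declarations (include/linux/gfp.h):

    extern struct page *alloc_kmem_pages(gfp_t gfp_mask, unsigned int order);
    extern struct page *alloc_kmem_pages_node(int nid, gfp_t gfp_mask,
                                              unsigned int order);
    extern void __free_kmem_pages(struct page *page, unsigned int order);
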
@@ -2559,10 +2554,10 @@ static void *s_start(struct seq_file *m, loff_t *pos)
        struct vmap_area *va;
 
        spin_lock(&vmap_area_lock);
-       va = list_entry((&vmap_area_list)->next, typeof(*va), list);
+       va = list_first_entry(&vmap_area_list, typeof(*va), list);
        while (n > 0 && &va->list != &vmap_area_list) {
                n--;
-               va = list_entry(va->list.next, typeof(*va), list);
+               va = list_next_entry(va, list);
        }
        if (!n && &va->list != &vmap_area_list)
                return va;
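
list_first_entry() is the same kind of wrapper as list_next_entry() above
(include/linux/list.h):

    #define list_first_entry(ptr, type, member) \
        list_entry((ptr)->next, type, member)
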
@@ -2576,7 +2571,7 @@ static void *s_next(struct seq_file *m, void *p, loff_t *pos)
        struct vmap_area *va = p, *next;
 
        ++*pos;
-       next = list_entry(va->list.next, typeof(*va), list);
+       next = list_next_entry(va, list);
        if (&next->list != &vmap_area_list)
                return next;
 
@@ -2651,7 +2646,7 @@ static int s_show(struct seq_file *m, void *p)
        if (v->flags & VM_USERMAP)
                seq_puts(m, " user");
 
-       if (v->flags & VM_VPAGES)
+       if (is_vmalloc_addr(v->pages))
                seq_puts(m, " vpages");
 
        show_numa_info(m, v);
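
is_vmalloc_addr() (include/linux/mm.h) makes this report equivalent to the
old VM_VPAGES test, since __vmalloc_area_node() only vmallocs the pages
array when it is larger than a page; on MMU kernels of this era it is
roughly:

    static inline int is_vmalloc_addr(const void *x)
    {
        unsigned long addr = (unsigned long)x;

        return addr >= VMALLOC_START && addr < VMALLOC_END;
    }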