static void set_page_extent_head(struct page *page, unsigned long len)
{
+ WARN_ON(!PagePrivate(page));
set_page_private(page, EXTENT_PAGE_PRIVATE_FIRST_PAGE | len << 2);
}
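
For reference, set_page_extent_head() packs two things into page->private: the EXTENT_PAGE_PRIVATE_FIRST_PAGE flag in the low bits and the buffer length shifted above them. A minimal sketch of reading the length back, assuming the low two bits are reserved for flags (extent_buffer_head_len() is a hypothetical helper, not part of the patch):

static unsigned long extent_buffer_head_len(struct page *page)
{
	/* low two bits carry flags; the rest is the buffer length */
	return page_private(page) >> 2;
}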
* at this point we can safely clear everything except the
* locked bit and the nodatasum bit
*/
- clear_extent_bit(tree, start, end,
+ ret = clear_extent_bit(tree, start, end,
~(EXTENT_LOCKED | EXTENT_NODATASUM),
0, 0, NULL, mask);
+
+ /*
+ * if clear_extent_bit() failed for -ENOMEM reasons,
+ * we can't allow the release to continue.
+ */
+ if (ret < 0)
+ ret = 0;
+ else
+ ret = 1;
}
return ret;
}
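
For context, a releasepage-style hook returns nonzero only when the page's private state is gone and the VM may free the page. A minimal sketch of the pattern the hunk above establishes, with the clear_extent_bit() call copied from it (try_release_state() is a hypothetical wrapper name):

static int try_release_state(struct extent_io_tree *tree, u64 start, u64 end,
			     gfp_t mask)
{
	int ret = clear_extent_bit(tree, start, end,
				   ~(EXTENT_LOCKED | EXTENT_NODATASUM),
				   0, 0, NULL, mask);

	/* on -ENOMEM the extent state is still there, so keep the page */
	return ret < 0 ? 0 : 1;
}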
}
if (!PageUptodate(p))
uptodate = 0;
- unlock_page(p);
+
+ /*
+ * see below for how we avoid a nasty race with releasepage
+ * and why we unlock the first page later
+ */
+ if (i != 0)
+ unlock_page(p);
}
if (uptodate)
set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
atomic_inc(&eb->refs);
spin_unlock(&tree->buffer_lock);
radix_tree_preload_end();
+
+ /*
+ * there is a race where release page may have
+ * tried to find this extent buffer in the radix
+ * but failed. It will tell the VM it is safe to
+ * reclaim the, and it will clear the page private bit.
+ * We must make sure to set the page private bit properly
+ * after the extent buffer is in the radix tree so
+ * it doesn't get lost
+ */
+ set_page_extent_mapped(eb->first_page);
+ set_page_extent_head(eb->first_page, eb->len);
+ if (!page0)
+ unlock_page(eb->first_page);
return eb;
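
To make the comment above concrete, here is a sketch of the racing releasepage side as the comment describes it (the shape is assumed, not taken from the patch): the lookup happens under tree->buffer_lock, and a miss lets the VM strip the private bit and reclaim the page. Publishing the buffer in the radix tree before setting the private bit, while first_page is still locked, closes that window.

static int releasepage_side_sketch(struct extent_io_tree *tree,
				   struct page *page)
{
	struct extent_buffer *eb;

	spin_lock(&tree->buffer_lock);
	eb = radix_tree_lookup(&tree->buffer, page->index);
	if (!eb) {
		/* no buffer found yet: tell the VM the page can go */
		ClearPagePrivate(page);
		spin_unlock(&tree->buffer_lock);
		return 1;
	}
	spin_unlock(&tree->buffer_lock);
	return 0;
}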
free_eb:
+ if (eb->first_page && !page0)
+ unlock_page(eb->first_page);
+
if (!atomic_dec_and_test(&eb->refs))
return exists;
btrfs_release_extent_buffer(eb);
continue;
lock_page(page);
+ WARN_ON(!PagePrivate(page));
+
+ set_page_extent_mapped(page);
if (i == 0)
set_page_extent_head(page, eb->len);
- else
- set_page_private(page, EXTENT_PAGE_PRIVATE);
clear_page_dirty_for_io(page);
spin_lock_irq(&page->mapping->tree_lock);
for (i = start_i; i < num_pages; i++) {
page = extent_buffer_page(eb, i);
+
+ WARN_ON(!PagePrivate(page));
+
+ set_page_extent_mapped(page);
+ if (i == 0)
+ set_page_extent_head(page, eb->len);
+
if (inc_all_pages)
page_cache_get(page);
if (!PageUptodate(page)) {
async_extent->ram_size - 1, 0);
em = alloc_extent_map(GFP_NOFS);
+ BUG_ON(!em);
em->start = async_extent->start;
em->len = async_extent->ram_size;
em->orig_start = em->start;
BUG_ON(ret);
em = alloc_extent_map(GFP_NOFS);
+ BUG_ON(!em);
em->start = start;
em->orig_start = em->start;
ram_size = ins.offset;
struct extent_map_tree *em_tree;
em_tree = &BTRFS_I(inode)->extent_tree;
em = alloc_extent_map(GFP_NOFS);
+ BUG_ON(!em);
em->start = cur_offset;
em->orig_start = em->start;
em->len = num_bytes;
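
alloc_extent_map() returns NULL on allocation failure, and each of these call sites dereferences em on the very next line, so the added BUG_ON(!em) turns a silent NULL dereference into an explicit assertion. For contrast, a sketch of the error-returning alternative the patch does not take (out_free is a hypothetical label; these paths are hard to unwind at this point):

	em = alloc_extent_map(GFP_NOFS);
	if (!em) {
		ret = -ENOMEM;
		goto out_free;
	}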
int num_types = 4;
int alloc_size;
int ret = 0;
- int slot_count = 0;
+ u64 slot_count = 0;
int i, c;
if (copy_from_user(&space_args,
goto out;
}
- slot_count = min_t(int, space_args.space_slots, slot_count);
+ slot_count = min_t(u64, space_args.space_slots, slot_count);
alloc_size = sizeof(*dest) * slot_count;
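
The type change matters because space_args.space_slots is a u64 copied from userspace, and min_t(int, ...) casts both arguments to int before comparing. A large user-supplied value can truncate to a negative int, win the min(), and poison alloc_size a line later. An illustration with made-up values:

	u64 slots = 0x80000000ULL;		/* user-controlled */
	int bad  = min_t(int, slots, 16);	/* truncates to INT_MIN, wins the min() */
	u64 good = min_t(u64, slots, 16);	/* compares in u64: good == 16 */

Doing the comparison in u64 keeps slot_count in the caller-supplied domain, which is why its declaration changes to u64 as well.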
for (i = 0; i < num_types; i++) {
struct btrfs_space_info *tmp;
+ if (!slot_count)
+ break;
+
info = NULL;
rcu_read_lock();
list_for_each_entry_rcu(tmp, &root->fs_info->space_info,
memcpy(dest, &space, sizeof(space));
dest++;
space_args.total_spaces++;
+ slot_count--;
}
+ if (!slot_count)
+ break;
}
up_read(&info->groups_sem);
}
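
Tying these hunks together: slot_count is now the remaining budget of entries the caller asked for, and dest was sized from it, so the two new !slot_count checks stop both loops before the copy can run past the allocation. A compressed sketch of the resulting loop shape (have_next_entry() stands in for the real space_info/block-group walk):

	while (have_next_entry(&space) && slot_count) {
		memcpy(dest++, &space, sizeof(space));
		space_args.total_spaces++;
		slot_count--;
	}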