/*
 * Copyright (C) 2003 Sistina Software
 *
 * This file is released under the GPL.
 */
13 struct block_device *bdev;
19 struct page_list *next;
/*
 * Completion callback for asynchronous io.
 *
 * 'error' is a bitset, with each bit indicating whether an error
 * occurred doing io to the corresponding region.  'context' is the
 * opaque pointer the caller supplied when submitting the io.
 */
typedef void (*io_notify_fn)(unsigned long error, void *context);
/*
 * Kinds of memory a dm_io request can read from / write to.
 * The enum tag is taken from the 'enum dm_io_mem_type type' field of
 * struct dm_io_memory declared in this file.
 */
enum dm_io_mem_type {
	DM_IO_PAGE_LIST,	/* Page list */
	DM_IO_BVEC,		/* Bio vector */
	DM_IO_VMA,		/* Virtual memory area */
	DM_IO_KMEM,		/* Kernel memory */
};
37 enum dm_io_mem_type type;
50 io_notify_fn fn; /* Callback for asynchronous requests */
51 void *context; /* Passed to callback */
55 * IO request structure
58 struct dm_io_request {
59 int bi_rw; /* READ|WRITE - not READA */
60 struct dm_io_memory mem; /* Memory to use for io */
61 struct dm_io_notify notify; /* Synchronous if notify.fn is NULL */
62 struct dm_io_client *client; /* Client memory handler */
/*
 * Before anyone uses the IO interface they should call
 * dm_io_get(), specifying roughly how many pages they are
 * expecting to perform io on concurrently.
 *
 * This function may block.
 */
int dm_io_get(unsigned int num_pages);
void dm_io_put(unsigned int num_pages);
/*
 * For async io calls, users can alternatively use the dm_io() function below
 * and dm_io_client_create() to create private mempools for the client.
 *
 * Create/destroy may block.
 */
struct dm_io_client *dm_io_client_create(unsigned num_pages);
int dm_io_client_resize(unsigned num_pages, struct dm_io_client *client);
void dm_io_client_destroy(struct dm_io_client *client);
/*
 * Synchronous io: blocks until the io completes, reporting per-region
 * failures through *error_bits (one bit per region, see io_notify_fn).
 *
 * Please ensure that the rw flag in the next two functions is
 * either READ or WRITE, ie. we don't take READA. Any
 * regions with a zero count field will be ignored.
 */
int dm_io_sync(unsigned int num_regions, struct io_region *where, int rw,
	       struct page_list *pl, unsigned int offset,
	       unsigned long *error_bits);
/* As dm_io_sync(), but the data is described by a bio vector. */
int dm_io_sync_bvec(unsigned int num_regions, struct io_region *where, int rw,
		    struct bio_vec *bvec, unsigned long *error_bits);
/* As dm_io_sync(), but the data is in virtually-contiguous memory. */
int dm_io_sync_vm(unsigned int num_regions, struct io_region *where, int rw,
		  void *data, unsigned long *error_bits);
105 * The 'where' array may be safely allocated on the stack since
106 * the function takes a copy.
108 int dm_io_async(unsigned int num_regions, struct io_region *where, int rw,
109 struct page_list *pl, unsigned int offset,
110 io_notify_fn fn, void *context);
112 int dm_io_async_bvec(unsigned int num_regions, struct io_region *where, int rw,
113 struct bio_vec *bvec, io_notify_fn fn, void *context);
115 int dm_io_async_vm(unsigned int num_regions, struct io_region *where, int rw,
116 void *data, io_notify_fn fn, void *context);
/*
 * IO interface using private per-client pools.
 *
 * Synchronous when io_req->notify.fn is NULL (per the struct dm_io_request
 * field comment); otherwise asynchronous.
 */
int dm_io(struct dm_io_request *io_req, unsigned num_regions,
	  struct io_region *region, unsigned long *sync_error_bits);