/*
 * Copyright (C) 2010 The Chromium OS Authors <chromium-os-dev@chromium.org>
 *
 * Device-Mapper block hash tree interface.
 * See Documentation/device-mapper/dm-bht.txt for details.
 *
 * This file is released under the GPL.
 */

#include <asm/atomic.h>
#include <asm/page.h>		/* PAGE_SIZE */
#include <linux/bitops.h>	/* for fls() */
#include <linux/bug.h>
#include <linux/cpumask.h>	/* nr_cpu_ids */
/* #define CONFIG_DM_DEBUG 1 */
#include <linux/device-mapper.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/dm-bht.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mm_types.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>		/* k*alloc */
#include <linux/string.h>	/* memset */

#define DM_MSG_PREFIX "dm bht"

/* For sector formatting. */
#if defined(_LP64) || defined(__LP64__) || __BITS_PER_LONG == 64
#define __PRIS_PREFIX "z"
#else
#define __PRIS_PREFIX "ll"
#endif
#define PRIu64 __PRIS_PREFIX "u"

/*-----------------------------------------------
 * Utilities
 *-----------------------------------------------*/

static u8 from_hex(u8 ch)
{
	if ((ch >= '0') && (ch <= '9'))
		return ch - '0';
	if ((ch >= 'a') && (ch <= 'f'))
		return ch - 'a' + 10;
	if ((ch >= 'A') && (ch <= 'F'))
		return ch - 'A' + 10;
	return -1;
}

/**
 * dm_bht_bin_to_hex - converts a binary stream to human-readable hex
 * @binary:	a byte array of length @binary_len
 * @hex:	a byte array of length @binary_len * 2 + 1
 */
static void dm_bht_bin_to_hex(u8 *binary, u8 *hex, unsigned int binary_len)
{
	while (binary_len-- > 0) {
		sprintf((char *__restrict__)hex, "%02hhx", (int)*binary);
		hex += 2;
		binary++;
	}
}

/**
 * dm_bht_hex_to_bin - converts a hex stream to binary
 * @binary:	a byte array of length @binary_len
 * @hex:	a byte array of length @binary_len * 2 + 1
 */
static void dm_bht_hex_to_bin(u8 *binary, const u8 *hex,
			      unsigned int binary_len)
{
	while (binary_len-- > 0) {
		*binary = from_hex(*(hex++));
		*binary *= 16;
		*binary += from_hex(*(hex++));
		binary++;
	}
}

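/*
 * Example (sketch, not compiled into this file): the two helpers above are
 * inverses for well-formed input, e.g. for a hypothetical 2-byte digest:
 *
 *	u8 bin[2] = { 0xde, 0xad };
 *	u8 hex[2 * 2 + 1];	// two hex digits per byte plus trailing NUL
 *	u8 back[2];
 *
 *	dm_bht_bin_to_hex(bin, hex, sizeof(bin));   // hex  == "dead"
 *	dm_bht_hex_to_bin(back, hex, sizeof(back)); // back == { 0xde, 0xad }
 */
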
static void dm_bht_log_mismatch(struct dm_bht *bht, u8 *given, u8 *computed)
{
	u8 given_hex[DM_BHT_MAX_DIGEST_SIZE * 2 + 1];
	u8 computed_hex[DM_BHT_MAX_DIGEST_SIZE * 2 + 1];

	dm_bht_bin_to_hex(given, given_hex, bht->digest_size);
	dm_bht_bin_to_hex(computed, computed_hex, bht->digest_size);
	DMERR_LIMIT("%s != %s", given_hex, computed_hex);
}

/* Used for turning verifiers into computers */
typedef int (*dm_bht_compare_cb)(struct dm_bht *, u8 *, u8 *);

/**
 * dm_bht_compute_hash: hashes a page of data
 */
static int dm_bht_compute_hash(struct dm_bht *bht, struct page *pg,
			       unsigned int offset, u8 *digest)
{
	struct hash_desc *hash_desc = &bht->hash_desc[smp_processor_id()];
	struct scatterlist sg;

	sg_init_table(&sg, 1);
	sg_set_page(&sg, pg, PAGE_SIZE, offset);
	/* Note, this is synchronous. */
	if (crypto_hash_init(hash_desc)) {
		DMCRIT("failed to reinitialize crypto hash (proc:%d)",
		       smp_processor_id());
		return -EINVAL;
	}
	if (crypto_hash_update(hash_desc, &sg, PAGE_SIZE)) {
		DMCRIT("crypto_hash_update failed");
		return -EINVAL;
	}
	if (bht->have_salt) {
		sg_set_buf(&sg, bht->salt, sizeof(bht->salt));
		if (crypto_hash_update(hash_desc, &sg, sizeof(bht->salt))) {
			DMCRIT("crypto_hash_update failed");
			return -EINVAL;
		}
	}
	if (crypto_hash_final(hash_desc, digest)) {
		DMCRIT("crypto_hash_final failed");
		return -EINVAL;
	}

	return 0;
}

static __always_inline struct dm_bht_level *dm_bht_get_level(struct dm_bht *bht,
							     int depth)
{
	return &bht->levels[depth];
}

static __always_inline unsigned int dm_bht_get_level_shift(struct dm_bht *bht,
							   int depth)
{
	return (bht->depth - depth) * bht->node_count_shift;
}

/* For the given depth, this is the entry index.  At depth+1 it is the node
 * index for depth.
 */
static __always_inline unsigned int dm_bht_index_at_level(struct dm_bht *bht,
							  int depth,
							  unsigned int leaf)
{
	return leaf >> dm_bht_get_level_shift(bht, depth);
}

static __always_inline u8 *dm_bht_node(struct dm_bht *bht,
				       struct dm_bht_entry *entry,
				       unsigned int node_index)
{
	return &entry->nodes[node_index * bht->digest_size];
}

static inline struct dm_bht_entry *dm_bht_get_entry(struct dm_bht *bht,
						    int depth,
						    unsigned int block)
{
	unsigned int index = dm_bht_index_at_level(bht, depth, block);
	struct dm_bht_level *level = dm_bht_get_level(bht, depth);

	BUG_ON(index >= level->count);

	return &level->entries[index];
}

static inline u8 *dm_bht_get_node(struct dm_bht *bht,
				  struct dm_bht_entry *entry,
				  int depth,
				  unsigned int block)
{
	unsigned int index = dm_bht_index_at_level(bht, depth, block);

	return dm_bht_node(bht, entry, index % bht->node_count);
}

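/*
 * Example (informal, derived from the helpers above): with 4096-byte pages
 * and a 32-byte digest, node_count = 128 (node_count_shift = 7), so a
 * two-level tree covers up to 128 * 128 = 16384 blocks.  For block 9000
 * with depth = 2:
 *
 *	dm_bht_index_at_level(bht, 1, 9000) = 9000 >> 7  = 70
 *	dm_bht_index_at_level(bht, 0, 9000) = 9000 >> 14 = 0
 *
 * i.e. the leaf hash lives in entry 70 of level 1, and that entry's page is
 * in turn hashed into node 70 % 128 = 70 of the single level-0 entry.
 */
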
/*-----------------------------------------------
 * Implementation functions
 *-----------------------------------------------*/

static int dm_bht_initialize_entries(struct dm_bht *bht);

static int dm_bht_read_callback_stub(void *ctx, sector_t start, u8 *dst,
				     sector_t count,
				     struct dm_bht_entry *entry);
static int dm_bht_write_callback_stub(void *ctx, sector_t start,
				      u8 *dst, sector_t count,
				      struct dm_bht_entry *entry);

/**
 * dm_bht_create - prepares @bht for use
 * @bht:	pointer to the dm_bht to initialize
 * @block_count: the number of block hashes / tree leaves
 * @alg_name:	crypto hash algorithm name
 *
 * Returns 0 on success.
 *
 * Callers can offset into devices by storing the data in the io callbacks.
 * TODO(wad) bust up into smaller helpers
 */
int dm_bht_create(struct dm_bht *bht, unsigned int block_count,
		  const char *alg_name)
{
	int status = 0;
	int cpu = 0;

	bht->have_salt = false;

	/* Setup the hash first. Its length determines much of the bht layout */
	for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
		bht->hash_desc[cpu].tfm = crypto_alloc_hash(alg_name, 0, 0);
		if (IS_ERR(bht->hash_desc[cpu].tfm)) {
			DMERR("failed to allocate crypto hash '%s'", alg_name);
			status = -ENOMEM;
			bht->hash_desc[cpu].tfm = NULL;
			goto bad_hash_alg;
		}
	}
	bht->digest_size = crypto_hash_digestsize(bht->hash_desc[0].tfm);
	/* We expect to be able to pack >=2 hashes into a page */
	if (PAGE_SIZE / bht->digest_size < 2) {
		DMERR("too few hashes fit in a page");
		status = -EINVAL;
		goto bad_digest_len;
	}

	if (bht->digest_size > DM_BHT_MAX_DIGEST_SIZE) {
		DMERR("DM_BHT_MAX_DIGEST_SIZE too small for chosen digest");
		status = -EINVAL;
		goto bad_digest_len;
	}

	/* Configure the tree */
	bht->block_count = block_count;
	DMDEBUG("Setting block_count %u", block_count);
	if (block_count == 0) {
		DMERR("block_count must be non-zero");
		status = -EINVAL;
		goto bad_block_count;
	}

	/* Each dm_bht_entry->nodes is one page.  The node code tracks
	 * how many nodes fit into one entry where a node is a single
	 * hash (message digest).
	 */
	bht->node_count_shift = fls(PAGE_SIZE / bht->digest_size) - 1;
	/* Round down to the nearest power of two.  This makes indexing
	 * into the tree much less painful.
	 */
	bht->node_count = 1 << bht->node_count_shift;

	/* This is unlikely to happen, but with 64k pages, who knows. */
	if (bht->node_count > UINT_MAX / bht->digest_size) {
		DMERR("node_count * hash_len exceeds UINT_MAX!");
		status = -EINVAL;
		goto bad_node_count;
	}

	bht->depth = DIV_ROUND_UP(fls(block_count - 1), bht->node_count_shift);
	DMDEBUG("Setting depth to %d.", bht->depth);

	/* Ensure that we can safely shift by this value. */
	if (bht->depth * bht->node_count_shift >= sizeof(unsigned int) * 8) {
		DMERR("specified depth and node_count_shift is too large");
		status = -EINVAL;
		goto bad_node_count;
	}

	/* Allocate levels. Each level of the tree may have an arbitrary number
	 * of dm_bht_entry structs.  Each entry contains node_count nodes.
	 * Each node in the tree is a cryptographic digest of either node_count
	 * nodes on the subsequent level or of a specific block on disk.
	 */
	bht->levels = (struct dm_bht_level *)
			kcalloc(bht->depth,
				sizeof(struct dm_bht_level), GFP_KERNEL);
	if (!bht->levels) {
		DMERR("failed to allocate tree levels");
		status = -ENOMEM;
		goto bad_level_alloc;
	}

	/* Setup callback stubs */
	bht->read_cb = &dm_bht_read_callback_stub;
	bht->write_cb = &dm_bht_write_callback_stub;

	status = dm_bht_initialize_entries(bht);
	if (status)
		goto bad_entries_alloc;

	/* We compute depth such that there is only 1 block at level 0. */
	BUG_ON(bht->levels[0].count != 1);

	return 0;

bad_entries_alloc:
	while (bht->depth-- > 0)
		kfree(bht->levels[bht->depth].entries);
	kfree(bht->levels);
bad_node_count:
bad_level_alloc:
bad_block_count:
bad_digest_len:
bad_hash_alg:
	for (cpu = 0; cpu < nr_cpu_ids; ++cpu)
		if (bht->hash_desc[cpu].tfm)
			crypto_free_hash(bht->hash_desc[cpu].tfm);
	return status;
}
EXPORT_SYMBOL(dm_bht_create);

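/*
 * Example (sketch, hypothetical caller; my_read_cb, root_hex, and the error
 * handling are caller-defined stand-ins, not part of this file):
 *
 *	struct dm_bht bht;
 *
 *	if (dm_bht_create(&bht, block_count, "sha256"))
 *		return -EINVAL;
 *	dm_bht_set_read_cb(&bht, my_read_cb);
 *	dm_bht_set_root_hexdigest(&bht, root_hex);
 *	// ... populate and verify blocks ...
 *	dm_bht_destroy(&bht);
 */
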
static int dm_bht_initialize_entries(struct dm_bht *bht)
{
	/* The last_index represents the index into the last
	 * block digest that will be stored in the tree.  By walking the
	 * tree with that index, it is possible to compute the total number
	 * of entries needed at each level in the tree.
	 *
	 * Since each entry will contain up to |node_count| nodes of the tree,
	 * it is possible that the last index may not be at the end of a given
	 * entry->nodes.  In that case, it is assumed the value is padded.
	 *
	 * Note, we treat both the tree root (1 hash) and the tree leaves
	 * independently from the bht data structures.  Logically, the root is
	 * depth=-1 and the block layer level is depth=bht->depth
	 */
	unsigned int last_index = ALIGN(bht->block_count, bht->node_count) - 1;
	unsigned int total_entries = 0;
	struct dm_bht_level *level = NULL;
	int depth;

	/* check that the largest level->count can't result in an int overflow
	 * on allocation or sector calculation.
	 */
	if (((last_index >> bht->node_count_shift) + 1) >
	    UINT_MAX / max((unsigned int)sizeof(struct dm_bht_entry),
			   (unsigned int)to_sector(PAGE_SIZE))) {
		DMCRIT("required entries %u is too large",
		       last_index + 1);
		return -EINVAL;
	}

	/* Track the current sector location for each level so we don't have to
	 * compute it during traversals.
	 */
	bht->sectors = 0;
	for (depth = 0; depth < bht->depth; ++depth) {
		level = dm_bht_get_level(bht, depth);
		level->count = dm_bht_index_at_level(bht, depth,
						     last_index) + 1;
		DMDEBUG("depth: %d entries: %u", depth, level->count);
		/* TODO(wad) consider the case where the data stored for each
		 * level is done with contiguous pages (instead of using
		 * entry->nodes) and the level just contains two bitmaps:
		 * (a) which pages have been loaded from disk
		 * (b) which specific nodes have been verified.
		 */
		level->entries = (struct dm_bht_entry *)
				 kcalloc(level->count,
					 sizeof(struct dm_bht_entry),
					 GFP_KERNEL);
		if (!level->entries) {
			DMERR("failed to allocate entries for depth %d",
			      depth);
			/* let the caller clean up the mess */
			return -ENOMEM;
		}
		total_entries += level->count;
		level->sector = bht->sectors;
		/* number of sectors per entry * entries at this level */
		bht->sectors += level->count * to_sector(PAGE_SIZE);
		/* not ideal, but since unsigned overflow behavior is defined */
		if (bht->sectors < level->sector) {
			DMCRIT("level sector calculation overflowed");
			return -EINVAL;
		}
	}

	return 0;
}

static int dm_bht_read_callback_stub(void *ctx, sector_t start, u8 *dst,
				     sector_t count, struct dm_bht_entry *entry)
{
	DMCRIT("dm_bht_read_callback_stub called!");
	dm_bht_read_completed(entry, -EIO);
	return -EIO;
}

static int dm_bht_write_callback_stub(void *ctx, sector_t start,
				      u8 *dst, sector_t count,
				      struct dm_bht_entry *entry)
{
	DMCRIT("dm_bht_write_callback_stub called!");
	dm_bht_write_completed(entry, -EIO);
	return -EIO;
}

/**
 * dm_bht_read_completed
 * @entry:	pointer to the entry that's been loaded
 * @status:	I/O status. Non-zero is failure.
 * MUST always be called after a read_cb completes.
 */
void dm_bht_read_completed(struct dm_bht_entry *entry, int status)
{
	if (status) {
		/* TODO(wad) add retry support */
		DMCRIT("an I/O error occurred while reading entry");
		atomic_set(&entry->state, DM_BHT_ENTRY_ERROR_IO);
		/* entry->nodes will be freed later */
		return;
	}
	BUG_ON(atomic_read(&entry->state) != DM_BHT_ENTRY_PENDING);
	atomic_set(&entry->state, DM_BHT_ENTRY_READY);
}
EXPORT_SYMBOL(dm_bht_read_completed);

/**
 * dm_bht_write_completed
 * @entry:	pointer to the entry that's been written
 * @status:	I/O status. Non-zero is failure.
 * Should be called after a write_cb completes. Currently only catches
 * errors which most writers don't care about.
 */
void dm_bht_write_completed(struct dm_bht_entry *entry, int status)
{
	if (status) {
		DMCRIT("an I/O error occurred while writing entry");
		atomic_set(&entry->state, DM_BHT_ENTRY_ERROR_IO);
		/* entry->nodes will be freed later */
	}
}
EXPORT_SYMBOL(dm_bht_write_completed);

/* dm_bht_verify_path
 * Verifies the path. Returns 0 on ok.
 */
static int dm_bht_verify_path(struct dm_bht *bht, unsigned int block,
			      struct page *pg, unsigned int offset)
{
	int depth = bht->depth;
	u8 digest[DM_BHT_MAX_DIGEST_SIZE];
	struct dm_bht_entry *entry;
	u8 *node;
	int state;

	do {
		/* Need to check that the hash of the current block is accurate
		 * in its parent.
		 */
		entry = dm_bht_get_entry(bht, depth - 1, block);
		state = atomic_read(&entry->state);
		/* This call is only safe if all nodes along the path
		 * are already populated (i.e. READY) via dm_bht_populate.
		 */
		BUG_ON(state < DM_BHT_ENTRY_READY);
		node = dm_bht_get_node(bht, entry, depth, block);

		if (dm_bht_compute_hash(bht, pg, offset, digest) ||
		    memcmp(digest, node, bht->digest_size))
			goto mismatch;

		/* Keep the containing block of hashes to be verified in the
		 * next pass.
		 */
		pg = virt_to_page(entry->nodes);
		offset = 0;
	} while (--depth > 0 && state != DM_BHT_ENTRY_VERIFIED);

	if (depth == 0 && state != DM_BHT_ENTRY_VERIFIED) {
		if (dm_bht_compute_hash(bht, pg, offset, digest) ||
		    memcmp(digest, bht->root_digest, bht->digest_size))
			goto mismatch;
		atomic_set(&entry->state, DM_BHT_ENTRY_VERIFIED);
	}

	/* Mark path to leaf as verified. */
	for (depth++; depth < bht->depth; depth++) {
		entry = dm_bht_get_entry(bht, depth, block);
		/* At this point, entry can only be in VERIFIED or READY state.
		 * So it is safe to use atomic_set instead of atomic_cmpxchg.
		 */
		atomic_set(&entry->state, DM_BHT_ENTRY_VERIFIED);
	}

	DMDEBUG("verify_path: node %u is verified to root", block);
	return 0;

mismatch:
	DMERR_LIMIT("verify_path: failed to verify hash (d=%d,bi=%u)",
		    depth, block);
	dm_bht_log_mismatch(bht, node, digest);
	return DM_BHT_ENTRY_ERROR_MISMATCH;
}

/**
 * dm_bht_store_block - sets a given block's hash in the tree
 * @bht:	pointer to a dm_bht_create()d bht
 * @block:	numeric index of the block in the tree
 * @block_data:	array of u8s containing the block of data to hash
 *
 * Returns 0 on success, >0 when data is pending, and <0 when an IO or other
 * error has occurred.
 *
 * If the containing entry in the tree is unallocated, it will allocate memory
 * and mark the entry as ready.  All other block entries will be 0s.  This
 * function is not safe for simultaneous use when verifying data and should not
 * be used if the @bht is being accessed by any other functions in any other
 * threads/processes.
 *
 * It is expected that virt_to_page will work on |block_data|.
 */
int dm_bht_store_block(struct dm_bht *bht, unsigned int block,
		       u8 *block_data)
{
	int depth;
	unsigned int index;
	unsigned int node_index;
	struct dm_bht_entry *entry;
	struct dm_bht_level *level;
	int state;
	struct page *node_page = NULL;

	/* Look at the last level of nodes above the leaves (data blocks) */
	depth = bht->depth - 1;

	/* Index into the level */
	level = dm_bht_get_level(bht, depth);
	index = dm_bht_index_at_level(bht, depth, block);
	/* Grab the node index into the current entry by getting the
	 * index at the leaf-level.
	 */
	node_index = dm_bht_index_at_level(bht, depth + 1, block) %
		     bht->node_count;
	entry = &level->entries[index];

	DMDEBUG("Storing block %u in d=%d,ei=%u,ni=%u,s=%d",
		block, depth, index, node_index,
		atomic_read(&entry->state));

	state = atomic_cmpxchg(&entry->state,
			       DM_BHT_ENTRY_UNALLOCATED,
			       DM_BHT_ENTRY_PENDING);
	/* !!! Note. It is up to the users of the update interface to
	 * ensure the entry data is fully populated prior to use.
	 * The number of updated entries is NOT tracked.
	 */
	if (state == DM_BHT_ENTRY_UNALLOCATED) {
		node_page = alloc_page(GFP_KERNEL);
		if (!node_page) {
			atomic_set(&entry->state, DM_BHT_ENTRY_ERROR);
			return -ENOMEM;
		}
		entry->nodes = page_address(node_page);
		memset(entry->nodes, 0, PAGE_SIZE);
		/* TODO(wad) could expose this to the caller so that they
		 * can transition from unallocated to ready manually.
		 */
		atomic_set(&entry->state, DM_BHT_ENTRY_READY);
	} else if (state <= DM_BHT_ENTRY_ERROR) {
		DMCRIT("leaf entry for block %u is invalid",
		       block);
		return state;
	} else if (state == DM_BHT_ENTRY_PENDING) {
		DMERR("leaf data is pending for block %u", block);
		return 1;
	}

	dm_bht_compute_hash(bht, virt_to_page(block_data), 0,
			    dm_bht_node(bht, entry, node_index));
	return 0;
}
EXPORT_SYMBOL(dm_bht_store_block);

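/*
 * Example (sketch): when building a tree from scratch, a hypothetical
 * single-threaded caller stores every leaf hash before computing the
 * interior levels; data_for() stands in for however the caller maps a
 * block index to a page-backed buffer:
 *
 *	for (block = 0; block < bht->block_count; block++) {
 *		u8 *data = data_for(block);
 *		if (dm_bht_store_block(bht, block, data))
 *			return -EINVAL;
 *	}
 */
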
/**
 * dm_bht_zeroread_callback - read callback which always returns 0s
 * @ctx:	ignored
 * @start:	ignored
 * @dst:	buffer to write 0s to
 * @count:	number of sectors worth of data to write
 * @entry:	entry to notify via dm_bht_read_completed()
 *
 * Always returns 0.
 *
 * Meant for use by dm_bht_compute() callers.  It allows dm_bht_populate() to
 * be used to pre-fill a tree with zeroed out entry nodes.
 */
int dm_bht_zeroread_callback(void *ctx, sector_t start, u8 *dst,
			     sector_t count, struct dm_bht_entry *entry)
{
	memset(dst, 0, to_bytes(count));
	dm_bht_read_completed(entry, 0);
	return 0;
}
EXPORT_SYMBOL(dm_bht_zeroread_callback);

/**
 * dm_bht_compute - computes and updates all non-block-level hashes in a tree
 * @bht:	pointer to a dm_bht_create()d bht
 * @read_cb_ctx: opaque read_cb context for all I/O on this call
 *
 * Returns 0 on success, >0 when data is pending, and <0 when an IO or other
 * error has occurred.
 *
 * Walks the tree and computes the hashes at each level from the
 * hashes below. This can only be called once per tree creation
 * since it will mark entries verified. Expects dm_bht_populate() to
 * correctly populate the tree from the read_callback_stub.
 *
 * This function should not be used when verifying the same tree and
 * should not be used with multiple simultaneous operators on @bht.
 */
int dm_bht_compute(struct dm_bht *bht, void *read_cb_ctx)
{
	int depth, r = 0;

	for (depth = bht->depth - 2; depth >= 0; depth--) {
		struct dm_bht_level *level = dm_bht_get_level(bht, depth);
		struct dm_bht_level *child_level = level + 1;
		struct dm_bht_entry *entry = level->entries;
		struct dm_bht_entry *child = child_level->entries;
		unsigned int i, j;

		for (i = 0; i < level->count; i++, entry++) {
			unsigned int count = bht->node_count;
			struct page *pg;

			pg = alloc_page(GFP_NOIO);
			if (!pg) {
				DMCRIT("failed to allocate page for entry");
				r = -ENOMEM;
				goto out;
			}

			entry->nodes = page_address(pg);
			memset(entry->nodes, 0, PAGE_SIZE);
			atomic_set(&entry->state, DM_BHT_ENTRY_READY);

			if (i == (level->count - 1))
				count = child_level->count % bht->node_count;
			if (!count)
				count = bht->node_count;
			for (j = 0; j < count; j++, child++) {
				struct page *pg = virt_to_page(child->nodes);
				u8 *digest = dm_bht_node(bht, entry, j);

				r = dm_bht_compute_hash(bht, pg, 0, digest);
				if (r) {
					DMERR("Failed to update (d=%d,i=%u)",
					      depth, i);
					goto out;
				}
			}
		}
	}
	r = dm_bht_compute_hash(bht,
				virt_to_page(bht->levels[0].entries->nodes),
				0, bht->root_digest);
	if (r)
		DMERR("Failed to update root hash");

out:
	return r;
}
EXPORT_SYMBOL(dm_bht_compute);

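/*
 * Example (sketch, hypothetical build flow; write_ctx and hex are
 * caller-defined, error handling elided): once all leaf hashes are stored,
 * compute the interior levels and root, then write the tree out:
 *
 *	dm_bht_compute(bht, NULL);	// read_cb_ctx unused in this sketch
 *	dm_bht_sync(bht, write_ctx);
 *	dm_bht_root_hexdigest(bht, hex, sizeof(hex));
 */
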
/**
 * dm_bht_sync - writes the tree in memory to disk
 * @bht:	pointer to a dm_bht_create()d bht
 * @write_cb_ctx: callback context for writes issued
 *
 * Since all entry nodes are PAGE_SIZE, the data will be pre-aligned and
 * padded.
 */
int dm_bht_sync(struct dm_bht *bht, void *write_cb_ctx)
{
	int depth;
	int ret = 0;
	int state;
	sector_t sector;
	struct dm_bht_level *level;
	struct dm_bht_entry *entry;
	struct dm_bht_entry *entry_end;

	for (depth = 0; depth < bht->depth; ++depth) {
		level = dm_bht_get_level(bht, depth);
		entry_end = level->entries + level->count;
		sector = level->sector;
		for (entry = level->entries; entry < entry_end; ++entry) {
			state = atomic_read(&entry->state);
			if (state <= DM_BHT_ENTRY_PENDING) {
				DMERR("At depth %d, entry %lu is not ready",
				      depth,
				      (unsigned long)(entry - level->entries));
				return state;
			}
			ret = bht->write_cb(write_cb_ctx,
					    sector,
					    entry->nodes,
					    to_sector(PAGE_SIZE),
					    entry);
			if (ret) {
				DMCRIT("an error occurred writing entry %lu",
				       (unsigned long)(entry - level->entries));
				return ret;
			}
			sector += to_sector(PAGE_SIZE);
		}
	}

	return 0;
}
EXPORT_SYMBOL(dm_bht_sync);

/**
 * dm_bht_is_populated - check that entries from disk needed to verify a given
 *			 block are all ready
 * @bht:	pointer to a dm_bht_create()d bht
 * @block:	specific block data is expected from
 *
 * Callers may wish to call dm_bht_is_populated() when checking an io
 * for which entries were already pending.
 */
bool dm_bht_is_populated(struct dm_bht *bht, unsigned int block)
{
	int depth;

	for (depth = bht->depth - 1; depth >= 0; depth--) {
		struct dm_bht_entry *entry = dm_bht_get_entry(bht, depth,
							      block);
		if (atomic_read(&entry->state) < DM_BHT_ENTRY_READY)
			return false;
	}

	return true;
}
EXPORT_SYMBOL(dm_bht_is_populated);

/**
 * dm_bht_populate - reads entries from disk needed to verify a given block
 * @bht:	pointer to a dm_bht_create()d bht
 * @ctx:	context used for all read_cb calls on this request
 * @block:	specific block data is expected from
 *
 * Returns negative value on error. Returns 0 on success.
 */
int dm_bht_populate(struct dm_bht *bht, void *ctx,
		    unsigned int block)
{
	int depth;
	int state = 0;

	BUG_ON(block >= bht->block_count);

	DMDEBUG("dm_bht_populate(%u)", block);

	for (depth = bht->depth - 1; depth >= 0; --depth) {
		struct dm_bht_level *level;
		struct dm_bht_entry *entry;
		unsigned int index;
		struct page *pg;

		entry = dm_bht_get_entry(bht, depth, block);
		state = atomic_cmpxchg(&entry->state,
				       DM_BHT_ENTRY_UNALLOCATED,
				       DM_BHT_ENTRY_PENDING);

		if (state == DM_BHT_ENTRY_VERIFIED)
			break;
		if (state <= DM_BHT_ENTRY_ERROR)
			goto error_state;
		if (state != DM_BHT_ENTRY_UNALLOCATED)
			continue;

		/* Current entry is claimed for allocation and loading */
		pg = alloc_page(GFP_NOIO);
		if (!pg)
			goto nomem;

		/* dm-bht guarantees page-aligned memory for callbacks. */
		entry->nodes = page_address(pg);

		/* TODO(wad) error check callback here too */

		level = &bht->levels[depth];
		index = dm_bht_index_at_level(bht, depth, block);
		bht->read_cb(ctx, level->sector + to_sector(index * PAGE_SIZE),
			     entry->nodes, to_sector(PAGE_SIZE), entry);
	}

	return 0;

error_state:
	DMCRIT("block %u at depth %d is in an error state", block, depth);
	return state;

nomem:
	DMCRIT("failed to allocate memory for entry->nodes");
	return -ENOMEM;
}
EXPORT_SYMBOL(dm_bht_populate);

/**
 * dm_bht_verify_block - checks that all nodes in the path for @block are valid
 * @bht:	pointer to a dm_bht_create()d bht
 * @block:	specific block data is expected from
 * @pg:		page holding the block data
 * @offset:	offset into the page
 *
 * Returns 0 on success, 1 on missing data, and a negative error
 * code on verification failure. All supporting functions called
 * should return similarly.
 */
int dm_bht_verify_block(struct dm_bht *bht, unsigned int block,
			struct page *pg, unsigned int offset)
{
	return dm_bht_verify_path(bht, block, pg, offset);
}
EXPORT_SYMBOL(dm_bht_verify_block);

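/*
 * Example (sketch, hypothetical verifying read path; io_ctx and the bio
 * plumbing are caller-defined and elided):
 *
 *	dm_bht_populate(bht, io_ctx, block);
 *	// ... wait for the read_cb completions, then re-check:
 *	if (!dm_bht_is_populated(bht, block))
 *		return -EAGAIN;	// entries still pending
 *	if (dm_bht_verify_block(bht, block, pg, 0))
 *		return -EIO;	// mismatch anywhere along the path
 */
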
/**
 * dm_bht_destroy - cleans up all memory used by @bht
 * @bht:	pointer to a dm_bht_create()d bht
 *
 * Returns 0 on success. Does not free @bht itself.
 */
int dm_bht_destroy(struct dm_bht *bht)
{
	int depth;
	int cpu = 0;

	depth = bht->depth;
	while (depth-- != 0) {
		struct dm_bht_entry *entry = bht->levels[depth].entries;
		struct dm_bht_entry *entry_end = entry +
						 bht->levels[depth].count;
		int state;

		for (; entry < entry_end; ++entry) {
			state = atomic_read(&entry->state);
			switch (state) {
			/* At present, no other states free memory,
			 * but that will change.
			 */
			case DM_BHT_ENTRY_UNALLOCATED:
				/* Allocated with improper state */
				BUG_ON(entry->nodes);
				continue;
			default:
				BUG_ON(!entry->nodes);
				__free_page(virt_to_page(entry->nodes));
				break;
			}
		}
		kfree(bht->levels[depth].entries);
		bht->levels[depth].entries = NULL;
	}
	kfree(bht->levels);
	for (cpu = 0; cpu < nr_cpu_ids; ++cpu)
		if (bht->hash_desc[cpu].tfm)
			crypto_free_hash(bht->hash_desc[cpu].tfm);
	return 0;
}
EXPORT_SYMBOL(dm_bht_destroy);

/*-----------------------------------------------
 * Accessors
 *-----------------------------------------------*/

/**
 * dm_bht_sectors - return the sectors required on disk
 * @bht:	pointer to a dm_bht_create()d bht
 */
sector_t dm_bht_sectors(const struct dm_bht *bht)
{
	return bht->sectors;
}
EXPORT_SYMBOL(dm_bht_sectors);

/**
 * dm_bht_set_read_cb - set read callback
 * @bht:	pointer to a dm_bht_create()d bht
 * @read_cb:	callback function used for all read requests by @bht
 */
void dm_bht_set_read_cb(struct dm_bht *bht, dm_bht_callback read_cb)
{
	bht->read_cb = read_cb;
}
EXPORT_SYMBOL(dm_bht_set_read_cb);

/**
 * dm_bht_set_write_cb - set write callback
 * @bht:	pointer to a dm_bht_create()d bht
 * @write_cb:	callback function used for all write requests by @bht
 */
void dm_bht_set_write_cb(struct dm_bht *bht, dm_bht_callback write_cb)
{
	bht->write_cb = write_cb;
}
EXPORT_SYMBOL(dm_bht_set_write_cb);

/**
 * dm_bht_set_root_hexdigest - sets an unverified root digest hash from hex
 * @bht:	pointer to a dm_bht_create()d bht
 * @hexdigest:	array of u8s containing the new digest in hex
 * Returns non-zero on error.  hexdigest should be NUL terminated.
 */
int dm_bht_set_root_hexdigest(struct dm_bht *bht, const u8 *hexdigest)
{
	/* Make sure we have at least the bytes expected */
	if (strnlen((char *)hexdigest, bht->digest_size * 2) !=
	    bht->digest_size * 2) {
		DMERR("root digest length does not match hash algorithm");
		return -EINVAL;
	}
	dm_bht_hex_to_bin(bht->root_digest, hexdigest, bht->digest_size);
#ifdef CONFIG_DM_DEBUG
	DMINFO("Set root digest to %s. Parsed as -> ", hexdigest);
	dm_bht_log_mismatch(bht, bht->root_digest, bht->root_digest);
#endif
	return 0;
}
EXPORT_SYMBOL(dm_bht_set_root_hexdigest);

/**
 * dm_bht_root_hexdigest - returns root digest in hex
 * @bht:	pointer to a dm_bht_create()d bht
 * @hexdigest:	u8 array of size @available
 * @available:	must be bht->digest_size * 2 + 1
 */
int dm_bht_root_hexdigest(struct dm_bht *bht, u8 *hexdigest, int available)
{
	if (available < 0 ||
	    ((unsigned int)available) < bht->digest_size * 2 + 1) {
		DMERR("hexdigest has too few bytes available");
		return -EINVAL;
	}
	dm_bht_bin_to_hex(bht->root_digest, hexdigest, bht->digest_size);
	return 0;
}
EXPORT_SYMBOL(dm_bht_root_hexdigest);

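/*
 * Example (sketch): exporting the root digest for a hypothetical "sha256"
 * tree (digest_size = 32):
 *
 *	u8 hex[32 * 2 + 1];	// DM_BHT_MAX_DIGEST_SIZE * 2 + 1 also works
 *
 *	if (!dm_bht_root_hexdigest(bht, hex, sizeof(hex)))
 *		DMINFO("root: %s", hex);
 */
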
/**
 * dm_bht_set_salt - sets the salt used, in hex
 * @bht:	pointer to a dm_bht_create()d bht
 * @hexsalt:	salt string, as hex; will be zero-padded or truncated to
 *		DM_BHT_SALT_SIZE * 2 hex digits.
 */
void dm_bht_set_salt(struct dm_bht *bht, const char *hexsalt)
{
	size_t saltlen = min(strlen(hexsalt) / 2, sizeof(bht->salt));

	bht->have_salt = true;
	memset(bht->salt, 0, sizeof(bht->salt));
	dm_bht_hex_to_bin(bht->salt, (const u8 *)hexsalt, saltlen);
}
EXPORT_SYMBOL(dm_bht_set_salt);

/**
 * dm_bht_salt - returns the salt used, in hex
 * @bht:	pointer to a dm_bht_create()d bht
 * @hexsalt:	buffer to put salt into, of length DM_BHT_SALT_SIZE * 2 + 1.
 */
int dm_bht_salt(struct dm_bht *bht, char *hexsalt)
{
	if (!bht->have_salt)
		return -EINVAL;
	dm_bht_bin_to_hex(bht->salt, (u8 *)hexsalt, sizeof(bht->salt));
	return 0;
}
EXPORT_SYMBOL(dm_bht_salt);