m/n/b/mkverity: refactor into VerityEncoder

The implementation was refactored into a stream-oriented VerityEncoder and exposed for use outside the mkverity tool. In addition, end-to-end tests were provided.

Change-Id: I2d009ca8030d6a86e9d6dbe6d6ae60a3b84d2d74
Reviewed-on: https://review.monogon.dev/c/monogon/+/314
Reviewed-by: Sergiusz Bazanski <serge@monogon.tech>
diff --git a/metropolis/node/build/mkverity/BUILD.bazel b/metropolis/node/build/mkverity/BUILD.bazel
index d749762..8d67da3 100644
--- a/metropolis/node/build/mkverity/BUILD.bazel
+++ b/metropolis/node/build/mkverity/BUILD.bazel
@@ -1,14 +1,15 @@
 load("@io_bazel_rules_go//go:def.bzl", "go_binary", "go_library")
 
-go_library(
-    name = "go_default_library",
-    srcs = ["main.go"],
-    importpath = "source.monogon.dev/metropolis/node/build/mkverity",
-    visibility = ["//visibility:private"],
-)
-
 go_binary(
     name = "mkverity",
     embed = [":go_default_library"],
     visibility = ["//visibility:private"],
 )
+
+go_library(
+    name = "go_default_library",
+    srcs = ["mkverity.go"],
+    importpath = "source.monogon.dev/metropolis/node/build/mkverity",
+    visibility = ["//visibility:private"],
+    deps = ["//metropolis/pkg/verity:go_default_library"],
+)
diff --git a/metropolis/node/build/mkverity/main.go b/metropolis/node/build/mkverity/main.go
deleted file mode 100644
index d31e647..0000000
--- a/metropolis/node/build/mkverity/main.go
+++ /dev/null
@@ -1,390 +0,0 @@
-// Copyright 2020 The Monogon Project Authors.
-//
-// SPDX-License-Identifier: Apache-2.0
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// This package implements a subset of the veritysetup tool from cryptsetup,
-// which is a userland tool to interact with dm-verity devices/images. It was
-// rewritten to provide the minimum of functionality needed for Metropolis
-// without having to package, link against and maintain the original C
-// veritysetup tool.
-//
-// dm-verity is a Linux device mapper target that allows integrity verification of
-// a read-only block device. The block device whose integrity should be checked
-// (the 'data device') must be first processed by a tool like veritysetup (or this
-// tool, mkverity) to generate a hash device and root hash.
-// The original data device, hash device and root hash are then set up as a device
-// mapper target, and any read performed from the data device through the verity
-// target will be verified for integrity by Linux using the hash device and root
-// hash.
-//
-// Internally, the hash device is a Merkle tree of all the bytes in the data
-// device, layed out as layers of 'hash blocks'. Starting with data bytes, layers
-// are built recursively, with each layer's output hash blocks becoming the next
-// layer's data input, ending with the single root hash.
-//
-// For more information about the internals, see the Linux and cryptsetup
-// upstream code:
-//
-// https://gitlab.com/cryptsetup/cryptsetup/wikis/DMVerity#verity-superblock-format
-package main
-
-import (
-	"bytes"
-	"crypto/rand"
-	"crypto/sha256"
-	"encoding/binary"
-	"fmt"
-	"io"
-	"log"
-	"os"
-)
-
-// veritySuperblock represents data layout inside of a dm-verity hash block
-// device superblock. It follows a preexisting verity implementation:
-//
-// https://gitlab.com/cryptsetup/cryptsetup/wikis/DMVerity#verity-superblock-format
-type veritySuperblock struct {
-	// signature is the magic signature of a verity hash device superblock,
-	// "verity\0\0".
-	signature [8]uint8
-	// version specifies a superblock format. This structure describes version
-	// '1'.
-	version uint32
-	// hashType defaults to '1' outside Chrome OS, according to scarce dm-verity
-	// documentation.
-	hashType uint32
-	// uuid contains a UUID of the hash device.
-	uuid [16]uint8
-	// algorithm stores an ASCII-encoded name of the hash function used.
-	algorithm [32]uint8
-
-	// dataBlockSize specifies a size of a single data device block, in bytes.
-	dataBlockSize uint32
-	// hashBlockSize specifies a size of a single hash device block, in bytes.
-	hashBlockSize uint32
-	// dataBlocks contains a count of blocks available on the data device.
-	dataBlocks uint64
-
-	// saltSize encodes the size of hash block salt, up to the maximum of 256 bytes.
-	saltSize uint16
-
-	// _pad1 is a zeroed space prepending the salt; unused.
-	_pad1 [6]uint8
-	// exactly saltSize bytes of salt are prepended to data blocks before hashing.
-	salt [256]uint8
-	// _pad2 is a zeroed space after the salt; unused.
-	_pad2 [168]uint8
-}
-
-// divideAndRoundup performs an integer division and returns a rounded up
-// result. Useful in calculating block counts.
-func divideAndRoundup(a, b uint64) uint64 {
-	r := a / b
-	if a%b != 0 {
-		r++
-	}
-	return r
-}
-
-// newSuperblock builds a dm-verity hash device superblock based on the size
-// of data image file reachable through dataImagePath.
-// It returns either a fully initialized veritySuperblock, or an
-// initialization error.
-func newSuperblock(dataImagePath string) (*veritySuperblock, error) {
-	// This implementation only handles SHA256-based verity hash images
-	// with a specific 4096-byte block size.
-	// Block sizes can be updated by adjusting the struct literal below.
-	// A change of a hashing algorithm would require a refactor of
-	// saltedDigest, and references to sha256.Size.
-	//
-	// Fill in the defaults (compare with veritySuperblock definition).
-	sb := veritySuperblock{
-		signature:     [8]uint8{'v', 'e', 'r', 'i', 't', 'y', 0, 0},
-		version:       1,
-		hashType:      1,
-		algorithm:     [32]uint8{'s', 'h', 'a', '2', '5', '6'},
-		saltSize:      256,
-		dataBlockSize: 4096,
-		hashBlockSize: 4096,
-	}
-
-	// Get the data image size and compute the data block count.
-	ds, err := os.Stat(dataImagePath)
-	if err != nil {
-		return nil, fmt.Errorf("while stat-ing data device: %w", err)
-	}
-	if !ds.Mode().IsRegular() {
-		return nil, fmt.Errorf("this program only accepts regular files")
-	}
-	sb.dataBlocks = divideAndRoundup(uint64(ds.Size()), uint64(sb.dataBlockSize))
-
-	// Fill in the superblock UUID and cryptographic salt.
-	if _, err := rand.Read(sb.uuid[:]); err != nil {
-		return nil, fmt.Errorf("when generating UUID: %w", err)
-	}
-	if _, err := rand.Read(sb.salt[:]); err != nil {
-		return nil, fmt.Errorf("when generating salt: %w", err)
-	}
-
-	return &sb, nil
-}
-
-// saltedDigest computes and returns a SHA256 sum of a block prepended
-// with a Superblock-defined salt.
-func (sb *veritySuperblock) saltedDigest(data []byte) (digest [sha256.Size]byte) {
-	h := sha256.New()
-	h.Write(sb.salt[:int(sb.saltSize)])
-	h.Write(data)
-	copy(digest[:], h.Sum(nil))
-	return
-}
-
-// dataBlocksPerHashBlock returns the count of hash operation outputs that
-// fit in a hash device block. This is also the amount of data device
-// blocks it takes to populate a hash device block.
-func (sb *veritySuperblock) dataBlocksPerHashBlock() uint64 {
-	return uint64(sb.hashBlockSize) / sha256.Size
-}
-
-// computeHashBlock reads at most sb.dataBlocksPerHashBlock blocks from
-// the given reader object, returning a padded hash block of length
-// defined by sb.hashBlockSize, and an error, if encountered.
-// In case a non-nil block is returned, it's guaranteed to contain at
-// least one hash. An io.EOF signals that there is no more to be read
-// from 'r'.
-func (sb *veritySuperblock) computeHashBlock(r io.Reader) ([]byte, error) {
-	// Preallocate a whole hash block.
-	hblk := bytes.NewBuffer(make([]byte, 0, sb.hashBlockSize))
-
-	// For every data block, compute a hash and place it in hblk. Continue
-	// till EOF.
-	for b := uint64(0); b < sb.dataBlocksPerHashBlock(); b++ {
-		dbuf := make([]byte, sb.dataBlockSize)
-		// Attempt to read enough data blocks to make a complete hash block.
-		n, err := io.ReadFull(r, dbuf)
-		// If any data was read, make a hash and add it to the hash buffer.
-		if n != 0 {
-			hash := sb.saltedDigest(dbuf)
-			hblk.Write(hash[:])
-		}
-		// Handle the read errors.
-		switch err {
-		case nil:
-		case io.ErrUnexpectedEOF, io.EOF:
-			// io.ReadFull returns io.ErrUnexpectedEOF after a partial read,
-			// and io.EOF if no bytes were read. In both cases it's possible
-			// to end up with a partially filled hash block.
-			if hblk.Len() != 0 {
-				// Return a zero-padded hash block if any hashes were written
-				// to it, and signal that no more blocks can be built.
-				res := hblk.Bytes()
-				return res[:cap(res)], io.EOF
-			}
-			// Return nil if the block doesn't contain any hashes.
-			return nil, io.EOF
-		default:
-			// Wrap unhandled read errors.
-			return nil, fmt.Errorf("while computing a hash block: %w", err)
-		}
-	}
-	// Return a completely filled hash block.
-	res := hblk.Bytes()
-	return res[:cap(res)], nil
-}
-
-// writeSuperblock writes a verity superblock to a given writer object.
-// It returns a write error, if encountered.
-func (sb *veritySuperblock) writeSuperblock(w io.Writer) error {
-	// Write the superblock.
-	if err := binary.Write(w, binary.LittleEndian, sb); err != nil {
-		return fmt.Errorf("while writing a header: %w", err)
-	}
-
-	// Get the padding size by substracting current offset from a hash block
-	// size.
-	co := binary.Size(sb)
-	pbc := int(sb.hashBlockSize) - co
-	if pbc <= 0 {
-		return fmt.Errorf("hash device block size smaller than dm-verity superblock")
-	}
-
-	// Write the padding bytes at the end of the block.
-	if _, err := w.Write(bytes.Repeat([]byte{0}, pbc)); err != nil {
-		return fmt.Errorf("while writing padding: %w", err)
-	}
-	return nil
-}
-
-// computeLevelZero produces the base level of a hash tree. It's the only
-// level calculated based on raw input from the data image.
-// It returns a byte slice containing one or more hash blocks, depending
-// on sb.dataBlocks and sb.hashBlockSize, or an error. The returned slice
-// length is guaranteed to be a multiple of sb.hashBlockSize if no error
-// is returned.
-// BUG(mz): Current implementation requires a 1/128th of the data image
-// size to be allocatable on the heap.
-func (sb *veritySuperblock) computeLevel(r io.Reader) ([]byte, error) {
-	// hbuf will store all the computed hash blocks.
-	var hbuf bytes.Buffer
-	// Compute one or more hash blocks, reading all data available in the
-	// 'r' reader object, and write them into hbuf.
-	for {
-		hblk, err := sb.computeHashBlock(r)
-		if err != nil && err != io.EOF {
-			return nil, fmt.Errorf("while building a hash tree level: %w", err)
-		}
-		if hblk != nil {
-			_, err := hbuf.Write(hblk)
-			if err != nil {
-				return nil, fmt.Errorf("while writing to hash block buffer: %w", err)
-			}
-		}
-		if err == io.EOF {
-			break
-		}
-	}
-	return hbuf.Bytes(), nil
-}
-
-// computeHashTree builds a complete hash tree based on the given reader
-// object. Levels are appended to resulting hashTree from bottom to top.
-// It returns a verity hash tree, a verity root hash, and an error, if
-// encountered.
-func (sb *veritySuperblock) computeHashTree(r io.Reader) ([][]byte, []byte, error) {
-	// First, hash contents of the data image. This will result in a bottom
-	// level of the hash tree.
-	var hashTree [][]byte
-	lz, err := sb.computeLevel(r)
-	if err != nil {
-		return nil, nil, fmt.Errorf("while computing the base level: %w", err)
-	}
-	hashTree = append(hashTree, lz)
-
-	// Other levels are built by hashing the hash blocks comprising a level
-	// below.
-	for {
-		// Create the next level by hashing the previous one.
-		pl := hashTree[len(hashTree)-1]
-		nl, err := sb.computeLevel(bytes.NewReader(pl))
-		if err != nil {
-			return nil, nil, fmt.Errorf("while computing a level: %w", err)
-		}
-		// Append the resulting next level to a tree.
-		hashTree = append(hashTree, nl)
-
-		if len(nl) == int(sb.hashBlockSize) {
-			// The last level to compute has a size of exactly one hash block.
-			// That's the root level. Its hash serves as a cryptographic root of
-			// trust and is returned separately.
-			rootHash := sb.saltedDigest(nl)
-			return hashTree, rootHash[:], nil
-		}
-	}
-}
-
-// writeHashTree writes a verity-formatted hash tree to the given writer
-// object. Compare with computeHashTree.
-// It returns the count of bytes written and a write error, if encountered.
-func (sb *veritySuperblock) writeHashTree(w io.Writer, treeLevels [][]byte) error {
-	// Write the hash tree levels from top to bottom.
-	for l := len(treeLevels) - 1; l >= 0; l-- {
-		level := treeLevels[l]
-		// Call w.Write until a whole level is written.
-		for len(level) != 0 {
-			n, err := w.Write(level)
-			if err != nil && err != io.ErrShortWrite {
-				return fmt.Errorf("while writing a level: %w", err)
-			}
-			level = level[n:]
-		}
-	}
-	return nil
-}
-
-// createHashImage creates a complete dm-verity hash image at
-// hashImagePath. Contents of the file at dataImagePath are accessed
-// read-only, hashed and written to the hash image in the process.
-// It returns a verity root hash and an error, if encountered.
-func createHashImage(dataImagePath, hashImagePath string) ([]byte, error) {
-	// Inspect the data image and build a verity superblock based on its size.
-	sb, err := newSuperblock(dataImagePath)
-	if err != nil {
-		return nil, fmt.Errorf("while building a superblock: %w", err)
-	}
-
-	// Open the data image for reading.
-	dataImage, err := os.Open(dataImagePath)
-	if err != nil {
-		return nil, fmt.Errorf("while opening the data image: %w", err)
-	}
-	defer dataImage.Close()
-
-	// Create an empty hash image file.
-	hashImage, err := os.OpenFile(hashImagePath, os.O_RDWR|os.O_CREATE, 0644)
-	if err != nil {
-		return nil, fmt.Errorf("while opening the hash image for writing: %w", err)
-	}
-	defer hashImage.Close()
-
-	// Write the superblock to the hash image.
-	if err = sb.writeSuperblock(hashImage); err != nil {
-		return nil, fmt.Errorf("while writing the superblock: %w", err)
-	}
-
-	// Compute a verity hash tree by hashing contents of the data image. Then,
-	// write it to the hash image.
-	treeLevels, rootHash, err := sb.computeHashTree(dataImage)
-	if err != nil {
-		return nil, fmt.Errorf("while building a hash tree: %w", err)
-	}
-	if err = sb.writeHashTree(hashImage, treeLevels); err != nil {
-		return nil, fmt.Errorf("while writing a hash tree: %w", err)
-	}
-
-	// Return a verity root hash, serving as a root of trust.
-	return rootHash, nil
-}
-
-// usage prints program usage information.
-func usage(executable string) {
-	fmt.Println("Usage: ", executable, " format <data image> <hash image>")
-}
-
-func main() {
-	// Process the command line arguments maintaining a partial
-	// compatibility with veritysetup.
-	if len(os.Args) != 4 {
-		usage(os.Args[0])
-		os.Exit(2)
-	}
-	command := os.Args[1]
-	dataImagePath := os.Args[2]
-	hashImagePath := os.Args[3]
-
-	switch command {
-	case "format":
-		rootHash, err := createHashImage(dataImagePath, hashImagePath)
-		if err != nil {
-			log.Fatal(err)
-		}
-		// The output differs from the original veritysetup utility in that hash
-		// isn't prepended by "Root hash: " string. It's left this way to
-		// facilitate machine processing.
-		fmt.Printf("%x", rootHash)
-	default:
-		usage(os.Args[0])
-		os.Exit(2)
-	}
-}
diff --git a/metropolis/node/build/mkverity/mkverity.go b/metropolis/node/build/mkverity/mkverity.go
new file mode 100644
index 0000000..272bc73
--- /dev/null
+++ b/metropolis/node/build/mkverity/mkverity.go
@@ -0,0 +1,101 @@
+// Copyright 2020 The Monogon Project Authors.
+//
+// SPDX-License-Identifier: Apache-2.0
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// This package implements a command line tool that creates dm-verity hash
+// images at a selected path, given an existing data image. The tool
+// outputs a Verity mapping table on success.
+//
+// For more information, see:
+// - source.monogon.dev/metropolis/pkg/verity
+// - https://gitlab.com/cryptsetup/cryptsetup/wikis/DMVerity
+package main
+
+import (
+	"fmt"
+	"io"
+	"log"
+	"os"
+
+	"source.monogon.dev/metropolis/pkg/verity"
+)
+
+// createHashImage creates a complete dm-verity hash image at
+// hashImagePath. Contents of the file at dataImagePath are accessed
+// read-only, hashed and written to the hash image in the process.
+// The verity superblock is written only if wsb is true.
+// It returns a string-convertible VerityMappingTable, or an error.
+func createHashImage(dataImagePath, hashImagePath string, wsb bool) (*verity.MappingTable, error) {
+	// Open the data image for reading.
+	dataImage, err := os.Open(dataImagePath)
+	if err != nil {
+		return nil, fmt.Errorf("while opening the data image: %w", err)
+	}
+	defer dataImage.Close()
+	// Create an empty hash image file.
+	hashImage, err := os.OpenFile(hashImagePath, os.O_RDWR|os.O_CREATE, 0644)
+	if err != nil {
+		return nil, fmt.Errorf("while opening the hash image for writing: %w", err)
+	}
+	defer hashImage.Close()
+
+	// Write hashImage contents. Start with initializing a verity encoder,
+	// setting hashImage as its output.
+	v, err := verity.NewEncoder(hashImage, wsb)
+	if err != nil {
+		return nil, fmt.Errorf("while initializing a verity encoder: %w", err)
+	}
+	// Hash the contents of dataImage, block by block.
+	_, err = io.Copy(v, dataImage)
+	if err != nil {
+		return nil, fmt.Errorf("while reading the data image: %w", err)
+	}
+	// The resulting hash tree won't be written until Close is called.
+	err = v.Close()
+	if err != nil {
+		return nil, fmt.Errorf("while writing the hash image: %w", err)
+	}
+
+	// Return an encoder-generated verity mapping table, containing the salt
+	// and the root hash.
+	mt, err := v.MappingTable(dataImagePath, hashImagePath)
+	if err != nil {
+		return nil, fmt.Errorf("while querying for the mapping table: %w", err)
+	}
+	return mt, nil
+}
+
+// usage prints program usage information.
+func usage(executable string) {
+	fmt.Println("Usage: ", executable, " <data image> <hash image>")
+}
+
+func main() {
+	if len(os.Args) != 3 {
+		usage(os.Args[0])
+		os.Exit(2)
+	}
+	dataImagePath := os.Args[1]
+	hashImagePath := os.Args[2]
+
+	// Attempt to build a new Verity hash image at hashImagePath, based on
+	// the data image at dataImagePath. Include the Verity superblock.
+	mt, err := createHashImage(dataImagePath, hashImagePath, true)
+	if err != nil {
+		log.Fatal(err)
+	}
+	// Print a Device Mapper compatible mapping table.
+	fmt.Println(mt)
+}
diff --git a/metropolis/pkg/verity/BUILD.bazel b/metropolis/pkg/verity/BUILD.bazel
new file mode 100644
index 0000000..b500b31
--- /dev/null
+++ b/metropolis/pkg/verity/BUILD.bazel
@@ -0,0 +1,25 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
+load("//metropolis/test/ktest:ktest.bzl", "ktest")
+
+go_library(
+    name = "go_default_library",
+    srcs = ["encoder.go"],
+    importpath = "source.monogon.dev/metropolis/pkg/verity",
+    visibility = ["//visibility:public"],
+)
+
+go_test(
+    name = "go_default_test",
+    srcs = ["encoder_test.go"],
+    embed = [":go_default_library"],
+    deps = [
+        "//metropolis/pkg/devicemapper:go_default_library",
+        "@com_github_stretchr_testify//require:go_default_library",
+        "@org_golang_x_sys//unix:go_default_library",
+    ],
+)
+
+ktest(
+    cmdline = "ramdisk_size=16384",
+    tester = ":go_default_test",
+)
diff --git a/metropolis/pkg/verity/encoder.go b/metropolis/pkg/verity/encoder.go
new file mode 100644
index 0000000..322acaa
--- /dev/null
+++ b/metropolis/pkg/verity/encoder.go
@@ -0,0 +1,548 @@
+// Copyright 2020 The Monogon Project Authors.
+//
+// SPDX-License-Identifier: Apache-2.0
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// This package implements the minimum of functionality needed to generate and
+// map dm-verity images. It's provided in order to avoid a perceived higher
+// long term cost of packaging, linking against and maintaining the original C
+// veritysetup tool.
+//
+// dm-verity is a Linux device mapper target that allows integrity verification of
+// a read-only block device. The block device whose integrity should be checked
+// (the 'data device') must be first processed by a tool like veritysetup to
+// generate a hash device and root hash.
+// The original data device, hash device and root hash are then set up as a device
+// mapper target, and any read performed from the data device through the verity
+// target will be verified for integrity by Linux using the hash device and root
+// hash.
+//
+// Internally, the hash device is a Merkle tree of all the bytes in the data
+// device, laid out as layers of 'hash blocks'. Starting with data bytes, layers
+// are built recursively, with each layer's output hash blocks becoming the next
+// layer's data input, ending with the single root hash.
+//
+// For more information about the internals, see the Linux and cryptsetup
+// upstream code:
+//
+// https://gitlab.com/cryptsetup/cryptsetup/wikis/DMVerity
+package verity
+
+import (
+	"bytes"
+	"crypto/rand"
+	"crypto/sha256"
+	"encoding/binary"
+	"encoding/hex"
+	"fmt"
+	"io"
+	"strconv"
+	"strings"
+)
+
+// superblock represents data layout inside of a dm-verity hash block
+// device superblock. It follows a preexisting verity implementation:
+//
+// https://gitlab.com/cryptsetup/cryptsetup/wikis/DMVerity#verity-superblock-format
+type superblock struct {
+	// signature is the magic signature of a verity hash device superblock,
+	// "verity\0\0".
+	signature [8]byte
+	// version specifies a superblock format. This structure describes version
+	// '1'.
+	version uint32
+	// hashType defaults to '1' outside Chrome OS, according to scarce dm-verity
+	// documentation.
+	hashType uint32
+	// uuid contains a UUID of the hash device.
+	uuid [16]byte
+	// algorithm stores an ASCII-encoded name of the hash function used.
+	algorithm [32]byte
+
+	// dataBlockSize specifies a size of a single data device block, in bytes.
+	dataBlockSize uint32
+	// hashBlockSize specifies a size of a single hash device block, in bytes.
+	hashBlockSize uint32
+	// dataBlocks contains a count of blocks available on the data device.
+	dataBlocks uint64
+
+	// saltSize encodes the size of hash block salt, up to the maximum of 256 bytes.
+	saltSize uint16
+
+	// padding
+	_ [6]byte
+	// exactly saltSize bytes of salt are prepended to data blocks before hashing.
+	saltBuffer [256]byte
+	// padding
+	_ [168]byte
+}
+
+// newSuperblock builds a dm-verity hash device superblock based on
+// hardcoded defaults. dataBlocks is the only field left for later
+// initialization.
+// It returns either a partially initialized superblock, or an error.
+func newSuperblock() (*superblock, error) {
+	// This implementation only handles SHA256-based verity hash images
+	// with a specific 4096-byte block size.
+	// Block sizes can be updated by adjusting the struct literal below.
+	// A change of a hashing algorithm would require a refactor of
+	// saltedDigest, and references to sha256.Size.
+	//
+	// Fill in the defaults (compare with superblock definition).
+	sb := superblock{
+		signature:     [8]byte{'v', 'e', 'r', 'i', 't', 'y', 0, 0},
+		version:       1,
+		hashType:      1,
+		algorithm:     [32]byte{'s', 'h', 'a', '2', '5', '6'},
+		saltSize:      64,
+		dataBlockSize: 4096,
+		hashBlockSize: 4096,
+	}
+
+	// Fill in the superblock UUID and cryptographic salt.
+	if _, err := rand.Read(sb.uuid[:]); err != nil {
+		return nil, fmt.Errorf("when generating UUID: %w", err)
+	}
+	if _, err := rand.Read(sb.saltBuffer[:]); err != nil {
+		return nil, fmt.Errorf("when generating salt: %w", err)
+	}
+
+	return &sb, nil
+}
+
+// salt returns a slice of sb.saltBuffer actually occupied by
+// salt bytes, of sb.saltSize length.
+func (sb *superblock) salt() []byte {
+	return sb.saltBuffer[:int(sb.saltSize)]
+}
+
+// algorithmName returns a name of the algorithm used to hash data block
+// digests.
+func (sb *superblock) algorithmName() string {
+	size := bytes.IndexByte(sb.algorithm[:], 0x00)
+	return string(sb.algorithm[:size])
+}
+
+// saltedDigest computes and returns a SHA256 sum of a block prepended
+// with a Superblock-defined salt.
+func (sb *superblock) saltedDigest(data []byte) (digest [sha256.Size]byte) {
+	h := sha256.New()
+	h.Write(sb.salt())
+	h.Write(data)
+	copy(digest[:], h.Sum(nil))
+	return
+}
+
+// dataBlocksPerHashBlock returns the count of hash operation outputs that
+// fit in a hash device block. This is also the amount of data device
+// blocks it takes to populate a hash device block.
+func (sb *superblock) dataBlocksPerHashBlock() uint64 {
+	return uint64(sb.hashBlockSize) / sha256.Size
+}
+
+// computeHashBlock reads at most sb.dataBlocksPerHashBlock blocks from
+// the given reader object, returning a padded hash block of length
+// defined by sb.hashBlockSize, the count of digests output, and an
+// error, if encountered.
+// In case a non-nil block is returned, it's guaranteed to contain at
+// least one hash. An io.EOF signals that there is no more to be read.
+func (sb *superblock) computeHashBlock(r io.Reader) ([]byte, uint64, error) {
+	// dcnt stores the total count of data blocks processed, which is the
+	// same as the count of digests output.
+	var dcnt uint64
+	// Preallocate a whole hash block.
+	hblk := bytes.NewBuffer(make([]byte, 0, sb.hashBlockSize))
+
+	// For every data block, compute a hash and place it in hblk. Continue
+	// till EOF.
+	for b := uint64(0); b < sb.dataBlocksPerHashBlock(); b++ {
+		dbuf := make([]byte, sb.dataBlockSize)
+		// Attempt to read enough data blocks to make a complete hash block.
+		n, err := io.ReadFull(r, dbuf)
+		// If any data was read, make a hash and add it to the hash buffer.
+		if n != 0 {
+			hash := sb.saltedDigest(dbuf)
+			hblk.Write(hash[:])
+			dcnt++
+		}
+		// Handle the read errors.
+		switch err {
+		case nil:
+		case io.ErrUnexpectedEOF, io.EOF:
+			// io.ReadFull returns io.ErrUnexpectedEOF after a partial read,
+			// and io.EOF if no bytes were read. In both cases it's possible
+			// to end up with a partially filled hash block.
+			if hblk.Len() != 0 {
+				// Return a zero-padded hash block if any hashes were written
+				// to it, and signal that no more blocks can be built.
+				res := hblk.Bytes()
+				return res[:cap(res)], dcnt, io.EOF
+			}
+			// Return nil if the block doesn't contain any hashes.
+			return nil, 0, io.EOF
+		default:
+			// Wrap unhandled read errors.
+			return nil, 0, fmt.Errorf("while computing a hash block: %w", err)
+		}
+	}
+	// Return a completely filled hash block.
+	res := hblk.Bytes()
+	return res[:cap(res)], dcnt, nil
+}
+
+// WriteTo writes a verity superblock to a given writer object.
+// It returns the count of bytes written, and a write error, if
+// encountered.
+func (sb *superblock) WriteTo(w io.Writer) (int64, error) {
+	// Write the superblock.
+	if err := binary.Write(w, binary.LittleEndian, sb); err != nil {
+		return -1, fmt.Errorf("while writing a header: %w", err)
+	}
+
+	// Get the padding size by subtracting current offset from a hash block
+	// size.
+	co := int(binary.Size(sb))
+	pbc := int(sb.hashBlockSize) - int(co)
+	if pbc <= 0 {
+		return int64(co), fmt.Errorf("hash device block size smaller than dm-verity superblock")
+	}
+
+	// Write the padding bytes at the end of the block.
+	n, err := w.Write(bytes.Repeat([]byte{0}, pbc))
+	co += n
+	if err != nil {
+		return int64(co), fmt.Errorf("while writing padding: %w", err)
+	}
+	return int64(co), nil
+}
+
+// computeLevel produces a verity hash tree level based on data read from
+// a given reader object.
+// It returns a byte slice containing one or more hash blocks, or an
+// error.
+// BUG(mz): Current implementation requires a 1/128th of the data image
+// size to be allocatable on the heap.
+func (sb *superblock) computeLevel(r io.Reader) ([]byte, error) {
+	// hbuf will store all the computed hash blocks.
+	var hbuf bytes.Buffer
+	// Compute one or more hash blocks, reading all data available in the
+	// 'r' reader object, and write them into hbuf.
+	for {
+		hblk, _, err := sb.computeHashBlock(r)
+		if err != nil && err != io.EOF {
+			return nil, fmt.Errorf("while building a hash tree level: %w", err)
+		}
+		if hblk != nil {
+			_, err := hbuf.Write(hblk)
+			if err != nil {
+				return nil, fmt.Errorf("while writing to hash block buffer: %w", err)
+			}
+		}
+		if err == io.EOF {
+			break
+		}
+	}
+	return hbuf.Bytes(), nil
+}
+
+// hashTree stores hash tree levels, each level comprising one or more
+// Verity hash blocks. Levels are ordered from bottom to top.
+type hashTree [][]byte
+
+// push appends a level to the hash tree.
+func (ht *hashTree) push(nl []byte) {
+	*ht = append(*ht, nl)
+}
+
+// top returns the topmost level of the hash tree.
+func (ht *hashTree) top() []byte {
+	if len(*ht) == 0 {
+		return nil
+	}
+	return (*ht)[len(*ht)-1]
+}
+
+// WriteTo writes a verity-formatted hash tree to the given writer
+// object.
+// It returns a write error, if encountered.
+func (ht *hashTree) WriteTo(w io.Writer) (int64, error) {
+	// t keeps the count of bytes written to w.
+	var t int64
+	// Write the hash tree levels from top to bottom.
+	for l := len(*ht) - 1; l >= 0; l-- {
+		level := (*ht)[l]
+		// Call w.Write until a whole level is written.
+		for len(level) != 0 {
+			n, err := w.Write(level)
+			if err != nil {
+				return t, fmt.Errorf("while writing a level: %w", err)
+			}
+			level = level[n:]
+			t += int64(n)
+		}
+	}
+	return t, nil
+}
+
+// MappingTable aggregates data needed to generate a complete Verity
+// mapping table.
+type MappingTable struct {
+	// superblock defines the following elements of the mapping table:
+	// - data device block size
+	// - hash device block size
+	// - total count of data blocks
+	// - hash algorithm used
+	// - cryptographic salt used
+	superblock *superblock
+	// dataDevicePath is the filesystem path of the data device used as part
+	// of the Verity Device Mapper target.
+	dataDevicePath string
+	// hashDevicePath is the filesystem path of the hash device used as part
+	// of the Verity Device Mapper target.
+	hashDevicePath string
+	// hashStart marks the starting block of the Verity hash tree.
+	hashStart int
+	// rootHash stores a cryptographic hash of the top hash tree block.
+	rootHash []byte
+}
+
+// VerityParameterList returns a list of Verity target parameters, ordered
+// as they would appear in a parameter string.
+func (t *MappingTable) VerityParameterList() []string {
+	return []string{
+		"1",
+		t.dataDevicePath,
+		t.hashDevicePath,
+		strconv.FormatUint(uint64(t.superblock.dataBlockSize), 10),
+		strconv.FormatUint(uint64(t.superblock.hashBlockSize), 10),
+		strconv.FormatUint(uint64(t.superblock.dataBlocks), 10),
+		strconv.FormatInt(int64(t.hashStart), 10),
+		t.superblock.algorithmName(),
+		hex.EncodeToString(t.rootHash),
+		hex.EncodeToString(t.superblock.salt()),
+	}
+}
+
+// TargetParameters returns the mapping table as a list of Device Mapper
+// target parameters, ordered as they would appear in a parameter string
+// (see: String).
+func (t *MappingTable) TargetParameters() []string {
+	return append(
+		[]string{
+			"0",
+			strconv.FormatUint(t.Length(), 10),
+			"verity",
+		},
+		t.VerityParameterList()...,
+	)
+}
+
+// String returns a string-formatted mapping table for use with Device
+// Mapper.
+// BUG(mz): unescaped whitespace can appear in block device paths
+func (t *MappingTable) String() string {
+	return strings.Join(t.TargetParameters(), " ")
+}
+
+// Length returns the data device length, represented as a number of
+// 512-byte sectors.
+func (t *MappingTable) Length() uint64 {
+	return t.superblock.dataBlocks * uint64(t.superblock.dataBlockSize) / 512
+}
+
// encoder transforms data blocks written into it into a verity hash
// tree. It writes out the hash tree only after Close is called on it.
// Create instances with NewEncoder; the zero value is not usable.
type encoder struct {
	// out is the io.Writer the hash tree (and, optionally, the
	// superblock) is written to on Close.
	out io.Writer
	// writeSb, if true, will cause a Verity superblock to be written to the
	// writer object, ahead of the hash tree.
	writeSb bool
	// sb carries the hashing parameters and accumulates the data block
	// count; it contains most of the information needed to build a
	// mapping table.
	sb *superblock
	// bottom stands for the bottom level of the hash tree. It contains
	// complete hash blocks of data written to the encoder.
	bottom bytes.Buffer
	// dataBuffer stages incoming data until enough accumulates to fill
	// a complete hash block.
	dataBuffer bytes.Buffer
	// rootHash stores the verity root hash. It is set on Close and is
	// nil beforehand.
	rootHash []byte
}
+
+// computeHashTree builds a complete hash tree based on the encoder's
+// state. Levels are appended to the returned hash tree starting from the
+// bottom, with the top level written last.
+// e.sb.dataBlocks is set according to the bottom level's length, which
+// must be divisible by e.sb.hashBlockSize.
+// e.rootHash is set on success.
+// It returns an error, if encountered.
+func (e *encoder) computeHashTree() (*hashTree, error) {
+	// Put b at the bottom of the tree. Don't perform a deep copy.
+	ht := hashTree{e.bottom.Bytes()}
+
+	// Other levels are built by hashing the hash blocks comprising a level
+	// below.
+	for {
+		if len(ht.top()) == int(e.sb.hashBlockSize) {
+			// The last level to compute has a size of exactly one hash block.
+			// That's the root level. Its hash serves as a cryptographic root of
+			// trust and is saved into a encoder for later use.
+			// In case the bottom level consists of only one hash block, no more
+			// levels are computed.
+			sd := e.sb.saltedDigest(ht.top())
+			e.rootHash = sd[:]
+			return &ht, nil
+		}
+
+		// Create the next level by hashing the previous one.
+		nl, err := e.sb.computeLevel(bytes.NewReader(ht.top()))
+		if err != nil {
+			return nil, fmt.Errorf("while computing a level: %w", err)
+		}
+		// Append the resulting next level to a tree.
+		ht.push(nl)
+	}
+}
+
+// processDataBuffer processes data blocks contained in e.dataBuffer
+// until no more data is available to form a completely filled hash block.
+// If 'incomplete' is true, all remaining data in e.dataBuffer will be
+// processed, producing a terminating incomplete block.
+// It returns the count of data blocks processed, or an error, if
+// encountered.
+func (e *encoder) processDataBuffer(incomplete bool) (uint64, error) {
+	// tdcnt stores the total count of data blocks processed.
+	var tdcnt uint64
+	// Compute the count of bytes needed to produce a complete hash block.
+	bph := e.sb.dataBlocksPerHashBlock() * uint64(e.sb.dataBlockSize)
+
+	// Iterate until no more data is available in e.dbuf.
+	for uint64(e.dataBuffer.Len()) >= bph || incomplete && e.dataBuffer.Len() != 0 {
+		hb, dcnt, err := e.sb.computeHashBlock(&e.dataBuffer)
+		if err != nil && err != io.EOF {
+			return 0, fmt.Errorf("while processing a data buffer: %w", err)
+		}
+		// Increment the total count of data blocks processed.
+		tdcnt += dcnt
+		// Write the resulting hash block into the level-zero buffer.
+		e.bottom.Write(hb[:])
+	}
+	return tdcnt, nil
+}
+
+// NewEncoder returns a fully initialized encoder, or an error. The
+// encoder will write to the given io.Writer object.
+// A verity superblock will be written, preceding the hash tree, if
+// writeSb is true.
+func NewEncoder(out io.Writer, writeSb bool) (*encoder, error) {
+	sb, err := newSuperblock()
+	if err != nil {
+		return nil, fmt.Errorf("while creating a superblock: %w", err)
+	}
+
+	e := encoder{
+		out:     out,
+		writeSb: writeSb,
+		sb:      sb,
+	}
+	return &e, nil
+}
+
+// Write hashes raw data to form the bottom hash tree level.
+// It returns the number of bytes written, and an error, if encountered.
+func (e *encoder) Write(data []byte) (int, error) {
+	// Copy the input into the data buffer.
+	n, _ := e.dataBuffer.Write(data)
+	// Process only enough data to form a complete hash block. This may
+	// leave excess data in e.dbuf to be processed later on.
+	dcnt, err := e.processDataBuffer(false)
+	if err != nil {
+		return n, fmt.Errorf("while processing the data buffer: %w", err)
+	}
+	// Update the superblock with the count of data blocks written.
+	e.sb.dataBlocks += dcnt
+	return n, nil
+}
+
+// Close builds a complete hash tree based on cached bottom level blocks,
+// then writes it to a preconfigured io.Writer object. A Verity superblock
+// is written, if e.writeSb is true. No data, nor the superblock is written
+// if the encoder is empty.
+// It returns an error, if one was encountered.
+func (e *encoder) Close() error {
+	// Process all buffered data, including data blocks that may not form
+	// a complete hash block.
+	dcnt, err := e.processDataBuffer(true)
+	if err != nil {
+		return fmt.Errorf("while processing the data buffer: %w", err)
+	}
+	// Update the superblock with the count of data blocks written.
+	e.sb.dataBlocks += dcnt
+
+	// Don't write anything if nothing was written to the encoder.
+	if e.bottom.Len() == 0 {
+		return nil
+	}
+
+	// Compute remaining hash tree levels based on the bottom level: e.bottom.
+	ht, err := e.computeHashTree()
+	if err != nil {
+		return fmt.Errorf("while encoding a hash tree: %w", err)
+	}
+
+	// Write the Verity superblock if the encoder was configured to do so.
+	if e.writeSb {
+		if _, err = e.sb.WriteTo(e.out); err != nil {
+			return fmt.Errorf("while writing a superblock: %w", err)
+		}
+	}
+	// Write the hash tree.
+	_, err = ht.WriteTo(e.out)
+	if err != nil {
+		return fmt.Errorf("while writing a hash tree: %w", err)
+	}
+
+	// Reset the encoder.
+	e, err = NewEncoder(e.out, e.writeSb)
+	if err != nil {
+		return fmt.Errorf("while resetting an encoder: %w", err)
+	}
+	return nil
+}
+
+// MappingTable returns a string-convertible Verity target mapping table
+// for use with Device Mapper, or an error. Close must be called on the
+// encoder before calling this function.
+func (e *encoder) MappingTable(dataDevicePath, hashDevicePath string) (*MappingTable, error) {
+	if e.rootHash == nil {
+		if e.bottom.Len() != 0 {
+			return nil, fmt.Errorf("encoder wasn't closed.")
+		}
+		return nil, fmt.Errorf("encoder is empty.")
+	}
+
+	var hs int
+	if e.writeSb {
+		// Account for the superblock by setting the hash tree starting block
+		// to 1 instead of 0.
+		hs = 1
+	}
+	return &MappingTable{
+		superblock:     e.sb,
+		dataDevicePath: dataDevicePath,
+		hashDevicePath: hashDevicePath,
+		hashStart:      hs,
+		rootHash:       e.rootHash,
+	}, nil
+}
diff --git a/metropolis/pkg/verity/encoder_test.go b/metropolis/pkg/verity/encoder_test.go
new file mode 100644
index 0000000..21c4623
--- /dev/null
+++ b/metropolis/pkg/verity/encoder_test.go
@@ -0,0 +1,232 @@
+package verity
+
+import (
+	"bytes"
+	"crypto/aes"
+	"crypto/cipher"
+	"fmt"
+	"io"
+	"os"
+	"testing"
+
+	"github.com/stretchr/testify/require"
+	"golang.org/x/sys/unix"
+
+	dm "source.monogon.dev/metropolis/pkg/devicemapper"
+)
+
const (
	// testDataSize is the number of bytes of test data written to each
	// Verity-protected data device (2 MiB).
	testDataSize int64 = 2 * 1024 * 1024
	// accessMode is the permission bits applied to files and device
	// nodes created by these tests (owner read/write only).
	accessMode = 0600
)
+
+// getRamdisk creates a device file pointing to an unused ramdisk.
+// Returns a filesystem path.
+func getRamdisk() (string, error) {
+	for i := 0; ; i++ {
+		path := fmt.Sprintf("/dev/ram%d", i)
+		dn := unix.Mkdev(1, uint32(i))
+		err := unix.Mknod(path, accessMode|unix.S_IFBLK, int(dn))
+		if os.IsExist(err) {
+			continue
+		}
+		if err != nil {
+			return "", err
+		}
+		return path, nil
+	}
+}
+
+// verityDMTarget returns a dm.Target based on a Verity mapping table.
+func verityDMTarget(mt *MappingTable) *dm.Target {
+	return &dm.Target{
+		Type:        "verity",
+		StartSector: 0,
+		Length:      mt.Length(),
+		Parameters:  mt.VerityParameterList(),
+	}
+}
+
// devZeroReader is an io.Reader yielding an endless stream of zero
// bytes, analogous to /dev/zero. It serves as the plaintext source for
// the CTR keystream in writeRandomBytes.
type devZeroReader struct{}

// Read fills b with zero bytes. It never fails and always reports
// len(b) bytes read.
// An unnamed receiver is idiomatic here; the blank identifier `_` is
// unnecessary for an unused receiver.
func (devZeroReader) Read(b []byte) (int, error) {
	for i := range b {
		b[i] = 0
	}
	return len(b), nil
}

// writeRandomBytes writes length pseudorandom bytes to w. The bytes
// are an AES-CTR keystream under a fixed key/IV, so the output is
// deterministic across runs — important for reproducible test data.
func writeRandomBytes(w io.Writer, length int64) error {
	keyiv := []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}
	blkCipher, err := aes.NewCipher(keyiv)
	if err != nil {
		return err
	}
	// Encrypting a stream of zeros with CTR mode emits the raw
	// keystream.
	stream := cipher.StreamReader{S: cipher.NewCTR(blkCipher, keyiv), R: devZeroReader{}}
	_, err = io.CopyN(w, stream, length)
	return err
}
+
// fillVerityRamdisks fills a block device at dataDevPath with
// pseudorandom data and writes a complementary Verity hash device to
// a block device at hashDevPath. Returns a dm.Target configuring a
// resulting Verity device, and a buffer containing the random data
// written to the data device.
// Any failure aborts the calling test via require.
func fillVerityRamdisks(t *testing.T, dataDevPath, hashDevPath string) (*dm.Target, bytes.Buffer) {
	// Open the data device for writing.
	dfd, err := os.OpenFile(dataDevPath, os.O_WRONLY, accessMode)
	require.NoError(t, err, "while opening the data device at %s", dataDevPath)
	// Open the hash device for writing.
	hfd, err := os.OpenFile(hashDevPath, os.O_WRONLY, accessMode)
	require.NoError(t, err, "while opening the hash device at %s", hashDevPath)

	// Create a Verity encoder, backed with hfd. Configure it to write the
	// Verity superblock ahead of the hash tree.
	verityEnc, err := NewEncoder(hfd, true)
	require.NoError(t, err, "while creating a Verity encoder")

	// Write pseudorandom data simultaneously to the Verity-protected data
	// device, into the Verity encoder (which writes the resulting hash
	// tree to hfd on Close), and into testData for later comparison.
	var testData bytes.Buffer
	tdw := io.MultiWriter(dfd, verityEnc, &testData)
	err = writeRandomBytes(tdw, testDataSize)
	require.NoError(t, err, "while writing test data")

	// Close the encoder first — that is what flushes the hash tree to
	// hfd — then the device descriptors.
	err = verityEnc.Close()
	require.NoError(t, err, "while closing the Verity encoder")
	err = hfd.Close()
	require.NoError(t, err, "while closing the hash device descriptor")
	err = dfd.Close()
	require.NoError(t, err, "while closing the data device descriptor")

	// Generate the Verity mapping table. This must happen after Close,
	// since only Close computes the root hash the table embeds.
	mt, err := verityEnc.MappingTable(dataDevPath, hashDevPath)
	require.NoError(t, err, "while building a Verity mapping table")
	return verityDMTarget(mt), testData
}
+
+// createVerityDevice maps a Verity device described by dmt while
+// assigning it a name equal to devName. It returns a Verity device path.
+func createVerityDevice(t *testing.T, dmt *dm.Target, devName string) string {
+	devNum, err := dm.CreateActiveDevice(devName, true, []dm.Target{*dmt})
+	require.NoError(t, err, "while creating a Verity device")
+
+	devPath := fmt.Sprintf("/dev/%s", devName)
+	err = unix.Mknod(devPath, accessMode|unix.S_IFBLK, int(devNum))
+	require.NoError(t, err, "while creating a Verity device file at %s", devPath)
+	return devPath
+}
+
+// cleanupVerityDevice deactivates a Verity device previously mapped by
+// createVerityDevice, and removes an associated device file.
+func cleanupVerityDevice(t *testing.T, devName string) {
+	err := dm.RemoveDevice(devName)
+	require.NoError(t, err, "while removing a Verity device %s", devName)
+
+	devPath := fmt.Sprintf("/dev/%s", devName)
+	err = os.Remove(devPath)
+	require.NoError(t, err, "while removing a Verity device file at %s", devPath)
+}
+
+// testRead compares contents of a block device at devPath with
+// expectedData. The length of data read is equal to the length
+// of expectedData.
+// It returns 'false', if either data could not be read or it does not
+// match expectedData, and 'true' otherwise.
+func testRead(t *testing.T, devPath string, expectedData []byte) bool {
+	// Open the Verity device.
+	verityDev, err := os.Open(devPath)
+	require.NoError(t, err, "while opening a Verity device at %s", devPath)
+	defer verityDev.Close()
+
+	// Attempt to read the test data. Abort on read errors.
+	readData := make([]byte, len(expectedData))
+	_, err = io.ReadFull(verityDev, readData)
+	if err != nil {
+		return false
+	}
+
+	// Return true, if read data matches expectedData.
+	if bytes.Compare(expectedData, readData) == 0 {
+		return true
+	}
+	return false
+}
+
// TestMakeAndRead attempts to create a Verity device, then verifies the
// integrity of its contents by reading them back through the verified
// mapping. It only runs inside a ktest kernel environment, since it
// needs ramdisks and Device Mapper.
func TestMakeAndRead(t *testing.T) {
	if os.Getenv("IN_KTEST") != "true" {
		t.Skip("Not in ktest")
	}

	// Allocate block devices backing the Verity target.
	dataDevPath, err := getRamdisk()
	require.NoError(t, err, "while allocating a data device ramdisk")
	hashDevPath, err := getRamdisk()
	require.NoError(t, err, "while allocating a hash device ramdisk")

	// Fill the data device with test data and write a corresponding Verity
	// hash tree to the hash device.
	dmTarget, expectedDataBuf := fillVerityRamdisks(t, dataDevPath, hashDevPath)

	// Create a Verity device using dmTarget. Use the test name as a device
	// handle. verityPath will point to a resulting new block device.
	verityPath := createVerityDevice(t, dmTarget, t.Name())
	defer cleanupVerityDevice(t, t.Name())

	// Use testRead to compare Verity target device contents with test data
	// written to the data block device at dataDevPath by fillVerityRamdisks.
	// Reading through the dm-verity mapping implicitly verifies every
	// block against the hash tree.
	if !testRead(t, verityPath, expectedDataBuf.Bytes()) {
		t.Error("data read from the verity device doesn't match the source")
	}
}
+
+// TestMalformed checks whenever Verity would prevent reading from a
+// target whose hash device contents have been corrupted, as is expected.
+func TestMalformed(t *testing.T) {
+	if os.Getenv("IN_KTEST") != "true" {
+		t.Skip("Not in ktest")
+	}
+
+	// Allocate block devices backing the Verity target.
+	dataDevPath, err := getRamdisk()
+	require.NoError(t, err, "while allocating a data device ramdisk")
+	hashDevPath, err := getRamdisk()
+	require.NoError(t, err, "while allocating a hash device ramdisk")
+
+	// Fill the data device with test data and write a corresponding Verity
+	// hash tree to the hash device.
+	dmTarget, expectedDataBuf := fillVerityRamdisks(t, dataDevPath, hashDevPath)
+
+	// Corrupt the first hash device block before mapping the Verity target.
+	hfd, err := os.OpenFile(hashDevPath, os.O_RDWR, accessMode)
+	require.NoError(t, err, "while opening a hash device at %s", hashDevPath)
+	// Place an odd byte at the 256th byte of the first hash block, skipping
+	// a 4096-byte Verity superblock.
+	hfd.Seek(4096+256, io.SeekStart)
+	hfd.Write([]byte{'F'})
+	hfd.Close()
+
+	// Create a Verity device using dmTarget. Use the test name as a device
+	// handle. verityPath will point to a resulting new block device.
+	verityPath := createVerityDevice(t, dmTarget, t.Name())
+	defer cleanupVerityDevice(t, t.Name())
+
+	// Use testRead to compare Verity target device contents with test data
+	// written to the data block device at dataDevPath by fillVerityRamdisks.
+	// This step is expected to fail after an incomplete read.
+	if testRead(t, verityPath, expectedDataBuf.Bytes()) {
+		t.Error("data matches the source when it shouldn't")
+	}
+}