Polish BlobCreator and extend its test

Torsten Grote 2024-09-13 13:50:39 -03:00
parent 237fd683bd
commit dd5180f3b7
2 changed files with 34 additions and 13 deletions

BlobCreator.kt

@@ -5,6 +5,7 @@
package com.stevesoltys.seedvault.transport.backup
import androidx.annotation.WorkerThread
import com.github.luben.zstd.ZstdOutputStream
import com.google.protobuf.ByteString
import com.stevesoltys.seedvault.backend.BackendManager
@@ -17,7 +18,11 @@ import okio.buffer
import okio.sink
import org.calyxos.seedvault.chunker.Chunk
import org.calyxos.seedvault.core.backends.AppBackupFileType
+import java.io.IOException
+/**
+ * Creates and uploads new blobs to the current backend.
+ */
internal class BlobCreator(
private val crypto: Crypto,
private val backendManager: BackendManager,
@@ -25,6 +30,11 @@ internal class BlobCreator(
private val buffer = Buffer()
+/**
+ * Creates and returns a new [Blob] from the given [chunk] and uploads it to the backend.
+ */
@WorkerThread
+@Throws(IOException::class)
suspend fun createNewBlob(chunk: Chunk): Blob {
buffer.clear()
val bufferStream = buffer.outputStream()
@@ -36,7 +46,7 @@
}
val sha256ByteString = buffer.sha256()
val handle = AppBackupFileType.Blob(crypto.repoId, sha256ByteString.hex())
-// TODO exception handling and retries
+// TODO for later: implement a backend wrapper that handles retries for transient errors
val size = backendManager.backend.save(handle).use { outputStream ->
val outputBuffer = outputStream.sink().buffer()
val length = outputBuffer.writeAll(buffer)

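The replaced TODO points at a backend wrapper that retries transient errors. As a rough sketch of that idea (not Seedvault's actual API: retryTransient, the attempt count, and the backoff policy are invented for this illustration), such a helper could look like the following, treating any IOException as transient:

import java.io.IOException
import kotlinx.coroutines.delay
import kotlinx.coroutines.runBlocking

// Hypothetical helper: retries [block] on IOException and waits a little
// longer before each new attempt.
suspend fun <T> retryTransient(attempts: Int = 3, block: suspend () -> T): T {
    var lastError: IOException? = null
    repeat(attempts) { attempt ->
        try {
            return block()
        } catch (e: IOException) {
            lastError = e
            delay(250L * (attempt + 1)) // simple linear backoff
        }
    }
    throw lastError ?: IOException("no attempts were made")
}

fun main() = runBlocking {
    var calls = 0
    // Fails twice with a simulated transient error, then succeeds.
    val result = retryTransient {
        if (++calls < 3) throw IOException("transient failure $calls")
        "uploaded"
    }
    println("$result after $calls calls") // prints: uploaded after 3 calls
}

Wrapping the backend's save() call in a helper like this would keep the retry policy out of BlobCreator itself, which appears to be what the TODO has in mind.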
BlobCreatorTest.kt

@@ -17,6 +17,7 @@ import org.calyxos.seedvault.core.backends.AppBackupFileType
import org.calyxos.seedvault.core.backends.Backend
import org.calyxos.seedvault.core.toHexString
import org.junit.jupiter.api.Assertions.assertEquals
+import org.junit.jupiter.api.Assertions.assertNotEquals
import org.junit.jupiter.api.Test
import java.io.ByteArrayOutputStream
import java.io.OutputStream
@@ -34,7 +35,7 @@ internal class BlobCreatorTest : TransportTest() {
private val blobHandle = slot<AppBackupFileType.Blob>()
@Test
-fun `test re-use for hashing two chunks`() = runBlocking {
+fun `test re-use instance for creating two blobs`() = runBlocking {
val data1 = Random.nextBytes(1337)
val data2 = Random.nextBytes(2342)
val chunk1 = Chunk(0L, data1.size, data1, "doesn't matter here")
@@ -48,24 +49,34 @@ internal class BlobCreatorTest : TransportTest() {
}
every { crypto.repoId } returns repoId
every { backendManager.backend } returns backend
-coEvery { backend.save(capture(blobHandle)) } returns outputStream1
-blobCreator.createNewBlob(chunk1)
+// create first blob
+coEvery { backend.save(capture(blobHandle)) } returns outputStream1
+val blob1 = blobCreator.createNewBlob(chunk1)
// check that file content hash matches snapshot hash
val messageDigest = MessageDigest.getInstance("SHA-256")
-assertEquals(
-messageDigest.digest(outputStream1.toByteArray()).toHexString(),
-blobHandle.captured.name,
-)
+val hash1 = messageDigest.digest(outputStream1.toByteArray()).toHexString()
+assertEquals(hash1, blobHandle.captured.name)
+// check blob metadata
+assertEquals(hash1, blob1.id.hexFromProto())
+assertEquals(outputStream1.size(), blob1.length)
+assertEquals(data1.size, blob1.uncompressedLength)
+// use same BlobCreator to create another blob, because we re-use a single buffer
+// and need to check that clearing it works as expected
coEvery { backend.save(capture(blobHandle)) } returns outputStream2
-blobCreator.createNewBlob(chunk2)
+val blob2 = blobCreator.createNewBlob(chunk2)
// check that file content hash matches snapshot hash
-assertEquals(
-messageDigest.digest(outputStream2.toByteArray()).toHexString(),
-blobHandle.captured.name,
-)
+val hash2 = messageDigest.digest(outputStream2.toByteArray()).toHexString()
+assertEquals(hash2, blobHandle.captured.name)
+// both hashes are different
+assertNotEquals(hash1, hash2)
+// check blob metadata
+assertEquals(hash2, blob2.id.hexFromProto())
+assertEquals(outputStream2.size(), blob2.length)
+assertEquals(data2.size, blob2.uncompressedLength)
}
}
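A note on why the test now creates two blobs with one BlobCreator instance: the class keeps a single okio Buffer that must be cleared before each use, and okio's sha256() hashes the buffer's contents without consuming them. The sketch below demonstrates both properties in isolation (the sizes mirror the test, everything else is invented; only okio and the JDK are assumed):

import okio.Buffer
import java.security.MessageDigest
import kotlin.random.Random

fun main() {
    val buffer = Buffer() // one shared buffer, like the one in BlobCreator

    for (size in listOf(1337, 2342)) {
        buffer.clear() // without this, bytes from the previous round would leak in
        buffer.write(Random.nextBytes(size))

        // okio's sha256() peeks at the buffer without consuming it ...
        val okioHash = buffer.sha256().hex()
        // ... so readByteArray() still sees the same bytes for the JDK digest
        val jdkHash = MessageDigest.getInstance("SHA-256")
            .digest(buffer.readByteArray())
            .joinToString("") { "%02x".format(it) }

        check(okioHash == jdkHash) // both APIs agree on the content hash
        println("$size bytes -> $okioHash")
    }
}

If the clear() call were dropped, the second hash would cover leftover bytes from the first round, which is exactly the regression the extended test is meant to catch.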