Properly handle exception while writing zip chunk entries
parent dc7405474e
commit 758186392d
3 changed files with 143 additions and 6 deletions

@@ -17,7 +17,6 @@ import java.io.OutputStream
 import java.nio.file.attribute.FileTime
 import java.security.GeneralSecurityException
 import java.util.zip.ZipEntry
-import java.util.zip.ZipOutputStream
 import kotlin.math.min

 internal data class ChunkWriterResult(

@@ -121,9 +120,17 @@ internal class ChunkWriter(
     }

     @Throws(IOException::class)
-    fun writeNewZipEntry(zipOutputStream: ZipOutputStream, counter: Int, inputStream: InputStream) {
-        val entry = createNewZipEntry(counter)
-        zipOutputStream.putNextEntry(entry)
+    fun writeNewZipEntry(
+        zipOutputStream: NameZipOutputStream,
+        counter: Int,
+        inputStream: InputStream,
+    ) {
+        // If copying below throws an exception, we'd be adding a new entry with the same counter,
+        // so we check if we have added an entry for that counter already to prevent duplicates.
+        if ((zipOutputStream.lastName?.toIntOrNull() ?: 0) != counter) {
+            val entry = createNewZipEntry(counter)
+            zipOutputStream.putNextEntry(entry)
+        }
         inputStream.copyTo(zipOutputStream)
         zipOutputStream.closeEntry()
     }

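The guard above keys off the entry name, which ChunkWriter derives from the counter. As a minimal, self-contained sketch (not part of this commit; it uses only java.util.zip and the Kotlin standard library), this is the failure mode it prevents: ZipOutputStream.putNextEntry() rejects a second entry whose name it has already seen, so naively retrying a failed file with the same counter used to break that file and every subsequent one.

import java.io.ByteArrayOutputStream
import java.util.zip.ZipEntry
import java.util.zip.ZipException
import java.util.zip.ZipOutputStream

fun main() {
    val zip = ZipOutputStream(ByteArrayOutputStream())
    zip.putNextEntry(ZipEntry("1"))
    try {
        // A second entry with the same name is what the old code effectively did on retry.
        zip.putNextEntry(ZipEntry("1"))
    } catch (e: ZipException) {
        println("rejected: ${e.message}") // e.g. "duplicate entry: 1" on OpenJDK
    }
    zip.close()
}

The lastName check in writeNewZipEntry avoids exactly this by reusing the entry that is still open for the failed counter.
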
@@ -11,7 +11,9 @@ import org.calyxos.backup.storage.toHexString
 import java.io.ByteArrayOutputStream
 import java.io.IOException
 import java.io.InputStream
+import java.io.OutputStream
 import java.security.GeneralSecurityException
+import java.util.zip.ZipEntry
 import java.util.zip.ZipOutputStream
 import javax.crypto.Mac

@@ -36,7 +38,7 @@ internal class ZipChunker(
     private val files = ArrayList<ContentFile>()

     private val outputStream = ByteArrayOutputStream(chunkSizeMax)
-    private var zipOutputStream = ZipOutputStream(outputStream)
+    private var zipOutputStream = NameZipOutputStream(outputStream)

     // we start with 1, because 0 is the default value in protobuf 3
     private var counter = 1

@@ -77,8 +79,21 @@ internal class ZipChunker(
     private fun reset() {
         files.clear()
         outputStream.reset()
-        zipOutputStream = ZipOutputStream(outputStream)
+        zipOutputStream = NameZipOutputStream(outputStream)
         counter = 1
     }

 }
+
+/**
+ * A wrapper for [ZipOutputStream] that remembers the name of the last [ZipEntry] that was added.
+ */
+internal class NameZipOutputStream(outputStream: OutputStream) : ZipOutputStream(outputStream) {
+    internal var lastName: String? = null
+        private set
+
+    override fun putNextEntry(e: ZipEntry) {
+        super.putNextEntry(e)
+        lastName = e.name
+    }
+}

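To illustrate how the wrapper is meant to be used, here is a hedged sketch (not from the repository): writeEntry is a hypothetical stand-in for ChunkWriter.writeNewZipEntry, the entry name is assumed to be the counter rendered as a string, and the snippet assumes it runs in the same module since NameZipOutputStream is internal. Because lastName survives an exception thrown while copying, a later call with the same counter reuses the already-opened entry instead of triggering a duplicate-entry error.

import java.io.ByteArrayOutputStream
import java.io.IOException
import java.io.InputStream
import java.util.zip.ZipEntry

// Hypothetical stand-in for ChunkWriter.writeNewZipEntry, shown for illustration only.
fun writeEntry(zip: NameZipOutputStream, counter: Int, input: InputStream) {
    // Only open a new entry if the last attempt did not already open one for this counter.
    if ((zip.lastName?.toIntOrNull() ?: 0) != counter) {
        zip.putNextEntry(ZipEntry(counter.toString()))
    }
    input.copyTo(zip) // may throw; lastName still remembers the open entry
    zip.closeEntry()
}

fun main() {
    val zip = NameZipOutputStream(ByteArrayOutputStream())
    val broken = object : InputStream() {
        override fun read(): Int = throw IOException("unreadable file")
    }
    try {
        writeEntry(zip, 1, broken) // opens entry "1", then fails while copying
    } catch (e: IOException) {
        println("first file failed: ${e.message}")
    }
    // The same counter is used again; the guard skips putNextEntry, so no duplicate-entry error.
    writeEntry(zip, 1, "hello".byteInputStream())
    zip.finish()
}

This mirrors what the new integration test below exercises with mocked content streams.
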
@@ -0,0 +1,115 @@
+/*
+ * SPDX-FileCopyrightText: 2021 The Calyx Institute
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+package org.calyxos.backup.storage.backup
+
+import android.content.ContentResolver
+import io.mockk.Runs
+import io.mockk.coEvery
+import io.mockk.coVerify
+import io.mockk.every
+import io.mockk.just
+import io.mockk.mockk
+import kotlinx.coroutines.runBlocking
+import org.calyxos.backup.storage.api.BackupObserver
+import org.calyxos.backup.storage.api.StoragePlugin
+import org.calyxos.backup.storage.crypto.Hkdf.KEY_SIZE_BYTES
+import org.calyxos.backup.storage.crypto.StreamCrypto
+import org.calyxos.backup.storage.db.CachedChunk
+import org.calyxos.backup.storage.db.ChunksCache
+import org.calyxos.backup.storage.db.FilesCache
+import org.calyxos.backup.storage.getRandomDocFile
+import org.calyxos.backup.storage.getRandomString
+import org.calyxos.backup.storage.mockLog
+import org.calyxos.backup.storage.toHexString
+import org.junit.Assert.assertEquals
+import org.junit.Test
+import java.io.ByteArrayInputStream
+import java.io.ByteArrayOutputStream
+import java.io.IOException
+import java.io.InputStream
+import javax.crypto.Mac
+import kotlin.random.Random
+
+internal class SmallFileBackupIntegrationTest {
+
+    private val contentResolver: ContentResolver = mockk()
+    private val filesCache: FilesCache = mockk()
+    private val mac: Mac = mockk()
+    private val chunksCache: ChunksCache = mockk()
+    private val storagePlugin: StoragePlugin = mockk()
+
+    private val chunkWriter = ChunkWriter(
+        streamCrypto = StreamCrypto,
+        streamKey = Random.nextBytes(KEY_SIZE_BYTES),
+        chunksCache = chunksCache,
+        storagePlugin = storagePlugin,
+    )
+    private val zipChunker = ZipChunker(
+        mac = mac,
+        chunkWriter = chunkWriter,
+    )
+
+    private val smallFileBackup = SmallFileBackup(contentResolver, filesCache, zipChunker, true)
+
+    init {
+        mockLog()
+    }
+
+    /**
+     * This tests that if writing out one ZIP entry throws an exception,
+     * the subsequent entries can still be written.
+     * Previously, we'd start a new ZipEntry with the same counter value
+     * which is not allowed, so all subsequent files would also not get backed up.
+     */
+    @Test
+    fun `first of two new files throws and gets ignored`(): Unit = runBlocking {
+        val file1 = getRandomDocFile()
+        val file2 = getRandomDocFile()
+        val files = listOf(file1, file2)
+        val availableChunkIds = hashSetOf(getRandomString(6))
+        val observer: BackupObserver = mockk()
+
+        val inputStream1: InputStream = mockk()
+        val inputStream2: InputStream = ByteArrayInputStream(Random.nextBytes(42))
+        val outputStream2 = ByteArrayOutputStream()
+
+        val chunkId = Random.nextBytes(KEY_SIZE_BYTES)
+        val cachedChunk = CachedChunk(chunkId.toHexString(), 0, 181, 0)
+        val cachedFile2 = file2.toCachedFile(listOf(chunkId.toHexString()), 1)
+        val backupFile = file2.toBackupFile(cachedFile2.chunks, cachedFile2.zipIndex)
+
+        every { filesCache.getByUri(file1.uri) } returns null
+        every { filesCache.getByUri(file2.uri) } returns null
+
+        every { contentResolver.openInputStream(file1.uri) } returns inputStream1
+        every { contentResolver.openInputStream(file2.uri) } returns inputStream2
+
+        every { inputStream1.read(any<ByteArray>()) } throws IOException()
+        coEvery { observer.onFileBackupError(file1, "S") } just Runs
+
+        every { mac.doFinal(any<ByteArray>()) } returns chunkId
+        every { chunksCache.get(any()) } returns null
+        every { storagePlugin.getChunkOutputStream(any()) } returns outputStream2
+        every { chunksCache.insert(cachedChunk) } just Runs
+        every {
+            filesCache.upsert(match {
+                it.copy(lastSeen = cachedFile2.lastSeen) == cachedFile2
+            })
+        } just Runs
+        coEvery { observer.onFileBackedUp(file2, true, 0, 181, "S") } just Runs
+
+        val result = smallFileBackup.backupFiles(files, availableChunkIds, observer)
+        assertEquals(setOf(chunkId.toHexString()), result.chunkIds)
+        assertEquals(1, result.backupDocumentFiles.size)
+        assertEquals(backupFile, result.backupDocumentFiles[0])
+        assertEquals(0, result.backupMediaFiles.size)
+
+        coVerify {
+            observer.onFileBackedUp(file2, true, 0, 181, "S")
+        }
+    }
+
+}