Fully implement BackupReceiver and write tests
parent 538d794d8d
commit 52f528dbf0
16 changed files with 360 additions and 172 deletions

@@ -63,7 +63,6 @@ class KvBackupInstrumentationTest : KoinComponent {
this.packageName = packageName
}

every { backupReceiver.assertFinalized() } just Runs
every { inputFactory.getBackupDataInput(data) } returns dataInput
every { dataInput.readNextHeader() } returnsMany listOf(true, false)
every { dataInput.key } returns key

@@ -77,8 +76,7 @@ class KvBackupInstrumentationTest : KoinComponent {
backup.performBackup(packageInfo, data, FLAG_NON_INCREMENTAL)

coEvery { backupReceiver.readFromStream(any()) } just Runs
coEvery { backupReceiver.finalize() } returns backupData
coEvery { backupReceiver.readFromStream(any(), any()) } returns backupData

runBlocking {
assertEquals(backupData, backup.finishBackup())

@@ -69,8 +69,10 @@ class IconManagerTest : KoinComponent {
val blob = blob { id = ByteString.fromHex(blobId) }

// upload icons and capture plaintext bytes
coEvery { backupReceiver.addBytes(capture(output)) } just Runs
coEvery { backupReceiver.finalize() } returns BackupData(chunkList, mapOf(chunkId to blob))
coEvery { backupReceiver.addBytes(any(), capture(output)) } just Runs
coEvery {
backupReceiver.finalize(any())
} returns BackupData(chunkList, mapOf(chunkId to blob))
iconManager.uploadIcons()
assertTrue(output.captured.isNotEmpty())

@@ -93,13 +95,13 @@ class IconManagerTest : KoinComponent {
val output1 = slot<ByteArray>()
val output2 = slot<ByteArray>()

coEvery { backupReceiver.addBytes(capture(output1)) } just Runs
coEvery { backupReceiver.finalize() } returns BackupData(emptyList(), emptyMap())
coEvery { backupReceiver.addBytes(any(), capture(output1)) } just Runs
coEvery { backupReceiver.finalize(any()) } returns BackupData(emptyList(), emptyMap())
iconManager.uploadIcons()
assertTrue(output1.captured.isNotEmpty())

coEvery { backupReceiver.addBytes(capture(output2)) } just Runs
coEvery { backupReceiver.finalize() } returns BackupData(emptyList(), emptyMap())
coEvery { backupReceiver.addBytes(any(), capture(output2)) } just Runs
coEvery { backupReceiver.finalize(any()) } returns BackupData(emptyList(), emptyMap())
iconManager.uploadIcons()
assertTrue(output2.captured.isNotEmpty())

@@ -5,21 +5,43 @@
package com.stevesoltys.seedvault.transport.backup

import androidx.annotation.WorkerThread
import com.stevesoltys.seedvault.crypto.Crypto
import com.stevesoltys.seedvault.proto.Snapshot.Blob
import org.calyxos.seedvault.chunker.Chunk
import org.calyxos.seedvault.chunker.Chunker
import org.calyxos.seedvault.chunker.GearTableCreator
import org.calyxos.seedvault.core.toHexString
import java.io.IOException
import java.io.InputStream

/**
* Essential metadata returned when storing backup data.
*
* @param chunkIds an ordered(!) list of the chunk IDs required to re-assemble the backup data.
* @param blobMap a mapping from chunk ID to [Blob] on the backend.
* Needed for fetching blobs from the backend for re-assembly.
*/
data class BackupData(
val chunks: List<String>,
val chunkMap: Map<String, Blob>,
val chunkIds: List<String>,
val blobMap: Map<String, Blob>,
) {
val size get() = chunkMap.values.sumOf { it.uncompressedLength }.toLong()
/**
* The uncompressed plaintext size of all blobs.
*/
val size get() = blobMap.values.sumOf { it.uncompressedLength }.toLong()
}
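
To make the renamed fields concrete, here is a small self-contained sketch of the same semantics; FakeBlob and FakeBackupData are hypothetical stand-ins for the generated Snapshot.Blob message and the data class above, and all values are made up:

// Hypothetical stand-in for Snapshot.Blob, which also carries an uncompressedLength field.
data class FakeBlob(val uncompressedLength: Int)

// Mirrors BackupData above: chunkIds is ordered for re-assembly, blobMap locates each chunk.
data class FakeBackupData(
    val chunkIds: List<String>,
    val blobMap: Map<String, FakeBlob>,
) {
    val size get() = blobMap.values.sumOf { it.uncompressedLength }.toLong()
}

fun main() {
    val data = FakeBackupData(
        chunkIds = listOf("aa01", "bb02", "aa01"), // a repeated chunk shows up twice in the order...
        blobMap = mapOf("aa01" to FakeBlob(4096), "bb02" to FakeBlob(1024)),
    )
    println(data.size) // 5120: ...but its blob is stored and counted only once
}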

/**
* The single point for receiving data for backup.
* Data received will get split into smaller chunks, if needed.
* [Chunk]s that don't have a corresponding [Blob] in the [blobCache]
* will be passed to the [blobCreator] and have the new blob saved to the backend.
*
* Data can be received either via [addBytes] (requires matching call to [finalize])
* or via [readFromStream].
* This call is *not* thread-safe.
*/
internal class BackupReceiver(
private val blobCache: BlobCache,
private val blobCreator: BlobCreator,

@@ -36,54 +58,74 @@ internal class BackupReceiver(
normalization = 1,
gearTable = GearTableCreator.create(crypto.gearTableKey),
hashFunction = { bytes ->
// this calculates the chunkId
crypto.sha256(bytes).toHexString()
},
)
}
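
The hashFunction above is what produces the chunk IDs used throughout this commit: a SHA-256 over the chunk's plaintext bytes, hex-encoded. A self-contained sketch of that derivation, with java.security.MessageDigest standing in for the project's Crypto.sha256 and a manual hex encoding standing in for toHexString:

import java.security.MessageDigest

// Sketch of the chunk ID derivation: SHA-256 over the chunk's plaintext bytes, hex-encoded.
fun chunkIdOf(bytes: ByteArray): String =
    MessageDigest.getInstance("SHA-256").digest(bytes)
        .joinToString("") { "%02x".format(it) }

fun main() {
    val chunk = "example chunk payload".toByteArray()
    println(chunkIdOf(chunk)) // 64 hex characters, used as the key into blobMap and the BlobCache
}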
private val chunks = mutableListOf<String>()
private val chunkMap = mutableMapOf<String, Blob>()
private var addedBytes = false
private val blobMap = mutableMapOf<String, Blob>()
private var owner: String? = null

suspend fun addBytes(bytes: ByteArray) {
addedBytes = true
/**
* Adds more [bytes] to be chunked and saved.
* Must call [finalize] when done, even when an exception was thrown
* to free up this re-usable instance of [BackupReceiver].
*/
@WorkerThread
@Throws(IOException::class)
suspend fun addBytes(owner: String, bytes: ByteArray) {
checkOwner(owner)
chunker.addBytes(bytes).forEach { chunk ->
onNewChunk(chunk)
}
}

suspend fun readFromStream(inputStream: InputStream) {
/**
* Reads backup data from the given [inputStream] and returns [BackupData],
* so a call to [finalize] isn't required.
* The caller must close the [inputStream] when done.
*/
@WorkerThread
@Throws(IOException::class)
suspend fun readFromStream(owner: String, inputStream: InputStream): BackupData {
checkOwner(owner)
try {
val buffer = ByteArray(DEFAULT_BUFFER_SIZE)
var bytes = inputStream.read(buffer)
while (bytes >= 0) {
if (bytes == buffer.size) {
addBytes(buffer)
addBytes(owner, buffer)
} else {
addBytes(buffer.copyOfRange(0, bytes))
addBytes(owner, buffer.copyOfRange(0, bytes))
}
bytes = inputStream.read(buffer)
}
return finalize(owner)
} catch (e: Exception) {
finalize()
finalize(owner)
throw e
}
}

suspend fun finalize(): BackupData {
chunker.finalize().forEach { chunk ->
onNewChunk(chunk)
/**
* Must be called after one or more calls to [addBytes] to finalize usage of this instance
* and receive the [BackupData] for snapshotting.
*/
@WorkerThread
@Throws(IOException::class)
suspend fun finalize(owner: String): BackupData {
checkOwner(owner)
try {
chunker.finalize().forEach { chunk ->
onNewChunk(chunk)
}
return BackupData(chunks.toList(), blobMap.toMap())
} finally {
chunks.clear()
blobMap.clear()
this.owner = null
}
// copy chunks and chunkMap before clearing
val backupData = BackupData(chunks.toList(), chunkMap.toMap())
chunks.clear()
chunkMap.clear()
addedBytes = false
return backupData
}

fun assertFinalized() {
// TODO maybe even use a userTag and throw also above if that doesn't match
check(!addedBytes) { "Re-used non-finalized BackupReceiver" }
}

private suspend fun onNewChunk(chunk: Chunk) {

@@ -92,11 +134,16 @@ internal class BackupReceiver(
val existingBlob = blobCache[chunk.hash]
if (existingBlob == null) {
val blob = blobCreator.createNewBlob(chunk)
chunkMap[chunk.hash] = blob
blobMap[chunk.hash] = blob
blobCache.saveNewBlob(chunk.hash, blob)
} else {
chunkMap[chunk.hash] = existingBlob
blobMap[chunk.hash] = existingBlob
}
}

private fun checkOwner(owner: String) {
if (this.owner == null) this.owner = owner
else check(this.owner == owner) { "Owned by ${this.owner}, but called from $owner" }
}

}
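
Putting the new API together, a minimal usage sketch of the owner contract introduced above; the owner string and byte values are made up, and the sketch only assumes a shared BackupReceiver instance is available to the caller, as it is to the components below:

// Sketch only: the two ways to feed the shared BackupReceiver under the new owner contract.
suspend fun sketchUsage(receiver: BackupReceiver, dbStream: java.io.InputStream) {
    val owner = "Example my.package.name" // hypothetical tag; real callers use strings like "KV $packageName"

    // Variant 1: push bytes incrementally, then finalize with the same owner,
    // even when an exception was thrown in between, to free the re-usable instance.
    try {
        receiver.addBytes(owner, byteArrayOf(1, 2, 3))
        receiver.addBytes(owner, byteArrayOf(4, 5, 6))
    } finally {
        val data = receiver.finalize(owner) // BackupData with ordered chunkIds and blobMap
        println("stored ${data.size} bytes as ${data.chunkIds.size} chunk(s)")
    }

    // Variant 2: let the receiver drain a stream; it finalizes itself, the caller closes the stream.
    val streamed = dbStream.use { receiver.readFromStream(owner, it) }
    println(streamed.chunkIds)

    // Any call with a different owner before finalize() throws:
    // "Owned by <owner>, but called from <other>"
}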

@@ -109,7 +109,6 @@ internal class FullBackup(
// create new state
val inputStream = inputFactory.getInputStream(socket)
state = FullBackupState(targetPackage, socket, inputStream)
backupReceiver.assertFinalized()
return TRANSPORT_OK
}

@@ -131,7 +130,7 @@ internal class FullBackup(
val payload = ByteArray(numBytes)
val read = state.inputStream.read(payload, 0, numBytes)
if (read != numBytes) throw EOFException("Read $read bytes instead of $numBytes.")
backupReceiver.addBytes(payload)
backupReceiver.addBytes(getOwner(state.packageName), payload)
state.size += numBytes
TRANSPORT_OK
} catch (e: IOException) {

@@ -147,7 +146,7 @@ internal class FullBackup(
// TODO check if worth keeping the blobs. they've been uploaded already and may be re-usable
// so we could add them to the snapshot's blobMap or just let prune remove them at the end
try {
backupReceiver.finalize()
backupReceiver.finalize(getOwner(state.packageName))
} catch (e: Exception) {
// as the backup was cancelled anyway, we don't care if finalizing had an error
Log.e(TAG, "Error finalizing backup in cancelFullBackup().", e)

@@ -163,7 +162,7 @@ internal class FullBackup(
val state = this.state ?: error("No state when finishing")
Log.i(TAG, "Finish full backup of ${state.packageName}. Wrote ${state.size} bytes")
val result = try {
backupReceiver.finalize()
backupReceiver.finalize(getOwner(state.packageName))
} finally {
clearState()
}

@@ -177,6 +176,8 @@ internal class FullBackup(
this.state = null
}

private fun getOwner(packageName: String) = "FullBackup $packageName"

private fun closeLogging(closable: Closeable?) = try {
closable?.close()
} catch (e: Exception) {

@@ -60,7 +60,6 @@ internal class KVBackup(
else -> Log.i(TAG, "Performing K/V backup for $packageName")
}
check(state == null) { "Have unexpected state for ${state?.packageInfo?.packageName}" }
backupReceiver.assertFinalized()

// initialize state
state = KVBackupState(packageInfo = packageInfo, db = dbManager.getDb(packageName))

@@ -161,15 +160,15 @@ internal class KVBackup(
suspend fun finishBackup(): BackupData {
val state = this.state ?: error("No state in finishBackup")
val packageName = state.packageInfo.packageName
val owner = "KV $packageName"
Log.i(TAG, "Finish K/V Backup of $packageName")

try {
state.db.vacuum()
state.db.close()
dbManager.getDbInputStream(packageName).use { inputStream ->
backupReceiver.readFromStream(inputStream)
val backupData = dbManager.getDbInputStream(packageName).use { inputStream ->
backupReceiver.readFromStream(owner, inputStream)
}
val backupData = backupReceiver.finalize()
Log.d(TAG, "Uploaded db file for $packageName.")
return backupData
} finally { // exceptions bubble up

@@ -59,7 +59,7 @@ internal class SnapshotCreator(
fun onApkBackedUp(
packageInfo: PackageInfo,
apk: Apk,
chunkMap: Map<String, Blob>,
blobMap: Map<String, Blob>,
) {
appBuilderMap.getOrPut(packageInfo.packageName) {
App.newBuilder()

@@ -68,7 +68,7 @@ internal class SnapshotCreator(
if (label != null) name = label.toString()
setApk(apk)
}
blobsMap.putAll(chunkMap)
blobsMap.putAll(blobMap)
}

fun onPackageBackedUp(

@@ -78,7 +78,7 @@ internal class SnapshotCreator(
) {
val packageName = packageInfo.packageName
val isSystemApp = packageInfo.isSystemApp()
val chunkIds = backupData.chunks.forProto()
val chunkIds = backupData.chunkIds.forProto()
appBuilderMap.getOrPut(packageName) {
App.newBuilder()
}.apply {

@@ -91,13 +91,13 @@ internal class SnapshotCreator(
launchableSystemApp = isSystemApp && launchableSystemApps.contains(packageName)
addAllChunkIds(chunkIds)
}
blobsMap.putAll(backupData.chunkMap)
blobsMap.putAll(backupData.blobMap)
metadataManager.onPackageBackedUp(packageInfo, backupType, backupData.size)
}

fun onIconsBackedUp(backupData: BackupData) {
snapshotBuilder.addAllIconChunkIds(backupData.chunks.forProto())
blobsMap.putAll(backupData.chunkMap)
snapshotBuilder.addAllIconChunkIds(backupData.chunkIds.forProto())
blobsMap.putAll(backupData.blobMap)
}

fun finalizeSnapshot(): Snapshot {

@@ -106,15 +106,15 @@ internal class ApkBackup(
" already has a backup ($backedUpVersion)" +
" with the same signature. Not backing it up."
)
// build up chunkMap from old snapshot
// build up blobMap from old snapshot
val chunkIds = oldApk.splitsList.flatMap {
it.chunkIdsList.map { chunkId -> chunkId.hexFromProto() }
}
val chunkMap = chunkIds.associateWith { chunkId ->
val blobMap = chunkIds.associateWith { chunkId ->
latestSnapshot.blobsMap[chunkId] ?: error("Missing blob for $chunkId")
}
// important: add old APK to snapshot or it wouldn't be part of backup
snapshotCreator.onApkBackedUp(packageInfo, oldApk, chunkMap)
snapshotCreator.onApkBackedUp(packageInfo, oldApk, blobMap)
return
}

@@ -131,27 +131,22 @@ internal class ApkBackup(
// get an InputStream for the APK
val sourceDir = packageInfo.applicationInfo?.sourceDir ?: return
// upload the APK to the backend
getApkInputStream(sourceDir).use { inputStream ->
backupReceiver.readFromStream(inputStream)
val owner = getOwner(packageName, "")
val backupData = getApkInputStream(sourceDir).use { inputStream ->
backupReceiver.readFromStream(owner, inputStream)
}
val backupData = backupReceiver.finalize()
// store base split in builder
val baseSplit = split {
name = BASE_SPLIT
chunkIds.addAll(backupData.chunks.forProto())
chunkIds.addAll(backupData.chunkIds.forProto())
}
apkBuilder
.addSplits(baseSplit)
val chunkMap = backupData.chunkMap.toMutableMap()
apkBuilder.addSplits(baseSplit)
val blobMap = backupData.blobMap.toMutableMap()

// back up splits if they exist
val splits = if (packageInfo.splitNames == null) {
emptyList()
} else {
backupSplitApks(packageInfo, chunkMap)
}
val splits = backupSplitApks(packageInfo, blobMap)
val apk = apkBuilder.addAllSplits(splits).build()
snapshotCreator.onApkBackedUp(packageInfo, apk, chunkMap)
snapshotCreator.onApkBackedUp(packageInfo, apk, blobMap)
Log.d(TAG, "Backed up new APK of $packageName with version ${packageInfo.versionName}.")
}

@@ -181,9 +176,8 @@ internal class ApkBackup(
@Throws(IOException::class)
private suspend fun backupSplitApks(
packageInfo: PackageInfo,
chunkMap: MutableMap<String, Blob>,
blobMap: MutableMap<String, Blob>,
): List<Snapshot.Split> {
check(packageInfo.splitNames != null)
// attention: though not documented, splitSourceDirs can be null
val splitSourceDirs = packageInfo.applicationInfo?.splitSourceDirs ?: emptyArray()
check(packageInfo.splitNames.size == splitSourceDirs.size) {

@@ -193,21 +187,24 @@ internal class ApkBackup(
}
val splits = ArrayList<Snapshot.Split>(packageInfo.splitNames.size)
for (i in packageInfo.splitNames.indices) {
val splitName = packageInfo.splitNames[i]
val owner = getOwner(packageInfo.packageName, splitName)
// copy the split APK to the storage stream
getApkInputStream(splitSourceDirs[i]).use { inputStream ->
backupReceiver.readFromStream(inputStream)
val backupData = getApkInputStream(splitSourceDirs[i]).use { inputStream ->
backupReceiver.readFromStream(owner, inputStream)
}
val backupData = backupReceiver.finalize()
val split = Snapshot.Split.newBuilder()
.setName(packageInfo.splitNames[i])
.addAllChunkIds(backupData.chunks.forProto())
.setName(splitName)
.addAllChunkIds(backupData.chunkIds.forProto())
.build()
splits.add(split)
chunkMap.putAll(backupData.chunkMap)
blobMap.putAll(backupData.blobMap)
}
return splits
}

private fun getOwner(packageName: String, split: String) = "APK backup $packageName $split"

}

/**

@@ -96,8 +96,16 @@ internal class IconManager(
zip.closeEntry()
}
}
backupReceiver.addBytes(byteArrayOutputStream.toByteArray())
val backupData = backupReceiver.finalize()
val owner = "IconManager"
try {
backupReceiver.addBytes(owner, byteArrayOutputStream.toByteArray())
} catch (e: Exception) {
// ensure to call finalize, even if an exception gets thrown while adding bytes
backupReceiver.finalize(owner)
throw e
}
// call finalize and add to snapshot only when we got here without exception
val backupData = backupReceiver.finalize(owner)
snapshotCreator.onIconsBackedUp(backupData)
Log.d(TAG, "Finished uploading icons")
}
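
For reference, each caller in this commit claims the shared receiver with its own owner tag. The strings below are collected from the diff; the helper names wrapping them here are only illustrative:

// Owner tags as they appear in this commit (helper names here are hypothetical):
fun fullBackupOwner(packageName: String) = "FullBackup $packageName"
fun kvBackupOwner(packageName: String) = "KV $packageName"
fun apkBackupOwner(packageName: String, split: String) = "APK backup $packageName $split" // split is "" for the base APK
const val ICON_MANAGER_OWNER = "IconManager"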

@@ -142,14 +142,15 @@ internal class ApkBackupRestoreTest : TransportTest() {
every { PackageUtils.computeSha256DigestBytes(signatureBytes) } returns signatureHash
every { snapshotManager.latestSnapshot } returns snapshot
every { pm.getInstallSourceInfo(packageInfo.packageName) } returns mockk(relaxed = true)
coEvery { backupReceiver.readFromStream(capture(capturedApkStream)) } answers {
coEvery { backupReceiver.readFromStream(any(), capture(capturedApkStream)) } answers {
capturedApkStream.captured.copyTo(outputStream)
apkBackupData
} andThenAnswer {
capturedApkStream.captured.copyTo(splitOutputStream)
splitBackupData
}
coEvery { backupReceiver.finalize() } returns apkBackupData andThen splitBackupData
every {
snapshotCreator.onApkBackedUp(packageInfo, any<Snapshot.Apk>(), chunkMap)
snapshotCreator.onApkBackedUp(packageInfo, any<Snapshot.Apk>(), blobMap)
} just Runs

apkBackup.backupApkIfNecessary(packageInfo, snapshot)

@@ -488,7 +488,7 @@ internal class ApkRestoreTest : TransportTest() {
}
val splitBlob1 = blob { id = copyFrom(Random.nextBytes(32)) }
val splitBlob2 = blob { id = copyFrom(Random.nextBytes(32)) }
val blobMap = apkBackupData.chunkMap +
val blobMap = apkBackupData.blobMap +
mapOf(splitChunkId1 to splitBlob1) +
mapOf(splitChunkId2 to splitBlob2)
val app = appNoSplit.copy {

@@ -147,7 +147,6 @@ internal class CoordinatorIntegrationTest : TransportTest() {
val inputStream = CapturingSlot<InputStream>()
val bOutputStream = ByteArrayOutputStream()

every { backupReceiver.assertFinalized() } just Runs
// read one key/value record and write it to output stream
every { inputFactory.getBackupDataInput(fileDescriptor) } returns backupDataInput
every { backupDataInput.readNextHeader() } returns true andThen true andThen false

@@ -166,10 +165,10 @@ internal class CoordinatorIntegrationTest : TransportTest() {
assertEquals(TRANSPORT_OK, backup.performIncrementalBackup(packageInfo, fileDescriptor, 0))

// upload DB
coEvery { backupReceiver.readFromStream(capture(inputStream)) } answers {
coEvery { backupReceiver.readFromStream(any(), capture(inputStream)) } answers {
inputStream.captured.copyTo(bOutputStream)
apkBackupData
}
coEvery { backupReceiver.finalize() } returns apkBackupData
every {
snapshotCreator.onPackageBackedUp(packageInfo, BackupType.KV, apkBackupData)
} just Runs

@@ -216,7 +215,6 @@ internal class CoordinatorIntegrationTest : TransportTest() {
val appData = ByteArray(size).apply { Random.nextBytes(this) }
val bOutputStream = ByteArrayOutputStream()

every { backupReceiver.assertFinalized() } just Runs
// read one key/value record and write it to output stream
every { inputFactory.getBackupDataInput(fileDescriptor) } returns backupDataInput
every { backupDataInput.readNextHeader() } returns true andThen false

@@ -231,10 +229,10 @@ internal class CoordinatorIntegrationTest : TransportTest() {
assertEquals(TRANSPORT_OK, backup.performIncrementalBackup(packageInfo, fileDescriptor, 0))

// upload DB
coEvery { backupReceiver.readFromStream(capture(inputStream)) } answers {
coEvery { backupReceiver.readFromStream(any(), capture(inputStream)) } answers {
inputStream.captured.copyTo(bOutputStream)
apkBackupData
}
coEvery { backupReceiver.finalize() } returns apkBackupData
every {
snapshotCreator.onPackageBackedUp(packageInfo, BackupType.KV, apkBackupData)
} just Runs

@@ -287,9 +285,8 @@ internal class CoordinatorIntegrationTest : TransportTest() {
val bInputStream = ByteArrayInputStream(appData)

every { inputFactory.getInputStream(fileDescriptor) } returns bInputStream
every { backupReceiver.assertFinalized() } just Runs
every { settingsManager.isQuotaUnlimited() } returns false
coEvery { backupReceiver.addBytes(capture(byteSlot)) } answers {
coEvery { backupReceiver.addBytes(any(), capture(byteSlot)) } answers {
bOutputStream.writeBytes(byteSlot.captured)
}
every {

@@ -302,7 +299,7 @@ internal class CoordinatorIntegrationTest : TransportTest() {
size = apkBackupData.size,
)
} just Runs
coEvery { backupReceiver.finalize() } returns apkBackupData // just some backupData
coEvery { backupReceiver.finalize(any()) } returns apkBackupData // just some backupData

// perform backup to output stream
assertEquals(TRANSPORT_OK, backup.performFullBackup(packageInfo, fileDescriptor, 0))

@@ -65,6 +65,7 @@ internal abstract class TransportTest {
longVersionCode = Random.nextLong()
applicationInfo = this@TransportTest.applicationInfo
signingInfo = sigInfo
splitNames = emptyArray()
}
protected val packageName: String = packageInfo.packageName
protected val pmPackageInfo = PackageInfo().apply {

@@ -104,7 +105,7 @@
protected val apkBackupData = BackupData(listOf(chunkId1), mapOf(chunkId1 to blob1))
protected val splitBackupData =
BackupData(listOf(chunkId2), mapOf(chunkId2 to blob2))
protected val chunkMap = apkBackupData.chunkMap + splitBackupData.chunkMap
protected val blobMap = apkBackupData.blobMap + splitBackupData.blobMap
protected val baseSplit = split {
name = BASE_SPLIT
chunkIds.add(ByteString.fromHex(chunkId1))

@@ -126,7 +127,7 @@
protected val snapshot = snapshot {
token = this@TransportTest.token
apps[packageName] = app
blobs.putAll(chunkMap)
blobs.putAll(blobMap)
}
protected val metadata = BackupMetadata(
token = token,

@@ -0,0 +1,167 @@
/*
* SPDX-FileCopyrightText: 2024 The Calyx Institute
* SPDX-License-Identifier: Apache-2.0
*/

package com.stevesoltys.seedvault.transport.backup

import com.stevesoltys.seedvault.getRandomByteArray
import com.stevesoltys.seedvault.proto.Snapshot
import com.stevesoltys.seedvault.transport.TransportTest
import io.mockk.Runs
import io.mockk.coEvery
import io.mockk.every
import io.mockk.just
import io.mockk.mockk
import io.mockk.verify
import kotlinx.coroutines.runBlocking
import org.calyxos.seedvault.chunker.Chunk
import org.calyxos.seedvault.chunker.Chunker
import org.junit.jupiter.api.Assertions.assertEquals
import org.junit.jupiter.api.Test
import org.junit.jupiter.api.assertThrows
import java.io.ByteArrayInputStream
import java.io.IOException
import java.io.InputStream

internal class BackupReceiverTest : TransportTest() {

private val blobCache: BlobCache = mockk()
private val blobCreator: BlobCreator = mockk()
private val chunker: Chunker = mockk()

private val backupReceiver = BackupReceiver(
blobCache = blobCache,
blobCreator = blobCreator,
crypto = crypto,
replaceableChunker = chunker,
)

@Test
fun `ownership is enforced`() = runBlocking {
every { chunker.addBytes(ByteArray(0)) } returns emptySequence()

backupReceiver.addBytes("foo", ByteArray(0))
assertThrows<IllegalStateException> {
backupReceiver.addBytes("bar", ByteArray(0))
}
every { chunker.finalize() } returns emptySequence()
assertThrows<IllegalStateException> {
backupReceiver.readFromStream("bar", ByteArrayInputStream(ByteArray(0)))
}
assertThrows<IllegalStateException> {
backupReceiver.finalize("bar")
}
// finalize with proper owner
backupReceiver.finalize("foo")
// now "bar" can add bytes
backupReceiver.addBytes("bar", ByteArray(0))
}

@Test
fun `add bytes and finalize`() = runBlocking {
val bytes = getRandomByteArray()
val chunkBytes1 = getRandomByteArray()
val chunkBytes2 = getRandomByteArray()
val chunk1 = Chunk(0, chunkBytes1.size, chunkBytes1, "hash1")
val chunk2 = Chunk(0, chunkBytes2.size, chunkBytes2, "hash2")

// chunk1 is new, but chunk2 is already cached
every { chunker.addBytes(bytes) } returns sequenceOf(chunk1)
every { chunker.finalize() } returns sequenceOf(chunk2)
every { blobCache["hash1"] } returns null
every { blobCache["hash2"] } returns blob2
coEvery { blobCreator.createNewBlob(chunk1) } returns blob1
coEvery { blobCache.saveNewBlob("hash1", blob1) } just Runs

// add bytes and finalize
backupReceiver.addBytes("foo", bytes)
val backupData = backupReceiver.finalize("foo")

// assert that backupData includes all chunks and blobs
assertEquals(listOf("hash1", "hash2"), backupData.chunkIds)
assertEquals(setOf("hash1", "hash2"), backupData.blobMap.keys)
assertEquals(blob1, backupData.blobMap["hash1"])
assertEquals(blob2, backupData.blobMap["hash2"])
}

@Test
fun `readFromStream`() = runBlocking {
val bytes = getRandomByteArray()
val chunkBytes1 = getRandomByteArray()
val chunkBytes2 = getRandomByteArray()
val chunk1 = Chunk(0, chunkBytes1.size, chunkBytes1, "hash1")
val chunk2 = Chunk(0, chunkBytes2.size, chunkBytes2, "hash2")

// chunk1 is new, but chunk2 is already cached
every { chunker.addBytes(bytes) } returns sequenceOf(chunk1)
every { chunker.finalize() } returns sequenceOf(chunk2)
every { blobCache["hash1"] } returns null
every { blobCache["hash2"] } returns blob2
coEvery { blobCreator.createNewBlob(chunk1) } returns blob1
coEvery { blobCache.saveNewBlob("hash1", blob1) } just Runs

// add bytes and finalize
val backupData = backupReceiver.readFromStream("foo", ByteArrayInputStream(bytes))

// assert that backupData includes all chunks and blobs
assertEquals(listOf("hash1", "hash2"), backupData.chunkIds)
assertEquals(setOf("hash1", "hash2"), backupData.blobMap.keys)
assertEquals(blob1, backupData.blobMap["hash1"])
assertEquals(blob2, backupData.blobMap["hash2"])

// data should be all empty when calling finalize again
every { chunker.finalize() } returns emptySequence()
val backupDataEnd = backupReceiver.finalize("foo")
assertEquals(emptyList<String>(), backupDataEnd.chunkIds)
assertEquals(emptyMap<String, Snapshot.Blob>(), backupDataEnd.blobMap)
}

@Test
fun `readFromStream auto-finalizes when it throws`() = runBlocking {
val inputStream: InputStream = mockk()

every { inputStream.read(any<ByteArray>()) } throws IOException()
every { chunker.finalize() } returns emptySequence()

assertThrows<IOException> {
backupReceiver.readFromStream("foo", inputStream)
}

verify {
chunker.finalize()
}

// bytes can be added with different owner now
every { chunker.addBytes(ByteArray(0)) } returns emptySequence()
backupReceiver.addBytes("bar", ByteArray(0))
}

@Test
fun `finalizing happens even if creating new blob throws`() = runBlocking {
val bytes = getRandomByteArray()
val chunkBytes1 = getRandomByteArray()
val chunkBytes2 = getRandomByteArray()
val chunk1 = Chunk(0, chunkBytes1.size, chunkBytes1, chunkId1)
val chunk2 = Chunk(0, chunkBytes2.size, chunkBytes2, chunkId2)

// chunk1 is new, but chunk2 is already cached
every { chunker.addBytes(bytes) } returns sequenceOf(chunk1)
every { chunker.finalize() } returns sequenceOf(chunk2)
every { blobCache[chunkId1] } returns blob1
every { blobCache[chunkId2] } returns null
coEvery { blobCreator.createNewBlob(chunk2) } throws IOException()

assertThrows<IOException> {
backupReceiver.finalize("foo")
}

// now we can finalize again with different owner
every { chunker.finalize() } returns emptySequence()
val backupData = backupReceiver.finalize("foo")

// data should be all empty, not include blob1
assertEquals(emptyList<String>(), backupData.chunkIds)
assertEquals(emptyMap<String, Snapshot.Blob>(), backupData.blobMap)
}
}

@@ -90,12 +90,11 @@ internal class FullBackupTest : BackupTest() {
@Test
fun `performFullBackup runs ok`() = runBlocking {
every { inputFactory.getInputStream(data) } returns inputStream
every { backupReceiver.assertFinalized() } just Runs

assertEquals(TRANSPORT_OK, backup.performFullBackup(packageInfo, data, 0))
assertTrue(backup.hasState)

coEvery { backupReceiver.finalize() } returns backupData
coEvery { backupReceiver.finalize("FullBackup $packageName") } returns backupData
expectClearState()

assertEquals(backupData, backup.finishBackup())

@@ -106,7 +105,6 @@ internal class FullBackupTest : BackupTest() {
fun `sendBackupData first call over quota`() = runBlocking {
every { settingsManager.isQuotaUnlimited() } returns false
every { inputFactory.getInputStream(data) } returns inputStream
every { backupReceiver.assertFinalized() } just Runs
val numBytes = (quota + 1).toInt()
expectSendData(numBytes)

@@ -115,7 +113,7 @@ internal class FullBackupTest : BackupTest() {
assertEquals(TRANSPORT_QUOTA_EXCEEDED, backup.sendBackupData(numBytes))
assertTrue(backup.hasState)

coEvery { backupReceiver.finalize() } returns backupData
coEvery { backupReceiver.finalize("FullBackup $packageName") } returns backupData
expectClearState()

assertEquals(backupData, backup.finishBackup())

@@ -126,7 +124,6 @@ internal class FullBackupTest : BackupTest() {
fun `sendBackupData subsequent calls over quota`() = runBlocking {
every { settingsManager.isQuotaUnlimited() } returns false
every { inputFactory.getInputStream(data) } returns inputStream
every { backupReceiver.assertFinalized() } just Runs

assertEquals(TRANSPORT_OK, backup.performFullBackup(packageInfo, data, 0))
assertTrue(backup.hasState)

@@ -142,7 +139,7 @@ internal class FullBackupTest : BackupTest() {
}
assertEquals(TRANSPORT_QUOTA_EXCEEDED, sendResult)

coEvery { backupReceiver.finalize() } returns backupData
coEvery { backupReceiver.finalize("FullBackup $packageName") } returns backupData
expectClearState()

// in reality, this may not call finishBackup(), but cancelBackup()

@@ -153,7 +150,6 @@ internal class FullBackupTest : BackupTest() {
@Test
fun `sendBackupData throws exception when reading from InputStream`() = runBlocking {
every { inputFactory.getInputStream(data) } returns inputStream
every { backupReceiver.assertFinalized() } just Runs

assertEquals(TRANSPORT_OK, backup.performFullBackup(packageInfo, data, 0))
assertTrue(backup.hasState)

@@ -164,7 +160,7 @@ internal class FullBackupTest : BackupTest() {
assertEquals(TRANSPORT_ERROR, backup.sendBackupData(bytes.size))
assertTrue(backup.hasState)

coEvery { backupReceiver.finalize() } returns backupData
coEvery { backupReceiver.finalize("FullBackup $packageName") } returns backupData
expectClearState()

assertEquals(backupData, backup.finishBackup())

@@ -174,19 +170,18 @@ internal class FullBackupTest : BackupTest() {
@Test
fun `sendBackupData throws exception when sending data`() = runBlocking {
every { inputFactory.getInputStream(data) } returns inputStream
every { backupReceiver.assertFinalized() } just Runs

assertEquals(TRANSPORT_OK, backup.performFullBackup(packageInfo, data, 0))
assertTrue(backup.hasState)

every { settingsManager.isQuotaUnlimited() } returns false
every { inputStream.read(any(), 0, bytes.size) } returns bytes.size
coEvery { backupReceiver.addBytes(any()) } throws IOException()
coEvery { backupReceiver.addBytes("FullBackup $packageName", any()) } throws IOException()

assertEquals(TRANSPORT_ERROR, backup.sendBackupData(bytes.size))
assertTrue(backup.hasState)

coEvery { backupReceiver.finalize() } returns backupData
coEvery { backupReceiver.finalize("FullBackup $packageName") } returns backupData
expectClearState()

assertEquals(backupData, backup.finishBackup())

@@ -196,7 +191,6 @@ internal class FullBackupTest : BackupTest() {
@Test
fun `sendBackupData throws exception when finalizing`() = runBlocking {
every { inputFactory.getInputStream(data) } returns inputStream
every { backupReceiver.assertFinalized() } just Runs

assertEquals(TRANSPORT_OK, backup.performFullBackup(packageInfo, data, 0))
assertTrue(backup.hasState)

@@ -207,7 +201,7 @@ internal class FullBackupTest : BackupTest() {
assertEquals(TRANSPORT_OK, backup.sendBackupData(bytes.size))
assertTrue(backup.hasState)

coEvery { backupReceiver.finalize() } throws IOException()
coEvery { backupReceiver.finalize("FullBackup $packageName") } throws IOException()
expectClearState()

assertThrows<IOException> {

@@ -222,7 +216,6 @@ internal class FullBackupTest : BackupTest() {
fun `sendBackupData runs ok`() = runBlocking {
every { settingsManager.isQuotaUnlimited() } returns false
every { inputFactory.getInputStream(data) } returns inputStream
every { backupReceiver.assertFinalized() } just Runs

assertEquals(TRANSPORT_OK, backup.performFullBackup(packageInfo, data, 0))
assertTrue(backup.hasState)

@@ -237,7 +230,7 @@ internal class FullBackupTest : BackupTest() {
assertEquals(TRANSPORT_OK, backup.sendBackupData(numBytes2))
assertTrue(backup.hasState)

coEvery { backupReceiver.finalize() } returns backupData
coEvery { backupReceiver.finalize("FullBackup $packageName") } returns backupData
expectClearState()

assertEquals(backupData, backup.finishBackup())

@@ -247,12 +240,11 @@ internal class FullBackupTest : BackupTest() {
@Test
fun `cancel full backup runs ok`() = runBlocking {
every { inputFactory.getInputStream(data) } returns inputStream
every { backupReceiver.assertFinalized() } just Runs

assertEquals(TRANSPORT_OK, backup.performFullBackup(packageInfo, data, 0))
assertTrue(backup.hasState)

coEvery { backupReceiver.finalize() } returns backupData
coEvery { backupReceiver.finalize("FullBackup $packageName") } returns backupData
expectClearState()

backup.cancelFullBackup()

@@ -262,12 +254,11 @@ internal class FullBackupTest : BackupTest() {
@Test
fun `cancel full backup throws exception when finalizing`() = runBlocking {
every { inputFactory.getInputStream(data) } returns inputStream
every { backupReceiver.assertFinalized() } just Runs

assertEquals(TRANSPORT_OK, backup.performFullBackup(packageInfo, data, 0))
assertTrue(backup.hasState)

coEvery { backupReceiver.finalize() } throws IOException()
coEvery { backupReceiver.finalize("FullBackup $packageName") } throws IOException()
expectClearState()

backup.cancelFullBackup()

@@ -277,12 +268,11 @@ internal class FullBackupTest : BackupTest() {
@Test
fun `clearState ignores exception when closing InputStream`() = runBlocking {
every { inputFactory.getInputStream(data) } returns inputStream
every { backupReceiver.assertFinalized() } just Runs

assertEquals(TRANSPORT_OK, backup.performFullBackup(packageInfo, data, 0))
assertTrue(backup.hasState)

coEvery { backupReceiver.finalize() } returns backupData
coEvery { backupReceiver.finalize("FullBackup $packageName") } returns backupData
every { outputStream.flush() } just Runs
every { outputStream.close() } just Runs
every { inputStream.close() } throws IOException()

@@ -295,12 +285,11 @@ internal class FullBackupTest : BackupTest() {
@Test
fun `clearState ignores exception when closing ParcelFileDescriptor`() = runBlocking {
every { inputFactory.getInputStream(data) } returns inputStream
every { backupReceiver.assertFinalized() } just Runs

assertEquals(TRANSPORT_OK, backup.performFullBackup(packageInfo, data, 0))
assertTrue(backup.hasState)

coEvery { backupReceiver.finalize() } returns backupData
coEvery { backupReceiver.finalize("FullBackup $packageName") } returns backupData
every { outputStream.flush() } just Runs
every { outputStream.close() } just Runs
every { inputStream.close() } just Runs

@@ -312,7 +301,7 @@ internal class FullBackupTest : BackupTest() {

private fun expectSendData(numBytes: Int, readBytes: Int = numBytes) {
every { inputStream.read(any(), any(), numBytes) } returns readBytes
coEvery { backupReceiver.addBytes(any()) } just Runs
coEvery { backupReceiver.addBytes("FullBackup $packageName", any()) } just Runs
}

private fun expectClearState() {

@@ -107,7 +107,6 @@ internal class KVBackupTest : BackupTest() {

@Test
fun `package with no new data comes back ok right away (if we have data)`() = runBlocking {
every { backupReceiver.assertFinalized() } just Runs
every { dbManager.existsDb(packageName) } returns true
every { dbManager.getDb(packageName) } returns db
every { data.close() } just Runs

@@ -128,7 +127,6 @@ internal class KVBackupTest : BackupTest() {

@Test
fun `request non-incremental backup when no data has changed, but we lost it`() = runBlocking {
every { backupReceiver.assertFinalized() } just Runs
every { dbManager.existsDb(packageName) } returns false
every { dbManager.getDb(packageName) } returns db
every { db.close() } just Runs

@@ -224,33 +222,6 @@ internal class KVBackupTest : BackupTest() {
verify { data.close() }
}

@Test
fun `exception while finalizing`() = runBlocking {
initPlugin(false)
getDataInput(listOf(true, false))
every { db.put(key, dataValue) } just Runs
every { data.close() } just Runs

assertEquals(TRANSPORT_OK, backup.performBackup(packageInfo, data, 0))
assertTrue(backup.hasState)

every { db.vacuum() } just Runs
every { db.close() } just Runs
every { dbManager.getDbInputStream(packageName) } returns inputStream
coEvery { backupReceiver.readFromStream(inputStream) } just Runs
coEvery { backupReceiver.finalize() } throws IOException()

assertThrows<IOException> { // we let exceptions bubble up to coordinators
backup.finishBackup()
}
assertFalse(backup.hasState)

verify {
db.close()
data.close()
}
}

@Test
fun `exception while uploading data`() = runBlocking {
initPlugin(false)

@@ -264,7 +235,9 @@ internal class KVBackupTest : BackupTest() {
every { db.vacuum() } just Runs
every { db.close() } just Runs
every { dbManager.getDbInputStream(packageName) } returns inputStream
coEvery { backupReceiver.readFromStream(inputStream) } throws IOException()
coEvery {
backupReceiver.readFromStream("KV $packageName", inputStream)
} throws IOException()

assertThrows<IOException> { // we let exceptions bubble up to coordinators
backup.finishBackup()

@@ -285,7 +258,6 @@ internal class KVBackupTest : BackupTest() {
}

private fun initPlugin(hasDataForPackage: Boolean = false, pi: PackageInfo = packageInfo) {
every { backupReceiver.assertFinalized() } just Runs
every { dbManager.existsDb(pi.packageName) } returns hasDataForPackage
every { dbManager.getDb(pi.packageName) } returns db
}

@@ -310,8 +282,9 @@ internal class KVBackupTest : BackupTest() {
every { db.vacuum() } just Runs
every { db.close() } just Runs
every { dbManager.getDbInputStream(packageName) } returns inputStream
coEvery { backupReceiver.readFromStream(inputStream) } just Runs
coEvery { backupReceiver.finalize() } returns apkBackupData
coEvery {
backupReceiver.readFromStream("KV $packageName", inputStream)
} returns apkBackupData
}

}

@@ -112,14 +112,14 @@ internal class ApkBackupTest : BackupTest() {
val s = snapshot.copy { apps.put(packageName, app) }
expectChecks()
every {
snapshotCreator.onApkBackedUp(packageInfo, apk, chunkMap)
snapshotCreator.onApkBackedUp(packageInfo, apk, blobMap)
} just Runs

apkBackup.backupApkIfNecessary(packageInfo, s)

// ensure we are still snapshotting this version
verify {
snapshotCreator.onApkBackedUp(packageInfo, apk, chunkMap)
snapshotCreator.onApkBackedUp(packageInfo, apk, blobMap)
}
}

@@ -142,23 +142,23 @@ internal class ApkBackupTest : BackupTest() {
every {
pm.getInstallSourceInfo(packageInfo.packageName)
} returns InstallSourceInfo(null, null, null, apk.installer)
coEvery { backupReceiver.readFromStream(any()) } just Runs
coEvery { backupReceiver.finalize() } returns apkBackupData
coEvery {
backupReceiver.readFromStream("APK backup $packageName ", any())
} returns apkBackupData

every {
snapshotCreator.onApkBackedUp(packageInfo, match<Snapshot.Apk> {
it.signaturesList != apk.signaturesList
}, apkBackupData.chunkMap)
}, apkBackupData.blobMap)
} just Runs

apkBackup.backupApkIfNecessary(packageInfo, s)

coVerify {
backupReceiver.readFromStream(any())
backupReceiver.finalize()
backupReceiver.readFromStream("APK backup $packageName ", any())
snapshotCreator.onApkBackedUp(packageInfo, match<Snapshot.Apk> {
it.signaturesList != apk.signaturesList
}, apkBackupData.chunkMap)
}, apkBackupData.blobMap)
}
}

@@ -211,10 +211,12 @@ internal class ApkBackupTest : BackupTest() {
every {
pm.getInstallSourceInfo(packageInfo.packageName)
} returns InstallSourceInfo(null, null, null, installer)
coEvery { backupReceiver.readFromStream(capture(capturedStream)) } answers {
coEvery {
backupReceiver.readFromStream("APK backup $packageName ", capture(capturedStream))
} answers {
capturedStream.captured.copyTo(apkOutputStream)
BackupData(emptyList(), emptyMap())
}
coEvery { backupReceiver.finalize() } returns BackupData(emptyList(), emptyMap())
every {
snapshotCreator.onApkBackedUp(packageInfo, match<Snapshot.Apk> {
it.installer == installer

@@ -223,10 +225,6 @@ internal class ApkBackupTest : BackupTest() {

apkBackup.backupApkIfNecessary(packageInfo, snapshot)
assertArrayEquals(apkBytes, apkOutputStream.toByteArray())

coVerify {
backupReceiver.finalize()
}
}

@Test

@@ -266,14 +264,28 @@ internal class ApkBackupTest : BackupTest() {
every {
pm.getInstallSourceInfo(packageInfo.packageName)
} returns InstallSourceInfo(null, null, null, installer)
coEvery { backupReceiver.readFromStream(capture(capturedStream)) } answers {
coEvery {
backupReceiver.readFromStream("APK backup $packageName ", capture(capturedStream))
} answers {
capturedStream.captured.copyTo(apkOutputStream)
} andThenAnswer {
capturedStream.captured.copyTo(split1OutputStream)
} andThenAnswer {
capturedStream.captured.copyTo(split2OutputStream)
BackupData(emptyList(), emptyMap())
}
coEvery {
backupReceiver.readFromStream(
"APK backup $packageName $split1Name", capture(capturedStream)
)
} answers {
capturedStream.captured.copyTo(split1OutputStream)
BackupData(emptyList(), emptyMap())
}
coEvery {
backupReceiver.readFromStream(
"APK backup $packageName $split2Name", capture(capturedStream)
)
} answers {
capturedStream.captured.copyTo(split2OutputStream)
BackupData(emptyList(), emptyMap())
}
coEvery { backupReceiver.finalize() } returns BackupData(emptyList(), emptyMap())
every {
snapshotCreator.onApkBackedUp(packageInfo, match<Snapshot.Apk> {
it.installer == installer &&

@@ -286,10 +298,6 @@ internal class ApkBackupTest : BackupTest() {
assertArrayEquals(apkBytes, apkOutputStream.toByteArray())
assertArrayEquals(split1Bytes, split1OutputStream.toByteArray())
assertArrayEquals(split2Bytes, split2OutputStream.toByteArray())

coVerify {
backupReceiver.finalize()
}
}

private fun expectChecks() {