Fully implement BlobCache

This class is responsible for caching blobs during a backup run,
so we can know that a blob for the given chunk ID already exists
and does not need to be uploaded again.

It builds up its cache from snapshots available on the backend
and from the persistent cache that includes blobs that could not be added to a snapshot
because the backup was aborted.
Torsten Grote 2024-09-12 15:59:35 -03:00
parent 5b567c79a2
commit 952cdec55d
15 changed files with 392 additions and 135 deletions
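
The call sites in the diffs below (AppBackupManager, BackupReceiver, BackendManager) add up to the following cache lifecycle per backup run. A minimal Kotlin sketch, assuming a hypothetical uploadChunk() stand-in for the work BlobCreator does:

import com.stevesoltys.seedvault.proto.Snapshot
import com.stevesoltys.seedvault.transport.backup.BlobCache
import org.calyxos.seedvault.core.backends.FileInfo

// Sketch only: drives BlobCache the way the new AppBackupManager/BackupReceiver code does.
suspend fun exampleBackupRun(
    cache: BlobCache,
    blobInfos: List<FileInfo>,                // Blob files listed on the backend
    snapshots: List<Snapshot>,                // parsed via SnapshotManager.onSnapshotsLoaded()
    chunks: Map<String, ByteArray>,           // chunkId -> chunk data to back up
    uploadChunk: suspend (ByteArray) -> Snapshot.Blob, // hypothetical upload step
) {
    // 1. Before backup: build the chunkId -> Blob map from the backend listing,
    //    the known snapshots and the persistent local cache.
    cache.populateCache(blobInfos, snapshots)

    // 2. During backup: only upload chunks we have no blob for yet.
    for ((chunkId, data) in chunks) {
        if (cache[chunkId] == null) {
            val blob = uploadChunk(data)
            cache.saveNewBlob(chunkId, blob) // also appended to the persistent cache file
        }
    }

    // 3. After the snapshot was uploaded successfully, both caches can be dropped:
    //    the mapping is now recorded in the snapshot itself.
    cache.clearLocalCache()
    cache.clear()
}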


@@ -62,7 +62,7 @@ open class App : Application() {
private val appModule = module {
single { SettingsManager(this@App) }
single { BackupNotificationManager(this@App) }
single { BackendManager(this@App, get(), get()) }
single { BackendManager(this@App, get(), get(), get()) }
single {
BackendFactory {
// uses context of the device's main user to be able to access USB storage


@@ -12,6 +12,7 @@ import com.stevesoltys.seedvault.getStorageContext
import com.stevesoltys.seedvault.permitDiskReads
import com.stevesoltys.seedvault.settings.SettingsManager
import com.stevesoltys.seedvault.settings.StoragePluginType
import com.stevesoltys.seedvault.transport.backup.BlobCache
import org.calyxos.seedvault.core.backends.Backend
import org.calyxos.seedvault.core.backends.BackendFactory
import org.calyxos.seedvault.core.backends.BackendProperties
@@ -20,6 +21,7 @@ import org.calyxos.seedvault.core.backends.saf.SafBackend
class BackendManager(
private val context: Context,
private val settingsManager: SettingsManager,
private val blobCache: BlobCache,
backendFactory: BackendFactory,
) {
@@ -86,6 +88,7 @@ class BackendManager(
settingsManager.setStorageBackend(backend)
mBackend = backend
mBackendProperties = storageProperties
blobCache.clearLocalCache()
}
/**


@@ -16,7 +16,6 @@ import okio.Buffer
import okio.buffer
import okio.sink
import org.calyxos.seedvault.core.backends.AppBackupFileType
import org.calyxos.seedvault.core.backends.TopLevelFolder
internal class SnapshotManager(
private val crypto: Crypto,
@@ -27,39 +26,25 @@ internal class SnapshotManager(
private val log = KotlinLogging.logger {}
/**
* The latest [Snapshot]. May be stale if [loadSnapshots] has not returned
* The latest [Snapshot]. May be stale if [onSnapshotsLoaded] has not returned
* or wasn't called since new snapshots have been created.
*/
var latestSnapshot: Snapshot? = null
private set
suspend fun loadSnapshots(callback: (Snapshot) -> Unit) {
log.info { "Loading snapshots..." }
val handles = mutableListOf<AppBackupFileType.Snapshot>()
backendManager.backend.list(
topLevelFolder = TopLevelFolder(crypto.repoId),
AppBackupFileType.Snapshot::class,
) { fileInfo ->
fileInfo.fileHandle as AppBackupFileType.Snapshot
handles.add(fileInfo.fileHandle as AppBackupFileType.Snapshot)
}
handles.forEach { fileHandle ->
suspend fun onSnapshotsLoaded(handles: List<AppBackupFileType.Snapshot>): List<Snapshot> {
return handles.map { snapshotHandle ->
// TODO set up local snapshot cache, so we don't need to download those all the time
// TODO is it a fatal error when one snapshot is corrupted or couldn't get loaded?
val snapshot = onSnapshotFound(fileHandle)
callback(snapshot)
val snapshot = loader.loadFile(snapshotHandle).use { inputStream ->
Snapshot.parseFrom(inputStream)
}
// update latest snapshot if this one is more recent
if (snapshot.token > (latestSnapshot?.token ?: 0)) latestSnapshot = snapshot
snapshot
}
}
private suspend fun onSnapshotFound(snapshotHandle: AppBackupFileType.Snapshot): Snapshot {
// TODO set up local snapshot cache, so we don't need to download those all the time
val snapshot = loader.loadFile(snapshotHandle).use { inputStream ->
Snapshot.parseFrom(inputStream)
}
// update latest snapshot if this one is more recent
if (snapshot.token > (latestSnapshot?.token ?: 0)) latestSnapshot = snapshot
return snapshot
}
suspend fun saveSnapshot(snapshot: Snapshot) {
val buffer = Buffer()
val bufferStream = buffer.outputStream()


@@ -5,13 +5,21 @@
package com.stevesoltys.seedvault.transport.backup
import com.stevesoltys.seedvault.backend.BackendManager
import com.stevesoltys.seedvault.crypto.Crypto
import com.stevesoltys.seedvault.settings.SettingsManager
import com.stevesoltys.seedvault.transport.SnapshotManager
import io.github.oshai.kotlinlogging.KotlinLogging
import kotlinx.coroutines.delay
import org.calyxos.seedvault.core.backends.AppBackupFileType.Blob
import org.calyxos.seedvault.core.backends.AppBackupFileType.Snapshot
import org.calyxos.seedvault.core.backends.FileInfo
import org.calyxos.seedvault.core.backends.TopLevelFolder
internal class AppBackupManager(
private val blobsCache: BlobsCache,
private val crypto: Crypto,
private val blobCache: BlobCache,
private val backendManager: BackendManager,
private val settingsManager: SettingsManager,
private val snapshotManager: SnapshotManager,
private val snapshotCreatorFactory: SnapshotCreatorFactory,
@@ -22,22 +30,42 @@ internal class AppBackupManager(
private set
suspend fun beforeBackup() {
log.info { "Before backup" }
log.info { "Loading existing snapshots and blobs..." }
val blobInfos = mutableListOf<FileInfo>()
val snapshotHandles = mutableListOf<Snapshot>()
backendManager.backend.list(
topLevelFolder = TopLevelFolder(crypto.repoId),
Blob::class, Snapshot::class,
) { fileInfo ->
when (fileInfo.fileHandle) {
is Blob -> blobInfos.add(fileInfo)
is Snapshot -> snapshotHandles.add(fileInfo.fileHandle as Snapshot)
else -> error("Unexpected FileHandle: $fileInfo")
}
}
snapshotCreator = snapshotCreatorFactory.createSnapshotCreator()
blobsCache.populateCache()
val snapshots = snapshotManager.onSnapshotsLoaded(snapshotHandles)
blobCache.populateCache(blobInfos, snapshots)
}
suspend fun afterBackupFinished(success: Boolean) {
log.info { "After backup finished. Success: $success" }
blobsCache.clear()
if (success) {
val snapshot = snapshotCreator?.finalizeSnapshot() ?: error("Had no snapshotCreator")
keepTrying {
snapshotManager.saveSnapshot(snapshot)
// free up memory by clearing blobs cache
blobCache.clear()
try {
if (success) {
val snapshot =
snapshotCreator?.finalizeSnapshot() ?: error("Had no snapshotCreator")
keepTrying {
snapshotManager.saveSnapshot(snapshot)
}
settingsManager.token = snapshot.token
// after snapshot was written, we can clear local cache as its info is in snapshot
blobCache.clearLocalCache()
}
settingsManager.token = snapshot.token
} finally {
snapshotCreator = null
}
snapshotCreator = null
}
private suspend fun keepTrying(n: Int = 3, block: suspend () -> Unit) {


@@ -12,7 +12,7 @@ import org.koin.dsl.module
val backupModule = module {
single { BackupInitializer(get()) }
single { BackupReceiver(get(), get(), get()) }
single { BlobsCache(get(), get(), get()) }
single { BlobCache(androidContext()) }
single { BlobCreator(get(), get()) }
single { SnapshotManager(get(), get(), get()) }
single { SnapshotCreatorFactory(androidContext(), get(), get(), get()) }


@@ -21,7 +21,7 @@ data class BackupData(
}
internal class BackupReceiver(
private val blobsCache: BlobsCache,
private val blobCache: BlobCache,
private val blobCreator: BlobCreator,
private val crypto: Crypto,
private val replaceableChunker: Chunker? = null,
@@ -89,11 +89,11 @@ internal class BackupReceiver(
private suspend fun onNewChunk(chunk: Chunk) {
chunks.add(chunk.hash)
val existingBlob = blobsCache.getBlob(chunk.hash)
val existingBlob = blobCache[chunk.hash]
if (existingBlob == null) {
val blob = blobCreator.createNewBlob(chunk)
chunkMap[chunk.hash] = blob
blobsCache.saveNewBlob(chunk.hash, blob)
blobCache.saveNewBlob(chunk.hash, blob)
} else {
chunkMap[chunk.hash] = existingBlob
}


@@ -0,0 +1,163 @@
/*
* SPDX-FileCopyrightText: 2024 The Calyx Institute
* SPDX-License-Identifier: Apache-2.0
*/
package com.stevesoltys.seedvault.transport.backup
import android.content.Context
import android.content.Context.MODE_APPEND
import com.stevesoltys.seedvault.proto.Snapshot
import com.stevesoltys.seedvault.proto.Snapshot.Blob
import io.github.oshai.kotlinlogging.KotlinLogging
import org.calyxos.seedvault.core.backends.FileInfo
import org.calyxos.seedvault.core.toByteArrayFromHex
import org.calyxos.seedvault.core.toHexString
import java.io.FileNotFoundException
import java.io.IOException
private const val CACHE_FILE_NAME = "blobsCache"
/**
* Responsible for caching blobs during a backup run,
* so we can know that a blob for the given chunk ID already exists
* and does not need to be uploaded again.
*
* It builds up its cache from snapshots available on the backend
* and from the persistent cache that includes blobs that could not be added to a snapshot
* because the backup was aborted.
*/
class BlobCache(
private val context: Context,
) {
private val log = KotlinLogging.logger {}
private val blobMap = mutableMapOf<String, Blob>()
/**
* This must be called before saving files to the backend to avoid uploading duplicate blobs.
*/
@Throws(IOException::class)
fun populateCache(blobs: List<FileInfo>, snapshots: List<Snapshot>) {
log.info { "Getting all blobs from backend..." }
blobMap.clear()
// create map of blobId to size of blob on backend
val blobIds = blobs.associate {
Pair(it.fileHandle.name, it.size.toInt())
}
// load local blob cache and include only blobs on backend
loadPersistentBlobCache(blobIds)
// build up mapping from chunkId to blob from available snapshots
snapshots.forEach { snapshot ->
onSnapshotLoaded(snapshot, blobIds)
}
}
/**
* Should only be called after [populateCache] has returned.
*/
operator fun get(chunkId: String): Blob? = blobMap[chunkId]
/**
* Should get called for all new blobs as soon as they've been saved to the backend.
*/
fun saveNewBlob(chunkId: String, blob: Blob) {
val previous = blobMap.put(chunkId, blob)
if (previous == null) {
// persist this new blob locally in case backup gets interrupted
context.openFileOutput(CACHE_FILE_NAME, MODE_APPEND).use { outputStream ->
outputStream.write(chunkId.toByteArrayFromHex())
blob.writeDelimitedTo(outputStream)
}
}
}
/**
* Clears the cached blob mapping.
* Should be called after a backup run to free up memory.
*/
fun clear() {
log.info { "Clearing cache..." }
blobMap.clear()
}
/**
* Clears the local cache.
* Should get called after
* * changing to a different backup to prevent usage of blobs that don't exist there
* * uploading a new snapshot to prevent the persistent cache from growing indefinitely
*/
fun clearLocalCache() {
log.info { "Clearing local cache..." }
context.deleteFile(CACHE_FILE_NAME)
}
/**
* Loads persistent cache from disk and adds blobs to [blobMap]
* if available in [allowedBlobIds] with the right size.
*/
private fun loadPersistentBlobCache(allowedBlobIds: Map<String, Int>) {
try {
context.openFileInput(CACHE_FILE_NAME).use { inputStream ->
val chunkIdBytes = ByteArray(32)
while (true) {
val bytesRead = inputStream.read(chunkIdBytes)
if (bytesRead != 32) break
val chunkId = chunkIdBytes.toHexString()
// parse blob
val blob = Blob.parseDelimitedFrom(inputStream)
val blobId = blob.id.hexFromProto()
// include blob only if size is equal to size on backend
val sizeOnBackend = allowedBlobIds[blobId]
if (sizeOnBackend == blob.length) {
blobMap[chunkId] = blob
} else log.warn {
if (sizeOnBackend == null) {
"Cached blob $blobId is missing from backend."
} else {
"Cached blob $blobId had different size on backend: $sizeOnBackend"
}
}
}
}
} catch (e: Exception) {
if (e is FileNotFoundException) log.info { "No local blob cache found." }
else {
// If the local cache is corrupted, that's not the end of the world.
// We can still continue normally,
// but may be writing out duplicated blobs we can't re-use.
// Those will get deleted again when pruning.
// So swallow the exception.
log.error(e) { "Error loading blobs cache: " }
}
}
}
/**
* Used for populating local [blobMap] cache.
* Adds mapping from chunkId to [Blob], if it exists on backend, i.e. part of [blobIds]
* and its size matches the one on backend, i.e. value of [blobIds].
*/
private fun onSnapshotLoaded(snapshot: Snapshot, blobIds: Map<String, Int>) {
snapshot.blobsMap.forEach { (chunkId, blob) ->
// check if referenced blob still exists on backend
val blobId = blob.id.hexFromProto()
val sizeOnBackend = blobIds[blobId]
if (sizeOnBackend == blob.length) {
// only add blob to our mapping, if it still exists
blobMap.putIfAbsent(chunkId, blob)?.let { previous ->
if (previous.id != blob.id) log.warn {
"Chunk ID ${chunkId.substring(0..5)} had more than one blob."
}
}
} else log.warn {
if (sizeOnBackend == null) {
"Blob $blobId in snapshot ${snapshot.token} is missing."
} else {
"Blob $blobId has unexpected size: $sizeOnBackend"
}
}
}
}
}
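
The persistent cache that saveNewBlob() appends to is a plain record stream: 32 raw bytes of chunk ID followed by one length-delimited Blob proto per entry. A standalone reader for that format, as a sketch with a hypothetical helper name (the real loadPersistentBlobCache() above additionally drops entries whose size no longer matches the blob on the backend):

import com.stevesoltys.seedvault.proto.Snapshot.Blob
import org.calyxos.seedvault.core.toHexString
import java.io.File

// Sketch: reads the append-only cache file written by BlobCache.saveNewBlob(),
// i.e. repeated records of [32-byte chunk ID][length-delimited Snapshot.Blob].
fun readBlobCacheFile(file: File): Map<String, Blob> {
    val result = mutableMapOf<String, Blob>()
    file.inputStream().use { input ->
        val chunkIdBytes = ByteArray(32)
        while (true) {
            if (input.read(chunkIdBytes) != 32) break // EOF or truncated record
            val blob = Blob.parseDelimitedFrom(input) ?: break
            result[chunkIdBytes.toHexString()] = blob
        }
    }
    return result
}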


@@ -1,69 +0,0 @@
/*
* SPDX-FileCopyrightText: 2024 The Calyx Institute
* SPDX-License-Identifier: Apache-2.0
*/
package com.stevesoltys.seedvault.transport.backup
import com.stevesoltys.seedvault.backend.BackendManager
import com.stevesoltys.seedvault.crypto.Crypto
import com.stevesoltys.seedvault.proto.Snapshot.Blob
import com.stevesoltys.seedvault.transport.SnapshotManager
import io.github.oshai.kotlinlogging.KotlinLogging
import org.calyxos.seedvault.core.backends.AppBackupFileType
import org.calyxos.seedvault.core.backends.TopLevelFolder
internal class BlobsCache(
private val crypto: Crypto,
private val backendManager: BackendManager,
private val snapshotManager: SnapshotManager,
) {
private val log = KotlinLogging.logger {}
private val blobMap = mutableMapOf<String, Blob>()
/**
* This must be called before saving files to the backend to avoid uploading duplicate blobs.
*/
suspend fun populateCache() {
log.info { "Getting all blobs from backend..." }
blobMap.clear()
val blobs = mutableSetOf<String>()
backendManager.backend.list(
topLevelFolder = TopLevelFolder(crypto.repoId),
AppBackupFileType.Blob::class,
) { fileInfo ->
fileInfo.fileHandle as AppBackupFileType.Blob
// TODO we could save size info here and later check it is as expected
blobs.add(fileInfo.fileHandle.name)
}
snapshotManager.loadSnapshots { snapshot ->
snapshot.blobsMap.forEach { (chunkId, blob) ->
// check if referenced blob still exists on backend
if (blobs.contains(blob.id.hexFromProto())) {
// only add blob to our mapping, if it still exists
blobMap.putIfAbsent(chunkId, blob)?.let { previous ->
if (previous.id != blob.id) log.warn {
"Chunk ID ${chunkId.substring(0..5)} had more than one blob"
}
}
} else log.warn {
"Blob ${blob.id.hexFromProto()} referenced in snapshot ${snapshot.token}"
}
}
}
}
fun getBlob(hash: String): Blob? = blobMap[hash]
fun saveNewBlob(chunkId: String, blob: Blob) {
blobMap[chunkId] = blob
// TODO persist this blob locally in case backup gets interrupted
}
fun clear() {
log.info { "Clearing cache..." }
blobMap.clear()
}
}


@@ -22,6 +22,7 @@ import androidx.work.OutOfQuotaPolicy.RUN_AS_NON_EXPEDITED_WORK_REQUEST
import androidx.work.PeriodicWorkRequestBuilder
import androidx.work.WorkManager
import androidx.work.WorkerParameters
import com.stevesoltys.seedvault.R
import com.stevesoltys.seedvault.backend.BackendManager
import com.stevesoltys.seedvault.settings.SettingsManager
import com.stevesoltys.seedvault.transport.backup.AppBackupManager
@@ -168,7 +169,7 @@ class AppBackupWorker(
private fun createForegroundInfo() = ForegroundInfo(
NOTIFICATION_ID_OBSERVER,
nm.getBackupNotification(""),
nm.getBackupNotification(applicationContext.getString(R.string.notification_init_text)),
FOREGROUND_SERVICE_TYPE_DATA_SYNC,
)
}


@@ -27,7 +27,7 @@ val workerModule = module {
appBackupManager = get(),
)
}
single { AppBackupManager(get(), get(), get(), get()) }
single { AppBackupManager(get(), get(), get(), get(), get(), get()) }
single {
ApkBackup(
pm = androidContext().packageManager,


@@ -152,6 +152,7 @@
<string name="notification_channel_title">Backup notification</string>
<string name="notification_success_channel_title">Success notification</string>
<string name="notification_title">Backup running</string>
<string name="notification_init_text">Preparing existing backup data for re-use…</string>
<string name="notification_apk_text">Backing up APK of %s</string>
<string name="notification_apk_not_backed_up">Saving list of apps we can not back up.</string>
<string name="notification_backup_already_running">Backup already in progress</string>


@@ -162,8 +162,8 @@ internal class ApkBackupRestoreTest : TransportTest() {
val apkPath = slot<String>()
val cacheFiles = slot<List<File>>()
val repoId = getRandomString()
val apkHandle = AppBackupFileType.Blob(repoId, apkBlob.id.hexFromProto())
val splitHandle = AppBackupFileType.Blob(repoId, splitBlob.id.hexFromProto())
val apkHandle = AppBackupFileType.Blob(repoId, blob1.id.hexFromProto())
val splitHandle = AppBackupFileType.Blob(repoId, blob2.id.hexFromProto())
every { backend.providerPackageName } returns storageProviderPackageName
every { installRestriction.isAllowedToInstallApks() } returns true


@@ -6,7 +6,6 @@
package com.stevesoltys.seedvault.transport
import com.stevesoltys.seedvault.backend.BackendManager
import com.stevesoltys.seedvault.proto.Snapshot
import com.stevesoltys.seedvault.transport.restore.Loader
import io.mockk.coEvery
import io.mockk.every
@@ -15,8 +14,6 @@ import io.mockk.slot
import kotlinx.coroutines.runBlocking
import org.calyxos.seedvault.core.backends.AppBackupFileType
import org.calyxos.seedvault.core.backends.Backend
import org.calyxos.seedvault.core.backends.FileInfo
import org.calyxos.seedvault.core.backends.TopLevelFolder
import org.calyxos.seedvault.core.toByteArrayFromHex
import org.calyxos.seedvault.core.toHexString
import org.junit.jupiter.api.Assertions.assertEquals
@@ -63,18 +60,8 @@ internal class SnapshotManagerTest : TransportTest() {
snapshotHandle.captured.hash,
)
val fileInfo = FileInfo(snapshotHandle.captured, Random.nextLong())
assertTrue(outputStream.size() > 0)
val inputStream = ByteArrayInputStream(outputStream.toByteArray())
coEvery {
backend.list(
topLevelFolder = TopLevelFolder(repoId),
AppBackupFileType.Snapshot::class,
callback = captureLambda<(FileInfo) -> Unit>()
)
} answers {
lambda<(FileInfo) -> Unit>().captured.invoke(fileInfo)
}
coEvery { backend.load(snapshotHandle.captured) } returns inputStream
every {
crypto.sha256(outputStream.toByteArray())
@@ -83,8 +70,9 @@ internal class SnapshotManagerTest : TransportTest() {
passThroughInputStream.captured
}
var loadedSnapshot: Snapshot? = null
snapshotManager.loadSnapshots { loadedSnapshot = it }
assertEquals(snapshot, loadedSnapshot)
snapshotManager.onSnapshotsLoaded(listOf(snapshotHandle.captured)).let { snapshots ->
assertEquals(1, snapshots.size)
assertEquals(snapshot, snapshots[0])
}
}
}


@@ -39,6 +39,7 @@ import io.mockk.mockk
import io.mockk.mockkStatic
import io.mockk.slot
import org.calyxos.seedvault.core.backends.AppBackupFileType
import org.calyxos.seedvault.core.backends.FileInfo
import org.calyxos.seedvault.core.backends.LegacyAppBackupFile
import org.calyxos.seedvault.core.toHexString
import org.junit.jupiter.api.TestInstance
@@ -80,17 +81,29 @@ internal abstract class TransportTest {
protected val splitBytes = byteArrayOf(0x07, 0x08, 0x09)
protected val chunkId1 = Random.nextBytes(32).toHexString()
protected val chunkId2 = Random.nextBytes(32).toHexString()
protected val apkBlob = blob {
protected val blob1 = blob {
id = ByteString.copyFrom(Random.nextBytes(32))
length = Random.nextInt(0, Int.MAX_VALUE)
uncompressedLength = Random.nextInt(0, Int.MAX_VALUE)
}
protected val splitBlob = blob {
protected val blob2 = blob {
id = ByteString.copyFrom(Random.nextBytes(32))
length = Random.nextInt(0, Int.MAX_VALUE)
uncompressedLength = Random.nextInt(0, Int.MAX_VALUE)
}
protected val blobHandle1 = AppBackupFileType.Blob(repoId, apkBlob.id.hexFromProto())
protected val blobHandle2 = AppBackupFileType.Blob(repoId, splitBlob.id.hexFromProto())
protected val apkBackupData = BackupData(listOf(chunkId1), mapOf(chunkId1 to apkBlob))
protected val blobHandle1 = AppBackupFileType.Blob(repoId, blob1.id.hexFromProto())
protected val blobHandle2 = AppBackupFileType.Blob(repoId, blob2.id.hexFromProto())
protected val fileInfo1 = FileInfo(
fileHandle = blobHandle1,
size = blob1.length.toLong(),
)
protected val fileInfo2 = FileInfo(
fileHandle = blobHandle2,
size = blob2.length.toLong(),
)
protected val apkBackupData = BackupData(listOf(chunkId1), mapOf(chunkId1 to blob1))
protected val splitBackupData =
BackupData(listOf(chunkId2), mapOf(chunkId2 to splitBlob))
BackupData(listOf(chunkId2), mapOf(chunkId2 to blob2))
protected val chunkMap = apkBackupData.chunkMap + splitBackupData.chunkMap
protected val baseSplit = split {
name = BASE_SPLIT


@@ -0,0 +1,144 @@
/*
* SPDX-FileCopyrightText: 2024 The Calyx Institute
* SPDX-License-Identifier: Apache-2.0
*/
package com.stevesoltys.seedvault.transport.backup
import android.content.Context
import com.stevesoltys.seedvault.transport.TransportTest
import io.mockk.every
import io.mockk.mockk
import org.junit.jupiter.api.Assertions.assertEquals
import org.junit.jupiter.api.Assertions.assertNull
import org.junit.jupiter.api.Test
import org.junit.jupiter.api.io.TempDir
import java.io.File
import java.io.FileOutputStream
import java.nio.file.Path
internal class BlobCacheTest : TransportTest() {
private val strictContext: Context = mockk()
private val blobCache = BlobCache(context)
@Test
fun `write to and read from cache`(@TempDir tmpDir: Path) {
val file = File(tmpDir.toString(), "tmpCache")
BlobCache(strictContext).saveTwoBlobsToCache(file)
BlobCache(strictContext).let { cache ->
// old blobs are not yet in new cache
assertNull(cache[chunkId1])
assertNull(cache[chunkId2])
// read saved blobs from cache
every { strictContext.openFileInput(any()) } returns file.inputStream()
cache.populateCache(listOf(fileInfo1, fileInfo2), emptyList())
// now both blobs are in the map
assertEquals(blob1, cache[chunkId1])
assertEquals(blob2, cache[chunkId2])
// after clearing, blobs are gone
cache.clear()
assertNull(cache[chunkId1])
assertNull(cache[chunkId2])
}
}
@Test
fun `cached blob gets only used if on backend`(@TempDir tmpDir: Path) {
val file = File(tmpDir.toString(), "tmpCache")
BlobCache(strictContext).saveTwoBlobsToCache(file)
BlobCache(strictContext).let { cache ->
// read saved blobs from cache
every { strictContext.openFileInput(any()) } returns file.inputStream()
cache.populateCache(listOf(fileInfo2), emptyList()) // fileInfo1 is missing
// now only blob2 gets used, because blob1 wasn't on backend
assertNull(cache[chunkId1])
assertEquals(blob2, cache[chunkId2])
}
}
@Test
fun `cached blob gets only used if same size on backend`(@TempDir tmpDir: Path) {
val file = File(tmpDir.toString(), "tmpCache")
BlobCache(strictContext).saveTwoBlobsToCache(file)
val info = fileInfo1.copy(size = fileInfo1.size - 1)
BlobCache(strictContext).let { cache ->
// read saved blobs from cache
every { strictContext.openFileInput(any()) } returns file.inputStream()
cache.populateCache(listOf(info, fileInfo2), emptyList()) // info has different size now
// now only blob2 gets used, because blob1 had a different size on the backend
assertNull(cache[chunkId1])
assertEquals(blob2, cache[chunkId2])
}
}
@Test
fun `blobs from snapshot get added to cache`() {
assertEquals(blob1, snapshot.blobsMap[chunkId1])
assertEquals(blob2, snapshot.blobsMap[chunkId2])
// before populating cache, the blobs are not in
assertNull(blobCache[chunkId1])
assertNull(blobCache[chunkId2])
blobCache.populateCache(listOf(fileInfo1, fileInfo2), listOf(snapshot))
// after populating cache, the blobs are in
assertEquals(blob1, blobCache[chunkId1])
assertEquals(blob2, blobCache[chunkId2])
// clearing cache removes blobs
blobCache.clear()
assertNull(blobCache[chunkId1])
assertNull(blobCache[chunkId2])
}
@Test
fun `blobs from snapshot get added to cache only if on backend`() {
blobCache.populateCache(listOf(fileInfo2), listOf(snapshot))
// after populating cache, only second blob is in
assertNull(blobCache[chunkId1])
assertEquals(blob2, blobCache[chunkId2])
}
@Test
fun `blobs from snapshot get added to cache only if same size on backend`() {
val info = fileInfo1.copy(size = fileInfo1.size - 1) // same blob, different size
blobCache.populateCache(listOf(info, fileInfo2), listOf(snapshot))
// after populating cache, only second blob is in
assertNull(blobCache[chunkId1])
assertEquals(blob2, blobCache[chunkId2])
}
@Test
fun `test clearing local cache`() {
// clearing the local cache deletes the cache file
every { strictContext.deleteFile(any()) } returns true
blobCache.clearLocalCache()
}
private fun BlobCache.saveTwoBlobsToCache(file: File) {
every { strictContext.openFileOutput(any(), any()) } answers {
FileOutputStream(file, true)
}
// save new blobs (using a new output stream for each as it gets closed)
saveNewBlob(chunkId1, blob1)
saveNewBlob(chunkId2, blob2)
// clearing the in-memory cache should not affect the persisted blobs
clear()
}
}