WebDAV files backup plugin
This commit is contained in:
parent 870d1617d2
commit e6e65d0dd1
22 changed files with 657 additions and 195 deletions
@@ -0,0 +1,179 @@
/*
 * SPDX-FileCopyrightText: 2024 The Calyx Institute
 * SPDX-License-Identifier: Apache-2.0
 */

package com.stevesoltys.seedvault.plugins.webdav

import android.util.Log
import at.bitfire.dav4jvm.BasicDigestAuthHandler
import at.bitfire.dav4jvm.DavCollection
import at.bitfire.dav4jvm.Response
import at.bitfire.dav4jvm.property.ResourceType
import kotlinx.coroutines.DelicateCoroutinesApi
import kotlinx.coroutines.Dispatchers
import kotlinx.coroutines.GlobalScope
import kotlinx.coroutines.async
import kotlinx.coroutines.launch
import kotlinx.coroutines.runBlocking
import okhttp3.ConnectionSpec
import okhttp3.HttpUrl
import okhttp3.MediaType.Companion.toMediaType
import okhttp3.OkHttpClient
import okhttp3.RequestBody
import okhttp3.internal.closeQuietly
import okio.BufferedSink
import java.io.IOException
import java.io.InputStream
import java.io.OutputStream
import java.io.PipedInputStream
import java.io.PipedOutputStream
import java.util.concurrent.TimeUnit
import kotlin.coroutines.resume
import kotlin.coroutines.suspendCoroutine

const val DEBUG_LOG = true
const val DIRECTORY_ROOT = ".SeedVaultAndroidBackup"

@OptIn(DelicateCoroutinesApi::class)
internal abstract class WebDavStorage(
    webDavConfig: WebDavConfig,
) {

    companion object {
        val TAG: String = WebDavStorage::class.java.simpleName
    }

    private val authHandler = BasicDigestAuthHandler(
        domain = null, // Optional, to only authenticate against hosts with this domain.
        username = webDavConfig.username,
        password = webDavConfig.password,
    )
    protected val okHttpClient = OkHttpClient.Builder()
        .followRedirects(false)
        .authenticator(authHandler)
        .addNetworkInterceptor(authHandler)
        .connectTimeout(15, TimeUnit.SECONDS)
        .writeTimeout(30, TimeUnit.SECONDS)
        .readTimeout(120, TimeUnit.SECONDS)
        .pingInterval(45, TimeUnit.SECONDS)
        .connectionSpecs(listOf(ConnectionSpec.MODERN_TLS))
        .retryOnConnectionFailure(true)
        .build()

    protected val url = "${webDavConfig.url}/$DIRECTORY_ROOT"

    @Throws(IOException::class)
    protected suspend fun getOutputStream(location: HttpUrl): OutputStream {
        val davCollection = DavCollection(okHttpClient, location)

        val pipedInputStream = PipedInputStream()
        val pipedOutputStream = PipedCloseActionOutputStream(pipedInputStream)

        val body = object : RequestBody() {
            override fun contentType() = "application/octet-stream".toMediaType()
            override fun writeTo(sink: BufferedSink) {
                pipedInputStream.use { inputStream ->
                    sink.outputStream().use { outputStream ->
                        inputStream.copyTo(outputStream)
                    }
                }
            }
        }
        val deferred = GlobalScope.async(Dispatchers.IO) {
            davCollection.put(body) { response ->
                debugLog { "getOutputStream($location) = $response" }
            }
        }
        pipedOutputStream.doOnClose {
            runBlocking { // blocking i/o wait
                deferred.await()
            }
        }
        return pipedOutputStream
    }

    @Throws(IOException::class)
    protected fun getInputStream(location: HttpUrl): InputStream {
        val davCollection = DavCollection(okHttpClient, location)

        val pipedInputStream = PipedExceptionInputStream()
        val pipedOutputStream = PipedOutputStream(pipedInputStream)

        GlobalScope.launch(Dispatchers.IO) {
            try {
                davCollection.get(accept = "", headers = null) { response ->
                    val inputStream = response.body?.byteStream()
                        ?: throw IOException("No response body")
                    debugLog { "getInputStream($location) = $response" }
                    pipedOutputStream.use { outputStream ->
                        inputStream.copyTo(outputStream)
                    }
                }
            } catch (e: Exception) {
                debugLog { "Exception while getting input stream: $e" }
                // pass exception to stream, so it gets thrown when stream is closed
                // if we'd just throw it here, it would be uncaught, on a different thread
                pipedInputStream.throwable = e
                pipedOutputStream.closeQuietly()
            }
        }
        return pipedInputStream
    }

    protected suspend fun DavCollection.createFolder(xmlBody: String? = null): okhttp3.Response {
        return try {
            suspendCoroutine { cont ->
                mkCol(xmlBody) { response ->
                    cont.resume(response)
                }
            }
        } catch (e: Exception) {
            if (e is IOException) throw e
            else throw IOException(e)
        }
    }

    protected inline fun debugLog(block: () -> String) {
        if (DEBUG_LOG) Log.d(TAG, block())
    }

    protected fun Response.isFolder(): Boolean {
        return this[ResourceType::class.java]?.types?.contains(ResourceType.COLLECTION) == true
    }

    private class PipedCloseActionOutputStream(
        inputStream: PipedInputStream,
    ) : PipedOutputStream(inputStream) {

        private var onClose: (() -> Unit)? = null

        @Throws(IOException::class)
        override fun close() {
            super.close()
            try {
                onClose?.invoke()
            } catch (e: Exception) {
                if (e is IOException) throw e
                else throw IOException(e)
            }
        }

        fun doOnClose(function: () -> Unit) {
            this.onClose = function
        }
    }

    private class PipedExceptionInputStream : PipedInputStream() {
        var throwable: Throwable? = null

        override fun close() {
            super.close()
            throwable?.let { e ->
                if (e is IOException) throw e
                else throw IOException(e)
            }
        }
    }

}
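The abstract WebDavStorage base class above bridges OkHttp's pull-based RequestBody with the push-based OutputStream the backup code expects: bytes written by the caller land in a PipedOutputStream, a GlobalScope coroutine copies them from the paired PipedInputStream into the PUT request body, and doOnClose makes close() block until the upload has finished; on the download side, a failed GET is stashed in the stream and re-thrown on close(). A minimal sketch of how a subclass could use the protected helpers (hypothetical class and method names, not part of this commit; assumes a reachable WebDAV server):

internal class ExampleWebDavStorage(config: WebDavConfig) : WebDavStorage(config) {
    suspend fun roundTrip(location: HttpUrl, bytes: ByteArray): ByteArray {
        // Writing feeds the piped PUT request body; close() waits for the upload to complete.
        getOutputStream(location).use { it.write(bytes) }
        // Reading pulls from the piped GET response; a failed request is re-thrown on close().
        return getInputStream(location).use { it.readBytes() }
    }
}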
@@ -2,13 +2,11 @@ package com.stevesoltys.seedvault.plugins.webdav

import android.content.Context
import android.util.Log
import at.bitfire.dav4jvm.BasicDigestAuthHandler
import at.bitfire.dav4jvm.DavCollection
import at.bitfire.dav4jvm.Response.HrefRelation.SELF
import at.bitfire.dav4jvm.exception.NotFoundException
import at.bitfire.dav4jvm.property.DisplayName
import at.bitfire.dav4jvm.property.ResourceType
import at.bitfire.dav4jvm.property.ResourceType.Companion.COLLECTION
import com.stevesoltys.seedvault.plugins.EncryptedMetadata
import com.stevesoltys.seedvault.plugins.StoragePlugin
import com.stevesoltys.seedvault.plugins.chunkFolderRegex
@@ -16,65 +14,25 @@ import com.stevesoltys.seedvault.plugins.saf.FILE_BACKUP_METADATA
import com.stevesoltys.seedvault.plugins.saf.FILE_NO_MEDIA
import com.stevesoltys.seedvault.plugins.tokenRegex
import com.stevesoltys.seedvault.settings.Storage
import kotlinx.coroutines.DelicateCoroutinesApi
import kotlinx.coroutines.Dispatchers
import kotlinx.coroutines.GlobalScope
import kotlinx.coroutines.async
import kotlinx.coroutines.launch
import kotlinx.coroutines.runBlocking
import okhttp3.HttpUrl.Companion.toHttpUrl
import okhttp3.MediaType.Companion.toMediaType
import okhttp3.OkHttpClient
import okhttp3.RequestBody
import okio.BufferedSink
import java.io.IOException
import java.io.InputStream
import java.io.OutputStream
import java.io.PipedInputStream
import java.io.PipedOutputStream
import kotlin.coroutines.resume
import kotlin.coroutines.suspendCoroutine

private val TAG = WebDavStoragePlugin::class.java.simpleName
const val DEBUG_LOG = true
const val DIRECTORY_ROOT = ".SeedVaultAndroidBackup"

@OptIn(DelicateCoroutinesApi::class)
@Suppress("BlockingMethodInNonBlockingContext")
internal class WebDavStoragePlugin(
    context: Context,
    webDavConfig: WebDavConfig,
) : StoragePlugin {

    private val authHandler = BasicDigestAuthHandler(
        domain = null, // Optional, to only authenticate against hosts with this domain.
        username = webDavConfig.username,
        password = webDavConfig.password,
    )
    private val okHttpClient = OkHttpClient.Builder()
        .followRedirects(false)
        .authenticator(authHandler)
        .addNetworkInterceptor(authHandler)
        .build()

    private val url = "${webDavConfig.url}/$DIRECTORY_ROOT"
) : WebDavStorage(webDavConfig), StoragePlugin {

    @Throws(IOException::class)
    override suspend fun startNewRestoreSet(token: Long) {
        try {
            val location = "$url/$token".toHttpUrl()
            val davCollection = DavCollection(okHttpClient, location)
        val location = "$url/$token".toHttpUrl()
        val davCollection = DavCollection(okHttpClient, location)

            val response = suspendCoroutine { cont ->
                davCollection.mkCol(null) { response ->
                    cont.resume(response)
                }
            }
            debugLog { "startNewRestoreSet($token) = $response" }
        } catch (e: Exception) {
            if (e is IOException) throw e
            else throw IOException(e)
        }
        val response = davCollection.createFolder()
        debugLog { "startNewRestoreSet($token) = $response" }
    }

    @Throws(IOException::class)
@@ -107,76 +65,26 @@ internal class WebDavStoragePlugin(

    @Throws(IOException::class)
    override suspend fun getOutputStream(token: Long, name: String): OutputStream {
        val location = "$url/$token/$name".toHttpUrl()
        return try {
            doGetOutputStream(token, name)
            getOutputStream(location)
        } catch (e: Exception) {
            if (e is IOException) throw e
            else throw IOException("Error getting OutputStream for $token and $name: ", e)
        }
    }

    @Throws(IOException::class)
    private suspend fun doGetOutputStream(token: Long, name: String): OutputStream {
        val location = "$url/$token/$name".toHttpUrl()
        val davCollection = DavCollection(okHttpClient, location)

        val pipedInputStream = PipedInputStream()
        val pipedOutputStream = PipedCloseActionOutputStream(pipedInputStream)

        val body = object : RequestBody() {
            override fun contentType() = "application/octet-stream".toMediaType()
            override fun writeTo(sink: BufferedSink) {
                pipedInputStream.use { inputStream ->
                    sink.outputStream().use { outputStream ->
                        inputStream.copyTo(outputStream)
                    }
                }
            }
        }
        val deferred = GlobalScope.async(Dispatchers.IO) {
            davCollection.put(body) { response ->
                debugLog { "getOutputStream($token, $name) = $response" }
            }
        }
        pipedOutputStream.doOnClose {
            runBlocking { // blocking i/o wait
                deferred.await()
            }
        }
        return pipedOutputStream
    }

    @Throws(IOException::class)
    override suspend fun getInputStream(token: Long, name: String): InputStream {
        val location = "$url/$token/$name".toHttpUrl()
        return try {
            doGetInputStream(token, name)
            getInputStream(location)
        } catch (e: Exception) {
            if (e is IOException) throw e
            else throw IOException("Error getting InputStream for $token and $name: ", e)
        }
    }

    @Throws(IOException::class)
    private fun doGetInputStream(token: Long, name: String): InputStream {
        val location = "$url/$token/$name".toHttpUrl()
        val davCollection = DavCollection(okHttpClient, location)

        val pipedInputStream = PipedInputStream()
        val pipedOutputStream = PipedOutputStream(pipedInputStream)

        GlobalScope.launch(Dispatchers.IO) {
            davCollection.get(accept = "", headers = null) { response ->
                val inputStream = response.body?.byteStream()
                    ?: throw IOException("No response body")
                debugLog { "getInputStream($token, $name) = $response" }
                pipedOutputStream.use { outputStream ->
                    inputStream.copyTo(outputStream)
                }
            }
        }
        return pipedInputStream
    }

    @Throws(IOException::class)
    override suspend fun removeData(token: Long, name: String) {
        val location = "$url/$token/$name".toHttpUrl()

@@ -262,36 +170,6 @@ internal class WebDavStoragePlugin(
            !name.endsWith(".SeedSnap")
        }

    private fun Response.isFolder(): Boolean {
        return this[ResourceType::class.java]?.types?.contains(COLLECTION) == true
    }

    override val providerPackageName: String = context.packageName // 100% built-in plugin

    private class PipedCloseActionOutputStream(
        inputStream: PipedInputStream,
    ) : PipedOutputStream(inputStream) {

        private var onClose: (() -> Unit)? = null

        @Throws(IOException::class)
        override fun close() {
            super.close()
            try {
                onClose?.invoke()
            } catch (e: Exception) {
                if (e is IOException) throw e
                else throw IOException(e)
            }
        }

        fun doOnClose(function: () -> Unit) {
            this.onClose = function
        }
    }

    private fun debugLog(block: () -> String) {
        if (DEBUG_LOG) Log.d(TAG, block())
    }

}
@@ -8,7 +8,7 @@ import com.stevesoltys.seedvault.plugins.saf.DocumentsStorage
import org.calyxos.backup.storage.plugin.saf.SafStoragePlugin
import javax.crypto.SecretKey

internal class SeedvaultStoragePlugin(
internal class SeedvaultSafStoragePlugin(
    private val appContext: Context,
    private val storage: DocumentsStorage,
    private val keyManager: KeyManager,
@@ -5,6 +5,6 @@ import org.calyxos.backup.storage.api.StoragePlugin
import org.koin.dsl.module

val storageModule = module {
    single<StoragePlugin> { SeedvaultStoragePlugin(get(), get(), get()) }
    single<StoragePlugin> { SeedvaultSafStoragePlugin(get(), get(), get()) }
    single { StorageBackup(get(), get()) }
}
@@ -0,0 +1,275 @@
package com.stevesoltys.seedvault.storage

import android.util.Log
import at.bitfire.dav4jvm.DavCollection
import at.bitfire.dav4jvm.Response.HrefRelation.SELF
import at.bitfire.dav4jvm.exception.NotFoundException
import at.bitfire.dav4jvm.property.DisplayName
import at.bitfire.dav4jvm.property.ResourceType
import com.stevesoltys.seedvault.crypto.KeyManager
import com.stevesoltys.seedvault.plugins.chunkFolderRegex
import com.stevesoltys.seedvault.plugins.webdav.WebDavConfig
import com.stevesoltys.seedvault.plugins.webdav.WebDavStorage
import okhttp3.HttpUrl.Companion.toHttpUrl
import org.calyxos.backup.storage.api.StoragePlugin
import org.calyxos.backup.storage.api.StoredSnapshot
import org.calyxos.backup.storage.plugin.PluginConstants.chunkRegex
import org.calyxos.backup.storage.plugin.PluginConstants.snapshotRegex
import org.koin.core.time.measureDuration
import java.io.IOException
import java.io.InputStream
import java.io.OutputStream
import javax.crypto.SecretKey
import kotlin.coroutines.resume
import kotlin.coroutines.suspendCoroutine

internal class WebDavStoragePlugin(
    private val keyManager: KeyManager,
    /**
     * The result of Settings.Secure.getString(context.contentResolver, Settings.Secure.ANDROID_ID)
     */
    androidId: String,
    webDavConfig: WebDavConfig,
) : WebDavStorage(webDavConfig), StoragePlugin {

    /**
     * The folder name is our user ID plus .sv extension (for SeedVault).
     * The user or androidId is unique to each combination of app-signing key, user, and device
     * so we don't leak anything by not hashing this and can use it as is.
     */
    private val folder: String = "$androidId.sv"

    @Throws(IOException::class)
    override suspend fun getAvailableChunkIds(): List<String> {
        val location = "$url/$folder".toHttpUrl()
        val davCollection = DavCollection(okHttpClient, location)
        debugLog { "getAvailableChunkIds($location)" }

        val expectedChunkFolders = (0x00..0xff).map {
            Integer.toHexString(it).padStart(2, '0')
        }.toHashSet()
        val chunkIds = ArrayList<String>()
        try {
            val duration = measureDuration {
                davCollection.propfind(
                    depth = 2,
                    reqProp = arrayOf(DisplayName.NAME, ResourceType.NAME),
                ) { response, relation ->
                    debugLog { "getAvailableChunkIds() = $response" }
                    // This callback will be called for every file in the folder
                    if (relation != SELF && response.isFolder()) {
                        val name = response.hrefName()
                        if (chunkFolderRegex.matches(name)) {
                            expectedChunkFolders.remove(name)
                        }
                    } else if (relation != SELF && response.href.pathSize >= 2) {
                        val folderName =
                            response.href.pathSegments[response.href.pathSegments.size - 2]
                        if (folderName != folder && chunkFolderRegex.matches(folderName)) {
                            val name = response.hrefName()
                            if (chunkRegex.matches(name)) chunkIds.add(name)
                        }
                    }
                }
            }
            Log.i(TAG, "Retrieving chunks took $duration")
        } catch (e: NotFoundException) {
            debugLog { "Folder not found: $location" }
            davCollection.createFolder()
        } catch (e: Exception) {
            if (e is IOException) throw e
            else throw IOException("Error populating chunk folders: ", e)
        }
        Log.i(TAG, "Got ${chunkIds.size} available chunks")
        createMissingChunkFolders(expectedChunkFolders)
        return chunkIds
    }

    @Throws(IOException::class)
    private suspend fun createMissingChunkFolders(
        missingChunkFolders: Set<String>,
    ) {
        val s = missingChunkFolders.size
        for ((i, chunkFolderName) in missingChunkFolders.withIndex()) {
            val location = "$url/$folder/$chunkFolderName".toHttpUrl()
            val davCollection = DavCollection(okHttpClient, location)
            val response = davCollection.createFolder()
            debugLog { "Created missing folder $chunkFolderName (${i + 1}/$s) $response" }
        }
    }

    override fun getMasterKey(): SecretKey = keyManager.getMainKey()
    override fun hasMasterKey(): Boolean = keyManager.hasMainKey()

    @Throws(IOException::class)
    override suspend fun getChunkOutputStream(chunkId: String): OutputStream {
        val chunkFolderName = chunkId.substring(0, 2)
        val location = "$url/$folder/$chunkFolderName/$chunkId".toHttpUrl()
        debugLog { "getChunkOutputStream($location) for $chunkId" }
        return try {
            getOutputStream(location)
        } catch (e: Exception) {
            if (e is IOException) throw e
            else throw IOException("Error getting OutputStream for $chunkId: ", e)
        }
    }

    @Throws(IOException::class)
    override suspend fun getBackupSnapshotOutputStream(timestamp: Long): OutputStream {
        val location = "$url/$folder/$timestamp.SeedSnap".toHttpUrl()
        debugLog { "getBackupSnapshotOutputStream($location)" }
        return try {
            getOutputStream(location)
        } catch (e: Exception) {
            if (e is IOException) throw e
            else throw IOException("Error getting OutputStream for $timestamp.SeedSnap: ", e)
        }
    }

    /************************* Restore *******************************/

    @Throws(IOException::class)
    override suspend fun getBackupSnapshotsForRestore(): List<StoredSnapshot> {
        val location = url.toHttpUrl()
        val davCollection = DavCollection(okHttpClient, location)
        debugLog { "getBackupSnapshotsForRestore($location)" }

        val snapshots = ArrayList<StoredSnapshot>()
        try {
            davCollection.propfind(
                depth = 2,
                reqProp = arrayOf(DisplayName.NAME, ResourceType.NAME),
            ) { response, relation ->
                debugLog { "getBackupSnapshotsForRestore() = $response" }
                // This callback will be called for every file in the folder
                if (relation != SELF && !response.isFolder()) {
                    val name = response.hrefName()
                    val match = snapshotRegex.matchEntire(name)
                    if (match != null) {
                        val timestamp = match.groupValues[1].toLong()
                        val folderName =
                            response.href.pathSegments[response.href.pathSegments.size - 2]
                        val storedSnapshot = StoredSnapshot(folderName, timestamp)
                        snapshots.add(storedSnapshot)
                    }
                }
            }
        } catch (e: Exception) {
            if (e is IOException) throw e
            else throw IOException("Error getting snapshots for restore: ", e)
        }
        return snapshots
    }

    @Throws(IOException::class)
    override suspend fun getBackupSnapshotInputStream(storedSnapshot: StoredSnapshot): InputStream {
        val timestamp = storedSnapshot.timestamp
        val location = "$url/${storedSnapshot.userId}/$timestamp.SeedSnap".toHttpUrl()
        debugLog { "getBackupSnapshotInputStream($location)" }
        return try {
            getInputStream(location)
        } catch (e: Exception) {
            if (e is IOException) throw e
            else throw IOException("Error getting InputStream for $storedSnapshot: ", e)
        }
    }

    @Throws(IOException::class)
    override suspend fun getChunkInputStream(
        snapshot: StoredSnapshot,
        chunkId: String,
    ): InputStream {
        val chunkFolderName = chunkId.substring(0, 2)
        val location = "$url/${snapshot.userId}/$chunkFolderName/$chunkId".toHttpUrl()
        debugLog { "getChunkInputStream($location) for $chunkId" }
        return try {
            getInputStream(location)
        } catch (e: Exception) {
            if (e is IOException) throw e
            else throw IOException("Error getting InputStream for $chunkFolderName/$chunkId: ", e)
        }
    }

    /************************* Pruning *******************************/

    @Throws(IOException::class)
    override suspend fun getCurrentBackupSnapshots(): List<StoredSnapshot> {
        val location = "$url/$folder".toHttpUrl()
        val davCollection = DavCollection(okHttpClient, location)
        debugLog { "getCurrentBackupSnapshots($location)" }

        val snapshots = ArrayList<StoredSnapshot>()
        try {
            val duration = measureDuration {
                davCollection.propfind(
                    depth = 1,
                    reqProp = arrayOf(DisplayName.NAME, ResourceType.NAME),
                ) { response, relation ->
                    debugLog { "getCurrentBackupSnapshots() = $response" }
                    // This callback will be called for every file in the folder
                    if (relation != SELF && !response.isFolder()) {
                        val match = snapshotRegex.matchEntire(response.hrefName())
                        if (match != null) {
                            val timestamp = match.groupValues[1].toLong()
                            val folderName =
                                response.href.pathSegments[response.href.pathSegments.size - 2]
                            val storedSnapshot = StoredSnapshot(folderName, timestamp)
                            snapshots.add(storedSnapshot)
                        }
                    }
                }
            }
            Log.i(TAG, "getCurrentBackupSnapshots took $duration")
        } catch (e: NotFoundException) {
            debugLog { "Folder not found: $location" }
            davCollection.createFolder()
        } catch (e: Exception) {
            if (e is IOException) throw e
            else throw IOException("Error getting current snapshots: ", e)
        }
        Log.i(TAG, "Got ${snapshots.size} snapshots.")
        return snapshots
    }

    @Throws(IOException::class)
    override suspend fun deleteBackupSnapshot(storedSnapshot: StoredSnapshot) {
        val timestamp = storedSnapshot.timestamp
        Log.d(TAG, "Deleting snapshot $timestamp")

        val location = "$url/${storedSnapshot.userId}/$timestamp.SeedSnap".toHttpUrl()
        val davCollection = DavCollection(okHttpClient, location)

        try {
            val response = suspendCoroutine { cont ->
                davCollection.delete { response ->
                    cont.resume(response)
                }
            }
            debugLog { "deleteBackupSnapshot() = $response" }
        } catch (e: Exception) {
            if (e is IOException) throw e
            else throw IOException(e)
        }
    }

    @Throws(IOException::class)
    override suspend fun deleteChunks(chunkIds: List<String>) {
        chunkIds.forEach { chunkId ->
            val chunkFolderName = chunkId.substring(0, 2)
            val location = "$url/$folder/$chunkFolderName/$chunkId".toHttpUrl()
            val davCollection = DavCollection(okHttpClient, location)

            try {
                val response = suspendCoroutine { cont ->
                    davCollection.delete { response ->
                        cont.resume(response)
                    }
                }
                debugLog { "deleteChunks($chunkId) = $response" }
            } catch (e: Exception) {
                if (e is IOException) throw e
                else throw IOException(e)
            }
        }
    }
}
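The remote layout used by the files plugin above mirrors the SAF plugin: everything for one device lives under a per-device folder named after the androidId with a .sv extension, chunks go into one of 256 two-hex-digit folders selected by the first two characters of the chunk ID, and snapshots sit next to them as timestamp.SeedSnap files. A small illustration of the resulting paths (the helper and example values are hypothetical, not part of this commit):

// Illustration only: reproduces the path scheme used by getChunkOutputStream() above,
// assuming baseUrl already ends with DIRECTORY_ROOT.
fun chunkLocation(baseUrl: String, androidId: String, chunkId: String): String {
    val chunkFolderName = chunkId.substring(0, 2) // one of 256 folders, "00".."ff"
    return "$baseUrl/$androidId.sv/$chunkFolderName/$chunkId"
}
// chunkLocation("https://dav.example.org/.SeedVaultAndroidBackup", "0123456789abcdef",
//     "40d00c1be4b0f89e8b12d47f3658aa42f568a8d02b978260da6d0050e7007e67")
// -> ".../0123456789abcdef.sv/40/40d00c1be4b0f89e..."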
@@ -14,7 +14,9 @@ import com.stevesoltys.seedvault.settings.SettingsManager
import com.stevesoltys.seedvault.transport.backup.backupModule
import com.stevesoltys.seedvault.transport.restore.restoreModule
import org.koin.android.ext.koin.androidContext
import org.koin.core.KoinApplication
import org.koin.core.context.startKoin
import org.koin.core.context.stopKoin
import org.koin.dsl.module

class TestApp : App() {
@@ -31,19 +33,22 @@ class TestApp : App() {
        single { SettingsManager(this@TestApp) }
    }

    override fun startKoin() = startKoin {
        androidContext(this@TestApp)
        modules(
            listOf(
                testCryptoModule,
                headerModule,
                metadataModule,
                documentsProviderModule, // storage plugin
                backupModule,
                restoreModule,
                installModule,
                appModule
    override fun startKoin(): KoinApplication {
        stopKoin()
        return startKoin {
            androidContext(this@TestApp)
            modules(
                listOf(
                    testCryptoModule,
                    headerModule,
                    metadataModule,
                    documentsProviderModule, // storage plugin
                    backupModule,
                    restoreModule,
                    installModule,
                    appModule
                )
            )
        )
        }
    }
}
@@ -0,0 +1,103 @@
/*
 * SPDX-FileCopyrightText: 2024 The Calyx Institute
 * SPDX-License-Identifier: Apache-2.0
 */

package com.stevesoltys.seedvault.storage

import com.stevesoltys.seedvault.crypto.KeyManager
import com.stevesoltys.seedvault.getRandomByteArray
import com.stevesoltys.seedvault.plugins.webdav.WebDavTestConfig
import com.stevesoltys.seedvault.transport.backup.BackupTest
import io.mockk.mockk
import kotlinx.coroutines.runBlocking
import org.calyxos.backup.storage.api.StoredSnapshot
import org.junit.Assert.assertArrayEquals
import org.junit.Assert.assertEquals
import org.junit.Test

internal class WebDavStoragePluginTest : BackupTest() {

    private val keyManager: KeyManager = mockk()
    private val plugin = WebDavStoragePlugin(keyManager, "foo", WebDavTestConfig.getConfig())

    private val snapshot = StoredSnapshot("foo.sv", System.currentTimeMillis())

    @Test
    fun `test chunks`() = runBlocking {
        val chunkId1 = getRandomByteArray(32).toHexString()
        val chunkBytes1 = getRandomByteArray()

        // first we don't have any chunks
        assertEquals(emptyList<String>(), plugin.getAvailableChunkIds())

        // we write out chunk1
        plugin.getChunkOutputStream(chunkId1).use {
            it.write(chunkBytes1)
        }

        try {
            // now we have the ID of chunk1
            assertEquals(listOf(chunkId1), plugin.getAvailableChunkIds())

            // reading chunk1 matches what we wrote
            assertArrayEquals(
                chunkBytes1,
                plugin.getChunkInputStream(snapshot, chunkId1).readAllBytes(),
            )
        } finally {
            // delete chunk again
            plugin.deleteChunks(listOf(chunkId1))
        }
    }

    @Test
    fun `test snapshots`() = runBlocking {
        val snapshotBytes = getRandomByteArray()

        // first we don't have any snapshots
        assertEquals(emptyList<StoredSnapshot>(), plugin.getCurrentBackupSnapshots())
        assertEquals(emptyList<StoredSnapshot>(), plugin.getBackupSnapshotsForRestore())

        // now write one snapshot
        plugin.getBackupSnapshotOutputStream(snapshot.timestamp).use {
            it.write(snapshotBytes)
        }

        try {
            // now we have that one snapshot
            assertEquals(listOf(snapshot), plugin.getCurrentBackupSnapshots())
            assertEquals(listOf(snapshot), plugin.getBackupSnapshotsForRestore())

            // read back written snapshot
            assertArrayEquals(
                snapshotBytes,
                plugin.getBackupSnapshotInputStream(snapshot).readAllBytes(),
            )

            // other device writes another snapshot
            val otherPlugin = WebDavStoragePlugin(keyManager, "bar", WebDavTestConfig.getConfig())
            val otherSnapshot = StoredSnapshot("bar.sv", System.currentTimeMillis())
            val otherSnapshotBytes = getRandomByteArray()
            assertEquals(emptyList<String>(), otherPlugin.getAvailableChunkIds())
            otherPlugin.getBackupSnapshotOutputStream(otherSnapshot.timestamp).use {
                it.write(otherSnapshotBytes)
            }
            try {
                // now that initial one snapshot is still the only current, but restore has both
                assertEquals(listOf(snapshot), plugin.getCurrentBackupSnapshots())
                assertEquals(
                    setOf(snapshot, otherSnapshot),
                    plugin.getBackupSnapshotsForRestore().toSet(), // set to avoid sorting issues
                )
            } finally {
                plugin.deleteBackupSnapshot(otherSnapshot)
            }
        } finally {
            plugin.deleteBackupSnapshot(snapshot)
        }
    }

}

private fun ByteArray.toHexString() = joinToString("") { "%02x".format(it) }
@@ -42,12 +42,12 @@ class TestSafStoragePlugin(
    }

    @Throws(IOException::class)
    override fun getChunkOutputStream(chunkId: String): OutputStream {
    override suspend fun getChunkOutputStream(chunkId: String): OutputStream {
        if (getLocationUri() == null) return nullStream
        return super.getChunkOutputStream(chunkId)
    }

    override fun getBackupSnapshotOutputStream(timestamp: Long): OutputStream {
    override suspend fun getBackupSnapshotOutputStream(timestamp: Long): OutputStream {
        if (root == null) return nullStream
        return super.getBackupSnapshotOutputStream(timestamp)
    }
@@ -22,6 +22,7 @@ public sealed class SnapshotResult {
public data class StoredSnapshot(
    /**
     * The unique ID of the current device/user combination chosen by the [StoragePlugin].
     * It may include an '.sv' extension.
     */
    public val userId: String,
    /**
@@ -32,10 +32,10 @@ public interface StoragePlugin {
    public fun hasMasterKey(): Boolean

    @Throws(IOException::class)
    public fun getChunkOutputStream(chunkId: String): OutputStream
    public suspend fun getChunkOutputStream(chunkId: String): OutputStream

    @Throws(IOException::class)
    public fun getBackupSnapshotOutputStream(timestamp: Long): OutputStream
    public suspend fun getBackupSnapshotOutputStream(timestamp: Long): OutputStream

    /* Restore */

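Turning the two output-stream getters of the StoragePlugin interface into suspend functions is what drives most of the remaining hunks: every caller in the backup pipeline and every test double becomes suspending as well. A caller now looks roughly like this (sketch only, the function name and parameters are illustrative):

import java.io.OutputStream
import org.calyxos.backup.storage.api.StoragePlugin

// Sketch: suspend callers can still rely on use {} for resource handling as before.
suspend fun writeChunkBytes(plugin: StoragePlugin, chunkId: String, data: ByteArray) {
    plugin.getChunkOutputStream(chunkId).use { stream: OutputStream ->
        stream.write(data)
    }
}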
@@ -37,7 +37,7 @@ internal class ChunkWriter(
    private val buffer = ByteArray(bufferSize)

    @Throws(IOException::class, GeneralSecurityException::class)
    fun writeChunk(
    suspend fun writeChunk(
        inputStream: InputStream,
        chunks: List<Chunk>,
        missingChunkIds: List<String>,

@@ -67,7 +67,7 @@ internal class ChunkWriter(
    }

    @Throws(IOException::class, GeneralSecurityException::class)
    private fun writeChunkData(chunkId: String, writer: (OutputStream) -> Unit) {
    private suspend fun writeChunkData(chunkId: String, writer: (OutputStream) -> Unit) {
        storagePlugin.getChunkOutputStream(chunkId).use { chunkStream ->
            chunkStream.write(VERSION.toInt())
            val ad = streamCrypto.getAssociatedDataForChunk(chunkId)

@@ -102,7 +102,7 @@ internal class ChunkWriter(
     * @return true if the chunk was written or false, if it was present already.
     */
    @Throws(IOException::class, GeneralSecurityException::class)
    fun writeZipChunk(
    suspend fun writeZipChunk(
        chunk: ZipChunk,
        zip: ByteArrayOutputStream,
        missingChunkIds: List<String>,
@@ -74,7 +74,7 @@ internal class FileBackup(
    )

    @Throws(IOException::class, GeneralSecurityException::class)
    private fun backupFile(
    private suspend fun backupFile(
        file: ContentFile,
        availableChunkIds: HashSet<String>,
    ): FileBackupResult {
@@ -108,7 +108,7 @@ internal class SmallFileBackup(
     * Returns null, if there's space in the zip chunk and the next file can be added.
     */
    @Throws(IOException::class, GeneralSecurityException::class)
    private fun makeZipChunk(
    private suspend fun makeZipChunk(
        window: List<ContentFile>,
        missingChunkIds: List<String>,
    ): SmallFileBackupResult? {

@@ -127,7 +127,7 @@ internal class SmallFileBackup(
    }

    @Throws(IOException::class, GeneralSecurityException::class)
    private fun finalizeAndReset(
    private suspend fun finalizeAndReset(
        zipChunker: ZipChunker,
        missingChunkIds: List<String>,
    ): SmallFileBackupResult {
@@ -61,7 +61,7 @@ internal class ZipChunker(
     * This object gets reset for the next operation.
     */
    @Throws(IOException::class, GeneralSecurityException::class)
    fun finalizeAndReset(missingChunkIds: List<String>): ZipChunk = try {
    suspend fun finalizeAndReset(missingChunkIds: List<String>): ZipChunk = try {
        zipOutputStream.finish()
        zipOutputStream.close()

@@ -0,0 +1,17 @@
/*
 * SPDX-FileCopyrightText: 2024 The Calyx Institute
 * SPDX-License-Identifier: Apache-2.0
 */

package org.calyxos.backup.storage.plugin

public object PluginConstants {

    public val folderRegex: Regex = Regex("^[a-f0-9]{16}\\.sv$")
    public val chunkFolderRegex: Regex = Regex("[a-f0-9]{2}")
    public val chunkRegex: Regex = Regex("[a-f0-9]{64}")
    public val snapshotRegex: Regex = Regex("([0-9]{13})\\.SeedSnap") // good until the year 2286
    public const val MIME_TYPE: String = "application/octet-stream"
    public const val CHUNK_FOLDER_COUNT: Int = 256

}
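These constants were previously private top-level values inside the SAF plugin (a later hunk removes them there); moving them into the public PluginConstants object lets the new WebDAV plugin reuse the same naming rules. A quick illustration of how the regexes are meant to be used (the timestamp value is made up):

// Illustration only, with a made-up snapshot file name.
val name = "1704067200000.SeedSnap"
val timestamp = PluginConstants.snapshotRegex.matchEntire(name)
    ?.groupValues?.get(1)?.toLong() // -> 1704067200000
val isChunkFolder = PluginConstants.chunkFolderRegex.matches("a3") // -> true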
@@ -7,6 +7,7 @@ package org.calyxos.backup.storage.plugin.saf

import androidx.documentfile.provider.DocumentFile
import org.calyxos.backup.storage.api.StoredSnapshot
import org.calyxos.backup.storage.plugin.PluginConstants.CHUNK_FOLDER_COUNT

/**
 * Accessing files and attributes via SAF is costly.
@@ -14,6 +14,12 @@ import androidx.documentfile.provider.DocumentFile
import org.calyxos.backup.storage.api.StoragePlugin
import org.calyxos.backup.storage.api.StoredSnapshot
import org.calyxos.backup.storage.measure
import org.calyxos.backup.storage.plugin.PluginConstants.CHUNK_FOLDER_COUNT
import org.calyxos.backup.storage.plugin.PluginConstants.MIME_TYPE
import org.calyxos.backup.storage.plugin.PluginConstants.chunkFolderRegex
import org.calyxos.backup.storage.plugin.PluginConstants.chunkRegex
import org.calyxos.backup.storage.plugin.PluginConstants.folderRegex
import org.calyxos.backup.storage.plugin.PluginConstants.snapshotRegex
import org.calyxos.backup.storage.plugin.saf.DocumentFileExt.createDirectoryOrThrow
import org.calyxos.backup.storage.plugin.saf.DocumentFileExt.createFileOrThrow
import org.calyxos.backup.storage.plugin.saf.DocumentFileExt.findFileBlocking

@@ -25,13 +31,6 @@ import java.io.InputStream
import java.io.OutputStream
import kotlin.time.ExperimentalTime

private val folderRegex = Regex("^[a-f0-9]{16}\\.sv$")
private val chunkFolderRegex = Regex("[a-f0-9]{2}")
private val chunkRegex = Regex("[a-f0-9]{64}")
private val snapshotRegex = Regex("([0-9]{13})\\.SeedSnap") // good until the year 2286
private const val MIME_TYPE: String = "application/octet-stream"
internal const val CHUNK_FOLDER_COUNT = 256

private const val TAG = "SafStoragePlugin"

/**

@@ -154,7 +153,7 @@ public abstract class SafStoragePlugin(
    }

    @Throws(IOException::class)
    override fun getChunkOutputStream(chunkId: String): OutputStream {
    override suspend fun getChunkOutputStream(chunkId: String): OutputStream {
        val chunkFolderName = chunkId.substring(0, 2)
        val chunkFolder =
            cache.backupChunkFolders[chunkFolderName] ?: error("No folder for chunk $chunkId")

@@ -164,7 +163,7 @@ public abstract class SafStoragePlugin(
    }

    @Throws(IOException::class)
    override fun getBackupSnapshotOutputStream(timestamp: Long): OutputStream {
    override suspend fun getBackupSnapshotOutputStream(timestamp: Long): OutputStream {
        val folder = folder ?: throw IOException()
        val name = timestampToSnapshot(timestamp)
        // TODO should we check if it exists first?
@@ -19,7 +19,6 @@ import io.mockk.just
import io.mockk.mockk
import io.mockk.mockkStatic
import io.mockk.slot
import io.mockk.verify
import kotlinx.coroutines.flow.toList
import kotlinx.coroutines.runBlocking
import org.calyxos.backup.storage.api.SnapshotResult

@@ -150,14 +149,14 @@ internal class BackupRestoreTest {
        } returns ByteArrayInputStream(fileDBytes) andThen ByteArrayInputStream(fileDBytes)

        // output streams and caching
        every { plugin.getChunkOutputStream(any()) } returnsMany listOf(
        coEvery { plugin.getChunkOutputStream(any()) } returnsMany listOf(
            zipChunkOutputStream, mOutputStream, dOutputStream
        )
        every { chunksCache.insert(any<CachedChunk>()) } just Runs
        every { filesCache.upsert(capture(cachedFiles)) } just Runs

        // snapshot writing
        every {
        coEvery {
            plugin.getBackupSnapshotOutputStream(capture(snapshotTimestamp))
        } returns snapshotOutputStream
        every { db.applyInParts<String>(any(), any()) } just Runs

@@ -296,25 +295,25 @@ internal class BackupRestoreTest {

        // output streams for deterministic chunks
        val id040f32 = ByteArrayOutputStream()
        every {
        coEvery {
            plugin.getChunkOutputStream(
                "040f3204869543c4015d92c04bf875b25ebde55f9645380f4172aa439b2825d3"
            )
        } returns id040f32
        val id901fbc = ByteArrayOutputStream()
        every {
        coEvery {
            plugin.getChunkOutputStream(
                "901fbcf9a94271fc0455d0052522cab994f9392d0bb85187860282b4beadfb29"
            )
        } returns id901fbc
        val id5adea3 = ByteArrayOutputStream()
        every {
        coEvery {
            plugin.getChunkOutputStream(
                "5adea3149fe6cf9c6e3270a52ee2c31bc9dfcef5f2080b583a4dd3b779c9182d"
            )
        } returns id5adea3
        val id40d00c = ByteArrayOutputStream()
        every {
        coEvery {
            plugin.getChunkOutputStream(
                "40d00c1be4b0f89e8b12d47f3658aa42f568a8d02b978260da6d0050e7007e67"
            )

@@ -324,7 +323,7 @@ internal class BackupRestoreTest {
        every { filesCache.upsert(capture(cachedFiles)) } just Runs

        // snapshot writing
        every {
        coEvery {
            plugin.getBackupSnapshotOutputStream(capture(snapshotTimestamp))
        } returns snapshotOutputStream
        every { db.applyInParts<String>(any(), any()) } just Runs

@@ -332,7 +331,7 @@ internal class BackupRestoreTest {
        backup.runBackup(null)

        // chunks were only written to storage once
        verify(exactly = 1) {
        coVerify(exactly = 1) {
            plugin.getChunkOutputStream(
                "040f3204869543c4015d92c04bf875b25ebde55f9645380f4172aa439b2825d3")
            plugin.getChunkOutputStream(
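Because getChunkOutputStream and getBackupSnapshotOutputStream are now suspend functions, the MockK stubs and verifications in these tests switch from every/verify to coEvery/coVerify. A minimal, self-contained sketch of that pattern (the chunk ID and the main() wrapper are illustrative only):

import io.mockk.coEvery
import io.mockk.coVerify
import io.mockk.mockk
import kotlinx.coroutines.runBlocking
import java.io.ByteArrayOutputStream
import org.calyxos.backup.storage.api.StoragePlugin

fun main() = runBlocking {
    val plugin = mockk<StoragePlugin>()
    val out = ByteArrayOutputStream()
    val chunkId = "ab".repeat(32) // illustrative 64-hex-char chunk ID
    coEvery { plugin.getChunkOutputStream(chunkId) } returns out // stub a suspend function
    plugin.getChunkOutputStream(chunkId).use { it.write(1) }
    coVerify(exactly = 1) { plugin.getChunkOutputStream(chunkId) } // verify a suspend call
}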
@@ -7,9 +7,11 @@ package org.calyxos.backup.storage.backup

import io.mockk.MockKMatcherScope
import io.mockk.Runs
import io.mockk.coEvery
import io.mockk.every
import io.mockk.just
import io.mockk.mockk
import kotlinx.coroutines.runBlocking
import org.calyxos.backup.storage.api.StoragePlugin
import org.calyxos.backup.storage.backup.Backup.Companion.VERSION
import org.calyxos.backup.storage.crypto.Hkdf.KEY_SIZE_BYTES

@@ -45,7 +47,7 @@ internal class ChunkWriterTest {
    }

    @Test
    fun testTwoByteChunksNotCached() {
    fun testTwoByteChunksNotCached() = runBlocking {
        val inputBytes = byteArrayOf(0x00, 0x01, 0x02, 0x03, 0x04, 0x05)
        val inputStream = ByteArrayInputStream(inputBytes)
        val chunks = listOf(

@@ -64,9 +66,9 @@ internal class ChunkWriterTest {
        every { chunksCache.get(chunkId3) } returns null

        // get the output streams for the chunks
        every { storagePlugin.getChunkOutputStream(chunkId1) } returns chunk1Output
        every { storagePlugin.getChunkOutputStream(chunkId2) } returns chunk2Output
        every { storagePlugin.getChunkOutputStream(chunkId3) } returns chunk3Output
        coEvery { storagePlugin.getChunkOutputStream(chunkId1) } returns chunk1Output
        coEvery { storagePlugin.getChunkOutputStream(chunkId2) } returns chunk2Output
        coEvery { storagePlugin.getChunkOutputStream(chunkId3) } returns chunk3Output

        // get AD
        every { streamCrypto.getAssociatedDataForChunk(chunkId1) } returns ad1

@@ -103,7 +105,7 @@ internal class ChunkWriterTest {
    }

    @Test
    fun testCachedChunksSkippedIfNotMissing() {
    fun testCachedChunksSkippedIfNotMissing() = runBlocking {
        val inputBytes = byteArrayOf(0x00, 0x01, 0x02, 0x03, 0x04, 0x05)
        val inputStream = ByteArrayInputStream(inputBytes)
        val chunks = listOf(

@@ -120,7 +122,7 @@ internal class ChunkWriterTest {
        every { chunksCache.get(chunkId3) } returns null

        // get and wrap the output stream for chunk that is missing
        every { storagePlugin.getChunkOutputStream(chunkId1) } returns chunk1Output
        coEvery { storagePlugin.getChunkOutputStream(chunkId1) } returns chunk1Output
        every { streamCrypto.getAssociatedDataForChunk(chunkId1) } returns ad1
        every {
            streamCrypto.newEncryptingStream(streamKey, chunk1Output, bytes(34))

@@ -130,7 +132,7 @@ internal class ChunkWriterTest {
        every { chunksCache.insert(chunks[0].toCachedChunk()) } just Runs

        // get and wrap the output stream for chunk that isn't cached
        every { storagePlugin.getChunkOutputStream(chunkId3) } returns chunk3Output
        coEvery { storagePlugin.getChunkOutputStream(chunkId3) } returns chunk3Output
        every { streamCrypto.getAssociatedDataForChunk(chunkId3) } returns ad3
        every {
            streamCrypto.newEncryptingStream(streamKey, chunk3Output, bytes(34))

@@ -149,7 +151,7 @@ internal class ChunkWriterTest {
    }

    @Test
    fun testLargerRandomChunks() {
    fun testLargerRandomChunks() = runBlocking {
        val chunk1Bytes = Random.nextBytes(Random.nextInt(1, 1024 * 1024))
        val chunk2Bytes = Random.nextBytes(Random.nextInt(1, 1024 * 1024))
        val chunk3Bytes = Random.nextBytes(Random.nextInt(1, 1024 * 1024))

@@ -173,8 +175,8 @@ internal class ChunkWriterTest {
        every { chunksCache.get(chunkId3) } returns null

        // get the output streams for the chunks
        every { storagePlugin.getChunkOutputStream(chunkId1) } returns chunk1Output
        every { storagePlugin.getChunkOutputStream(chunkId3) } returns chunk3Output
        coEvery { storagePlugin.getChunkOutputStream(chunkId1) } returns chunk1Output
        coEvery { storagePlugin.getChunkOutputStream(chunkId3) } returns chunk3Output

        // get AD
        every { streamCrypto.getAssociatedDataForChunk(chunkId1) } returns ad1
@@ -91,7 +91,7 @@ internal class SmallFileBackupIntegrationTest {

        every { mac.doFinal(any<ByteArray>()) } returns chunkId
        every { chunksCache.get(any()) } returns null
        every { storagePlugin.getChunkOutputStream(any()) } returns outputStream2
        coEvery { storagePlugin.getChunkOutputStream(any()) } returns outputStream2
        every {
            chunksCache.insert(match<CachedChunk> { cachedChunk ->
                cachedChunk.id == chunkId.toHexString() &&
@@ -7,6 +7,7 @@ package org.calyxos.backup.storage.backup

import android.content.ContentResolver
import io.mockk.Runs
import io.mockk.coEvery
import io.mockk.every
import io.mockk.just
import io.mockk.mockk

@@ -99,7 +100,7 @@ internal class SmallFileBackupTest {
            if (cachedFile == null) emptyList() else cachedFile.chunks - availableChunkIds

        addFile(file, cachedFile)
        every { zipChunker.finalizeAndReset(missingChunks) } returns zipChunk
        coEvery { zipChunker.finalizeAndReset(missingChunks) } returns zipChunk
        every { filesCache.upsert(sameCachedFile(newCachedFile)) } just Runs

        val result = smallFileBackup.backupFiles(files, availableChunkIds, null)

@@ -123,7 +124,7 @@ internal class SmallFileBackupTest {
        every { filesCache.getByUri(file1.uri) } returns null
        every { contentResolver.openInputStream(file1.uri) } throws IOException()
        addFile(file2)
        every { zipChunker.finalizeAndReset(emptyList()) } returns zipChunk
        coEvery { zipChunker.finalizeAndReset(emptyList()) } returns zipChunk
        every { filesCache.upsert(sameCachedFile(cachedFile2)) } just Runs

        val result = smallFileBackup.backupFiles(files, availableChunkIds, null)

@@ -148,7 +149,7 @@ internal class SmallFileBackupTest {

        addFile(file1)
        every { zipChunker.fitsFile(file2) } returns false
        every { zipChunker.finalizeAndReset(emptyList()) } returns zipChunk1 andThen zipChunk2
        coEvery { zipChunker.finalizeAndReset(emptyList()) } returns zipChunk1 andThen zipChunk2
        every { filesCache.upsert(sameCachedFile(cachedFile1)) } just Runs
        addFile(file2)
        // zipChunker.finalizeAndReset defined above for both files

@@ -178,7 +179,7 @@ internal class SmallFileBackupTest {
        addFile(file1)
        every { zipChunker.fitsFile(file2) } returns true
        addFile(file2)
        every { zipChunker.finalizeAndReset(emptyList()) } returns zipChunk
        coEvery { zipChunker.finalizeAndReset(emptyList()) } returns zipChunk
        every { filesCache.upsert(sameCachedFile(cachedFile1)) } just Runs
        every { filesCache.upsert(sameCachedFile(cachedFile2)) } just Runs

@@ -200,7 +201,7 @@ internal class SmallFileBackupTest {
        addFile(file1)
        every { zipChunker.fitsFile(file2) } returns true
        addFile(file2)
        every { zipChunker.finalizeAndReset(emptyList()) } throws IOException()
        coEvery { zipChunker.finalizeAndReset(emptyList()) } throws IOException()

        val result = smallFileBackup.backupFiles(files, hashSetOf(), null)
        assertEquals(emptySet<String>(), result.chunkIds)
@@ -6,9 +6,11 @@
package org.calyxos.backup.storage.backup

import io.mockk.Runs
import io.mockk.coEvery
import io.mockk.every
import io.mockk.just
import io.mockk.mockk
import kotlinx.coroutines.runBlocking
import org.calyxos.backup.storage.getRandomDocFile
import org.calyxos.backup.storage.getRandomString
import org.calyxos.backup.storage.toHexString

@@ -76,7 +78,7 @@ internal class ZipChunkerTest {
    }

    @Test
    fun `two files finalizeAndReset()`() {
    fun `two files finalizeAndReset()`() = runBlocking {
        val file1 = getRandomDocFile(4)
        val file2 = getRandomDocFile(8)
        val chunkIdBytes = Random.nextBytes(64)

@@ -90,7 +92,7 @@ internal class ZipChunkerTest {
        zipChunker.addFile(file2, fileInputStream)

        every { mac.doFinal(any()) } returns chunkIdBytes
        every { chunkWriter.writeZipChunk(zipChunk, any(), missingChunks) } returns wasWritten
        coEvery { chunkWriter.writeZipChunk(zipChunk, any(), missingChunks) } returns wasWritten

        assertEquals(
            zipChunk.copy(wasUploaded = wasWritten),

@@ -104,7 +106,7 @@ internal class ZipChunkerTest {
    }

    @Test
    fun `throwing in finalizeAndReset() resets counter`() {
    fun `throwing in finalizeAndReset() resets counter`() = runBlocking {
        val file1 = getRandomDocFile(4)
        val file2 = getRandomDocFile(8)
        val chunkIdBytes = Random.nextBytes(64)

@@ -117,7 +119,7 @@ internal class ZipChunkerTest {
        zipChunker.addFile(file2, fileInputStream)

        every { mac.doFinal(any()) } returns chunkIdBytes
        every { chunkWriter.writeZipChunk(zipChunk, any(), missingChunks) } throws IOException()
        coEvery { chunkWriter.writeZipChunk(zipChunk, any(), missingChunks) } throws IOException()

        assertFailsWith(IOException::class) {
            zipChunker.finalizeAndReset(missingChunks)