make file restore numbers add up by showing duplicates and errors
parent f51c758493
commit 3683f82363
7 changed files with 55 additions and 7 deletions

@@ -13,7 +13,6 @@ import de.grobox.storagebackuptester.backup.getSpeed
 import org.calyxos.backup.storage.api.BackupFile
 import org.calyxos.backup.storage.restore.NotificationRestoreObserver
 import kotlin.time.DurationUnit
-import kotlin.time.ExperimentalTime
 import kotlin.time.toDuration
 
 data class RestoreProgress(
@@ -41,6 +40,10 @@ class RestoreStats(
         liveData.postValue(RestoreProgress(filesProcessed, totalFiles, text))
     }
 
+    override fun onFileDuplicatesRemoved(num: Int) {
+        // no-op
+    }
+
     override fun onFileRestored(
         file: BackupFile,
         bytesWritten: Long,
@@ -68,7 +71,6 @@ class RestoreStats(
         liveData.postValue(RestoreProgress(filesProcessed, totalFiles))
     }
 
-    @OptIn(ExperimentalTime::class)
     override fun onRestoreComplete(restoreDuration: Long) {
         super.onRestoreComplete(restoreDuration)
         val sb = StringBuilder("\n")

@@ -7,6 +7,7 @@ package org.calyxos.backup.storage.api
 
 public interface RestoreObserver {
     public fun onRestoreStart(numFiles: Int, totalSize: Long)
+    public fun onFileDuplicatesRemoved(num: Int)
     public fun onFileRestored(file: BackupFile, bytesWritten: Long, tag: String)
 
     /**

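Because the new member is declared without a default body, every direct implementation of RestoreObserver has to provide it; in this commit the demo's RestoreStats overrides it as a no-op (above) and NotificationRestoreObserver records the value (below). A minimal self-contained sketch of the contract, using a local stand-in interface because the real RestoreObserver has more members than this hunk shows, and with BackupFile reduced to a plain file name:

// Stand-in types, not the library's: just enough to show where the new callback sits.
interface RestoreObserverSketch {
    fun onRestoreStart(numFiles: Int, totalSize: Long)
    fun onFileDuplicatesRemoved(num: Int)
    fun onFileRestored(fileName: String, bytesWritten: Long, tag: String)
}

class CountingObserver : RestoreObserverSketch {
    var duplicates = 0
        private set

    override fun onRestoreStart(numFiles: Int, totalSize: Long) {}

    // Reported once per restore, before individual files are restored.
    override fun onFileDuplicatesRemoved(num: Int) {
        duplicates = num
    }

    override fun onFileRestored(fileName: String, bytesWritten: Long, tag: String) {}
}
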
@@ -21,24 +21,32 @@ internal data class RestorableChunk(
 
     /**
      * Call this after the RestorableChunk is complete and **before** using it for restore.
+     *
+     * @return the number of duplicate files removed
      */
-    fun finalize() {
+    fun finalize(): Int {
        // entries in the zip chunk need to be sorted by their index in the zip
         files.sortBy { it.zipIndex }
         // There might be duplicates in case the *exact* same set of files exists more than once
         // so they'll produce the same chunk ID.
         // But since the content is there and this is an unlikely scenario, we drop the duplicates.
         var lastIndex = 0
+        var numRemoved = 0
         val iterator = files.iterator()
         while (iterator.hasNext()) {
             val file = iterator.next()
             val i = file.zipIndex
             when {
                 i < lastIndex -> error("unsorted list")
-                i == lastIndex -> iterator.remove() // remove duplicate
+                i == lastIndex -> { // remove duplicate
+                    numRemoved++
+                    iterator.remove()
+                }
+
                 i > lastIndex -> lastIndex = i // gaps are possible when we don't restore all files
             }
         }
+        return numRemoved
     }
 }
 
@@ -87,6 +95,14 @@ internal data class FileSplitterResult(
      * Files referenced in [multiChunkMap] sorted for restoring.
      */
     val multiChunkFiles: Collection<RestorableFile>,
+    /**
+     * The number of duplicate files that was removed from [zipChunks].
+     * Duplicate files in [zipChunks] with the same chunk ID will have the same index in the ZIP.
+     * So we remove them to make restore easier.
+     * With some extra work, we could restore those files,
+     * but by not doing so we are probably doing a favor to the user.
+     */
+    val numRemovedDuplicates: Int,
 )
 
 /**
@@ -121,7 +137,7 @@ internal object FileSplitter {
             }
         }
         // entries in the zip chunk need to be sorted by their index in the zip, duplicated removed
-        zipChunkMap.values.forEach { zipChunk -> zipChunk.finalize() }
+        val numRemovedDuplicates = zipChunkMap.values.sumOf { zipChunk -> zipChunk.finalize() }
         val singleChunks = chunkMap.values.filter { it.isSingle }
         val multiChunks = chunkMap.filterValues { !it.isSingle }
         return FileSplitterResult(
@@ -129,6 +145,7 @@ internal object FileSplitter {
             singleChunks = singleChunks,
             multiChunkMap = multiChunks,
             multiChunkFiles = getMultiFiles(multiChunks),
+            numRemovedDuplicates = numRemovedDuplicates,
         )
     }
 
@@ -145,6 +162,7 @@ internal object FileSplitter {
             f1.chunkIdsCount == f2.chunkIdsCount -> {
                 f1.chunkIds.joinToString().compareTo(f2.chunkIds.joinToString())
             }
+
             else -> 1
         }
     }

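The deduplication in finalize() leans on the sort that precedes it: once the files are ordered by zipIndex, any entry whose index equals the previous one points at the same ZIP entry and can be dropped. A self-contained sketch of the same pass (FileEntry and dedupByZipIndex are illustrative stand-ins; the real RestorableFile has more fields, and zip indices here start at 1 as the lastIndex = 0 seed implies):

// Stand-in for RestorableFile, reduced to the only field this pass reads.
data class FileEntry(val zipIndex: Int)

// Same pass as RestorableChunk.finalize(): sort by index, drop entries that repeat
// the previous index, and report how many were dropped.
fun dedupByZipIndex(files: MutableList<FileEntry>): Int {
    files.sortBy { it.zipIndex }
    var lastIndex = 0
    var numRemoved = 0
    val iterator = files.iterator()
    while (iterator.hasNext()) {
        val i = iterator.next().zipIndex
        when {
            i < lastIndex -> error("unsorted list")
            i == lastIndex -> { // same ZIP entry seen twice: drop it
                numRemoved++
                iterator.remove()
            }
            i > lastIndex -> lastIndex = i // gaps are fine when not every file is restored
        }
    }
    return numRemoved
}

fun main() {
    val files = mutableListOf(FileEntry(1), FileEntry(2), FileEntry(2), FileEntry(4))
    println(dedupByZipIndex(files)) // 1
    println(files) // [FileEntry(zipIndex=1), FileEntry(zipIndex=2), FileEntry(zipIndex=4)]
}
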
@@ -18,6 +18,7 @@ public open class NotificationRestoreObserver internal constructor(private val n
 
     private var totalFiles = 0
     private var filesRestored = 0
+    private var filesRemovedAsDuplicates = 0
     private var filesWithError = 0
 
     override fun onRestoreStart(numFiles: Int, totalSize: Long) {
@@ -25,6 +26,10 @@ public open class NotificationRestoreObserver internal constructor(private val n
         n.updateRestoreNotification(filesRestored + filesWithError, totalFiles)
     }
 
+    override fun onFileDuplicatesRemoved(num: Int) {
+        filesRemovedAsDuplicates = num
+    }
+
     override fun onFileRestored(file: BackupFile, bytesWritten: Long, tag: String) {
         filesRestored++
         n.updateRestoreNotification(filesRestored + filesWithError, totalFiles)
@@ -36,7 +41,13 @@ public open class NotificationRestoreObserver internal constructor(private val n
     }
 
     override fun onRestoreComplete(restoreDuration: Long) {
-        n.showRestoreCompleteNotification(filesRestored, totalFiles, getRestoreCompleteIntent())
+        n.showRestoreCompleteNotification(
+            restored = filesRestored,
+            duplicates = filesRemovedAsDuplicates,
+            errors = filesWithError,
+            total = totalFiles,
+            intent = getRestoreCompleteIntent(),
+        )
     }
 
     protected open fun getRestoreCompleteIntent(): PendingIntent? {

@@ -110,8 +110,10 @@ internal class Restore(
         observer?.onRestoreStart(filesTotal, totalSize)
 
         val split = FileSplitter.splitSnapshot(snapshot)
+        observer?.onFileDuplicatesRemoved(split.numRemovedDuplicates)
+        var restoredFiles = split.numRemovedDuplicates // count removed dups, so numbers add up
+
         val version = snapshot.version
-        var restoredFiles = 0
         val smallFilesDuration = measure {
             restoredFiles += zipChunkRestore.restore(
                 version,

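Seeding restoredFiles with the duplicate count is what makes the final count line up with the filesTotal reported to the observer at the start: the observer hears about every file in the snapshot, but the deduplicated zip chunks restore fewer of them. A worked example with illustrative values (10 files reported, 2 dropped as duplicates, 8 actually written):

// Illustrative values only; the individual restore passes are collapsed into one number.
fun main() {
    val filesTotal = 10            // reported via observer?.onRestoreStart(filesTotal, totalSize)
    val numRemovedDuplicates = 2   // reported via observer?.onFileDuplicatesRemoved(...)

    var restoredFiles = numRemovedDuplicates // seed, so numbers add up
    restoredFiles += 8                       // what the zip-chunk and file restore passes return

    check(restoredFiles == filesTotal) // 2 + 8 == 10
}
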
@@ -116,13 +116,25 @@ internal class Notifications(private val context: Context) {
 
     internal fun showRestoreCompleteNotification(
         restored: Int,
+        duplicates: Int,
+        errors: Int,
         total: Int,
         intent: PendingIntent?,
     ) {
         val title = context.getString(R.string.notification_restore_complete_title, restored, total)
+        val msg = StringBuilder().apply {
+            if (duplicates > 0) {
+                append(context.getString(R.string.notification_restore_complete_dups, duplicates))
+            }
+            if (errors > 0) {
+                if (duplicates > 0) append("\n")
+                append(context.getString(R.string.notification_restore_complete_errors, errors))
+            }
+        }.toString().ifEmpty { null }
         val notification = NotificationCompat.Builder(context, CHANNEL_ID_BACKUP).apply {
             setSmallIcon(R.drawable.ic_cloud_done)
             setContentTitle(title)
+            setContentText(msg)
             setOngoing(false)
             setShowWhen(true)
             setAutoCancel(true)

@@ -18,6 +18,8 @@
     <string name="notification_restore_title">Restoring files…</string>
     <string name="notification_restore_info">%1$d/%2$d</string>
     <string name="notification_restore_complete_title">%1$d of %2$d files restored</string>
+    <string name="notification_restore_complete_dups">%1$d files were duplicates.</string>
+    <string name="notification_restore_complete_errors">%1$d files had errors.</string>
 
     <string name="snapshots_title">Available storage backups</string>
     <string name="snapshots_empty">No storage backups found\n\nSorry, but there is nothing that can be restored.</string>

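Taken together, the Notifications change and the two new string resources produce an optional second line of notification text: each line appears only when its count is non-zero, the lines are joined with a newline, and an entirely empty result becomes null so no content text is set. A self-contained sketch of that assembly, with plain Kotlin String.format standing in for context.getString() and the English templates hard-coded:

// Hard-coded English templates stand in for R.string.notification_restore_complete_dups
// and R.string.notification_restore_complete_errors; Context.getString() fills the
// positional %1$d placeholder the same way.
fun restoreCompleteMessage(duplicates: Int, errors: Int): String? = StringBuilder().apply {
    if (duplicates > 0) {
        append("%1\$d files were duplicates.".format(duplicates))
    }
    if (errors > 0) {
        if (duplicates > 0) append("\n")
        append("%1\$d files had errors.".format(errors))
    }
}.toString().ifEmpty { null }

fun main() {
    println(restoreCompleteMessage(duplicates = 2, errors = 3)) // two summary lines
    println(restoreCompleteMessage(duplicates = 0, errors = 0)) // null: no content text is set
}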