lib/model, gui: Correct completion percentages when there are lots of deletes (fixes #3496)

We used to consider deleted files and directories to be 128 bytes large.
After the delta indexes change, a bug slipped in whereby deleted files
were weighted according to their old, non-deleted size. Both ways are
incorrect (but the latest change made it worse): if more data has been
deleted than remains in the repo, the needSize can be greater than the
globalSize, resulting in a negative completion percentage.

This change makes deleted items zero bytes large, which makes more
sense. Instead, we expose the number of files that we need to delete as
a separate field in the Completion() result, and hack the percentage
down to 95% if it would otherwise be 100% while there are still files to
delete. This latter part is sort of ugly, but necessary to give the user
some sort of feedback.
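
For reference, the resulting calculation boils down to something like the sketch
below. This is an illustrative standalone helper, not code from the commit; the
parameter names mirror the Completion() fields discussed above.

// completionPct is a minimal sketch of the calculation described above:
// deleted items contribute zero bytes, and a folder that only has deletes
// left is reported as 95% rather than 100% complete.
func completionPct(globalBytes, needBytes, needDeletes int64) float64 {
	if globalBytes == 0 {
		return 100 // empty folder counts as fully complete
	}
	needRatio := float64(needBytes) / float64(globalBytes)
	pct := 100 * (1 - needRatio)
	if needBytes == 0 && needDeletes > 0 {
		// All data is present, but deletes remain; signal the pending
		// work to consumers that only look at the percentage.
		pct = 95
	}
	return pct
}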

GitHub-Pull-Request: https://github.com/syncthing/syncthing/pull/3556
Jakob Borg 2016-09-02 06:45:46 +00:00 committed by Audrius Butkevicius
parent 1188ebbb7b
commit 5b37d0356c
6 changed files with 147 additions and 8 deletions

View File

@ -588,6 +588,7 @@ func (s *apiService) getDBCompletion(w http.ResponseWriter, r *http.Request) {
"completion": comp.CompletionPct,
"needBytes": comp.NeedBytes,
"globalBytes": comp.GlobalBytes,
"needDeletes": comp.NeedDeletes,
})
}
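
With the new field in place, an API consumer can read needDeletes next to the
existing values. The following is a minimal sketch of such a consumer, assuming
a local instance on the default port, its API key, and the /rest/db/completion
endpoint; the device ID, folder ID, and key are placeholders.

package main

import (
	"encoding/json"
	"fmt"
	"net/http"
)

func main() {
	// Placeholder device ID, folder ID and API key.
	url := "http://localhost:8384/rest/db/completion?device=DEVICE-ID&folder=default"
	req, err := http.NewRequest("GET", url, nil)
	if err != nil {
		panic(err)
	}
	req.Header.Set("X-API-Key", "API-KEY")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// Field names match the JSON keys written by getDBCompletion above.
	var comp struct {
		Completion  float64 `json:"completion"`
		NeedBytes   int64   `json:"needBytes"`
		GlobalBytes int64   `json:"globalBytes"`
		NeedDeletes int64   `json:"needDeletes"`
	}
	if err := json.NewDecoder(resp.Body).Decode(&comp); err != nil {
		panic(err)
	}
	fmt.Printf("%.2f%% complete, %d / %d bytes needed, %d deletes pending\n",
		comp.Completion, comp.NeedBytes, comp.GlobalBytes, comp.NeedDeletes)
}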

View File

@ -439,13 +439,14 @@ angular.module('syncthing.core')
}
function recalcCompletion(device) {
- var total = 0, needed = 0;
+ var total = 0, needed = 0, deletes = 0;
for (var folder in $scope.completion[device]) {
if (folder === "_total") {
continue;
}
total += $scope.completion[device][folder].globalBytes;
needed += $scope.completion[device][folder].needBytes;
deletes += $scope.completion[device][folder].needDeletes;
}
if (total == 0) {
$scope.completion[device]._total = 100;
@ -453,6 +454,13 @@ angular.module('syncthing.core')
$scope.completion[device]._total = 100 * (1 - needed / total);
}
if (needed == 0 && deletes > 0) {
// We don't need any data, but we have deletes that we need
// to do. Drop down the completion percentage to indicate
// that we have stuff to do.
$scope.completion[device]._total = 95;
}
console.log("recalcCompletion", device, $scope.completion[device]);
}

View File

@ -47,8 +47,11 @@ func (f FileInfoTruncated) HasPermissionBits() bool {
}
func (f FileInfoTruncated) FileSize() int64 {
- if f.IsDirectory() || f.IsDeleted() {
- return 128
+ if f.Deleted {
+ return 0
+ }
+ if f.IsDirectory() {
+ return protocol.SyntheticDirectorySize
}
return f.Size
}

View File

@ -463,6 +463,7 @@ type FolderCompletion struct {
CompletionPct float64
NeedBytes int64
GlobalBytes int64
NeedDeletes int64
}
// Completion returns the completion status, in percent, for the given device
@ -487,14 +488,20 @@ func (m *Model) Completion(device protocol.DeviceID, folder string) FolderComple
counts := m.deviceDownloads[device].GetBlockCounts(folder)
m.pmut.RUnlock()
- var need, fileNeed, downloaded int64
+ var need, fileNeed, downloaded, deletes int64
rf.WithNeedTruncated(device, func(f db.FileIntf) bool {
ft := f.(db.FileInfoTruncated)
// If the file is deleted, we account it only in the deleted column.
if ft.Deleted {
deletes++
return true
}
// This might be more than it really is, because some blocks can be of a smaller size.
downloaded = int64(counts[ft.Name] * protocol.BlockSize)
- fileNeed = ft.Size - downloaded
+ fileNeed = ft.FileSize() - downloaded
if fileNeed < 0 {
fileNeed = 0
}
@ -505,12 +512,22 @@ func (m *Model) Completion(device protocol.DeviceID, folder string) FolderComple
needRatio := float64(need) / float64(tot)
completionPct := 100 * (1 - needRatio)
// If the completion is 100% but there are deletes we need to handle,
// drop it down a notch. Hack for consumers that look only at the
// percentage (our own GUI does the same calculation as here on its own
// and needs the same fixup).
if need == 0 && deletes > 0 {
completionPct = 95 // chosen by fair dice roll
}
l.Debugf("%v Completion(%s, %q): %f (%d / %d = %f)", m, device, folder, completionPct, need, tot, needRatio)
return FolderCompletion{
CompletionPct: completionPct,
NeedBytes: need,
GlobalBytes: tot,
NeedDeletes: deletes,
}
}
@ -1762,7 +1779,7 @@ func (m *Model) internalScanFolderSubdirs(folder string, subDirs []string) error
nf := protocol.FileInfo{
Name: f.Name,
Type: f.Type,
- Size: f.Size,
+ Size: 0,
ModifiedS: f.ModifiedS,
ModifiedNs: f.ModifiedNs,
Deleted: true,
@ -1927,6 +1944,7 @@ func (m *Model) Override(folder string) {
need.Deleted = true
need.Blocks = nil
need.Version = need.Version.Update(m.shortID)
need.Size = 0
} else {
// We have the file, replace with our version
have.Version = have.Version.Merge(need.Version).Update(m.shortID)

View File

@ -93,6 +93,7 @@ func TestRequest(t *testing.T) {
m.AddFolder(defaultFolderConfig)
m.StartFolder("default")
m.ServeBackground()
defer m.Stop()
m.ScanFolder("default")
bs := make([]byte, protocol.BlockSize)
@ -168,6 +169,7 @@ func benchmarkIndex(b *testing.B, nfiles int) {
m.AddFolder(defaultFolderConfig)
m.StartFolder("default")
m.ServeBackground()
defer m.Stop()
files := genFiles(nfiles)
m.Index(device1, "default", files)
@ -197,6 +199,7 @@ func benchmarkIndexUpdate(b *testing.B, nfiles, nufiles int) {
m.AddFolder(defaultFolderConfig)
m.StartFolder("default")
m.ServeBackground()
defer m.Stop()
files := genFiles(nfiles)
ufiles := genFiles(nufiles)
@ -278,6 +281,7 @@ func BenchmarkRequest(b *testing.B) {
m := NewModel(defaultConfig, protocol.LocalDeviceID, "device", "syncthing", "dev", db, nil)
m.AddFolder(defaultFolderConfig)
m.ServeBackground()
defer m.Stop()
m.ScanFolder("default")
const n = 1000
@ -346,6 +350,7 @@ func TestDeviceRename(t *testing.T) {
m.AddConnection(conn, hello)
m.ServeBackground()
defer m.Stop()
if cfg.Devices()[device1].Name != "" {
t.Errorf("Device already has a name")
@ -424,6 +429,7 @@ func TestClusterConfig(t *testing.T) {
m.AddFolder(cfg.Folders[0])
m.AddFolder(cfg.Folders[1])
m.ServeBackground()
defer m.Stop()
cm := m.generateClusterConfig(device2)
@ -495,6 +501,7 @@ func TestIgnores(t *testing.T) {
m.AddFolder(defaultFolderConfig)
m.StartFolder("default")
m.ServeBackground()
defer m.Stop()
expected := []string{
".*",
@ -590,6 +597,7 @@ func TestROScanRecovery(t *testing.T) {
m.AddFolder(fcfg)
m.StartFolder("default")
m.ServeBackground()
defer m.Stop()
waitFor := func(status string) error {
timeout := time.Now().Add(2 * time.Second)
@ -676,6 +684,7 @@ func TestRWScanRecovery(t *testing.T) {
m.AddFolder(fcfg)
m.StartFolder("default")
m.ServeBackground()
defer m.Stop()
waitFor := func(status string) error {
timeout := time.Now().Add(2 * time.Second)
@ -739,6 +748,7 @@ func TestGlobalDirectoryTree(t *testing.T) {
m := NewModel(defaultConfig, protocol.LocalDeviceID, "device", "syncthing", "dev", db, nil)
m.AddFolder(defaultFolderConfig)
m.ServeBackground()
defer m.Stop()
b := func(isfile bool, path ...string) protocol.FileInfo {
typ := protocol.FileInfoTypeDirectory
@ -1646,6 +1656,98 @@ func TestSharedWithClearedOnDisconnect(t *testing.T) {
}
}
func TestIssue3496(t *testing.T) {
// It seems like lots of deleted files can cause negative completion
// percentages. Let's make sure that doesn't happen. Also do some general
// checks on the completion calculation stuff.
dbi := db.OpenMemory()
m := NewModel(defaultConfig, protocol.LocalDeviceID, "device", "syncthing", "dev", dbi, nil)
m.AddFolder(defaultFolderConfig)
m.StartFolder("default")
m.ServeBackground()
defer m.Stop()
m.ScanFolder("default")
addFakeConn(m, device1)
addFakeConn(m, device2)
// Reach into the model and grab the current file list...
m.fmut.RLock()
fs := m.folderFiles["default"]
m.fmut.RUnlock()
var localFiles []protocol.FileInfo
fs.WithHave(protocol.LocalDeviceID, func(i db.FileIntf) bool {
localFiles = append(localFiles, i.(protocol.FileInfo))
return true
})
// Mark all files as deleted and fake it as update from device1
for i := range localFiles {
localFiles[i].Deleted = true
localFiles[i].Version = localFiles[i].Version.Update(device1.Short())
localFiles[i].Blocks = nil
}
// Also add a small file that we're supposed to need, or the global size
// stuff will bail out early due to the entire folder being zero size.
localFiles = append(localFiles, protocol.FileInfo{
Name: "fake",
Size: 1234,
Type: protocol.FileInfoTypeFile,
Version: protocol.Vector{Counters: []protocol.Counter{{ID: device1.Short(), Value: 42}}},
})
m.IndexUpdate(device1, "default", localFiles)
// Check that the completion percentage for us makes sense
comp := m.Completion(protocol.LocalDeviceID, "default")
if comp.NeedBytes > comp.GlobalBytes {
t.Errorf("Need more bytes than exist, not possible: %d > %d", comp.NeedBytes, comp.GlobalBytes)
}
if comp.CompletionPct < 0 {
t.Errorf("Less than zero percent complete, not possible: %.02f%%", comp.CompletionPct)
}
if comp.NeedBytes == 0 {
t.Error("Need no bytes even though some files are deleted")
}
if comp.CompletionPct == 100 {
t.Errorf("Fully complete, not possible: %.02f%%", comp.CompletionPct)
}
t.Log(comp)
}
func addFakeConn(m *Model, dev protocol.DeviceID) {
conn1 := connections.Connection{
IntermediateConnection: connections.IntermediateConnection{
Conn: tls.Client(&fakeConn{}, nil),
Type: "foo",
Priority: 10,
},
Connection: &FakeConnection{
id: dev,
},
}
m.AddConnection(conn1, protocol.HelloResult{})
m.ClusterConfig(device1, protocol.ClusterConfig{
Folders: []protocol.Folder{
{
ID: "default",
Devices: []protocol.Device{
{ID: device1[:]},
{ID: device2[:]},
},
},
},
})
}
type fakeAddr struct{}
func (fakeAddr) Network() string {

View File

@ -16,6 +16,10 @@ import (
"github.com/syncthing/syncthing/lib/rand"
)
const (
SyntheticDirectorySize = 128
)
var (
sha256OfEmptyBlock = sha256.Sum256(make([]byte, BlockSize))
HelloMessageMagic = uint32(0x2EA7D90B)
@ -56,8 +60,11 @@ func (f FileInfo) HasPermissionBits() bool {
}
func (f FileInfo) FileSize() int64 {
- if f.IsDirectory() || f.IsDeleted() {
- return 128
+ if f.Deleted {
+ return 0
+ }
+ if f.IsDirectory() {
+ return SyntheticDirectorySize
}
return f.Size
}
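
The change mirrors the FileInfoTruncated.FileSize() change above for the full
FileInfo. As a quick illustration of the new semantics, a table-driven test
along these lines could be written against the fields visible in this diff
(the test itself is a sketch and not part of the commit):

package protocol_test

import (
	"testing"

	"github.com/syncthing/syncthing/lib/protocol"
)

func TestFileSizeSketch(t *testing.T) {
	cases := []struct {
		f    protocol.FileInfo
		want int64
	}{
		// Deleted entries no longer contribute any bytes.
		{protocol.FileInfo{Deleted: true, Size: 1234}, 0},
		// Directories keep their synthetic 128 byte size.
		{protocol.FileInfo{Type: protocol.FileInfoTypeDirectory}, protocol.SyntheticDirectorySize},
		// Regular files report their actual size.
		{protocol.FileInfo{Type: protocol.FileInfoTypeFile, Size: 1234}, 1234},
	}
	for i, tc := range cases {
		if got := tc.f.FileSize(); got != tc.want {
			t.Errorf("case %d: FileSize() = %d, want %d", i, got, tc.want)
		}
	}
}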