lib: Return error from db.FileSet.Snapshot (fixes #7419, ref #5907) (#7424)

This commit is contained in:
Simon Frei 2021-03-07 13:43:22 +01:00 committed by GitHub
parent c1d06d9501
commit 310fba4c12
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
25 changed files with 601 additions and 344 deletions

2
go.mod
View File

@ -28,6 +28,7 @@ require (
github.com/lucas-clemente/quic-go v0.19.3
github.com/maruel/panicparse v1.5.1
github.com/mattn/go-isatty v0.0.12
github.com/maxbrunsfeld/counterfeiter/v6 v6.3.0 // indirect
github.com/minio/sha256-simd v0.1.1
github.com/miscreant/miscreant.go v0.0.0-20200214223636-26d376326b75
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e // indirect
@ -49,6 +50,7 @@ require (
golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4
golang.org/x/text v0.3.4
golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e
golang.org/x/tools v0.1.0 // indirect
gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b // indirect
)

13
go.sum
View File

@ -261,6 +261,8 @@ github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Ky
github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/maxbrunsfeld/counterfeiter/v6 v6.3.0 h1:8E6DrFvII6QR4eJ3PkFvV+lc03P+2qwqTPLm1ax7694=
github.com/maxbrunsfeld/counterfeiter/v6 v6.3.0/go.mod h1:fcEyUyXZXoV4Abw8DX0t7wyL8mCDxXyU4iAFZfT3IHw=
github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE=
github.com/microcosm-cc/bluemonday v1.0.1/go.mod h1:hsXNsILzKxV+sX77C5b8FSuKF00vh2OMYv+xgHpAMF4=
github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
@ -380,6 +382,7 @@ github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb
github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E=
github.com/sasha-s/go-deadlock v0.2.0 h1:lMqc+fUb7RrFS3gQLtoQsJ7/6TV/pAIFvBsqX73DK8Y=
github.com/sasha-s/go-deadlock v0.2.0/go.mod h1:StQn567HiB1fF2yJ44N9au7wOhrPS3iZqiDbRupzT10=
github.com/sclevine/spec v1.4.0/go.mod h1:LvpgJaFyvQzRvc1kaDs0bulYwzC70PbiYjC4QnFHkOM=
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
github.com/shirou/gopsutil/v3 v3.20.11 h1:NeVf1K0cgxsWz+N3671ojRptdgzvp7BXL3KV21R0JnA=
@ -446,6 +449,7 @@ github.com/viant/toolbox v0.24.0/go.mod h1:OxMCG57V0PXuIP2HNQrtJf2CjqdmbrOx5EkMI
github.com/vitrun/qart v0.0.0-20160531060029-bf64b92db6b0 h1:okhMind4q9H1OxF44gNegWkiP4H/gsTFLalHFa4OOUI=
github.com/vitrun/qart v0.0.0-20160531060029-bf64b92db6b0/go.mod h1:TTbGUfE+cXXceWtbTHq6lqcTvYPBKLNejBEbnUsQJtU=
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg=
go.opencensus.io v0.18.0/go.mod h1:vKdFvxhtzZ9onBp9VKHK8z/sRpBMnKAsufL7wlDrCOA=
@ -483,6 +487,8 @@ golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHl
golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
golang.org/x/mod v0.3.0 h1:RM4zey1++hCTbCVQfnWeKs9/IEsaBLA8vTkd0WVtmH4=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@ -507,6 +513,8 @@ golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81R
golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20200813134508-3edf25e44fcc/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20201006153459-a7d1128ccaa0/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201026091529-146b70c837a4/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201031054903-ff519b6c9102 h1:42cLlJJdEh+ySyeUUbEQ5bsTiq8voBeTuweGVkY6Puw=
golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
@ -520,6 +528,7 @@ golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@ -582,7 +591,11 @@ golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBn
golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20201023174141-c8cfbd0f21e6/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.1.0 h1:po9/4sTYwZU9lPhi1tOrb4hCv3qrhiQ77LZfGa2OjwY=
golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=

View File

@ -600,7 +600,13 @@ angular.module('syncthing.core')
}
$scope.completion[device][folder] = data;
recalcCompletion(device);
}).error($scope.emitHTTPError);
}).error(function(data, status, headers, config) {
if (status === 404) {
console.log("refreshCompletion:", data);
} else {
$scope.emitHTTPError(data, status, headers, config);
}
});
}
function refreshConnectionStats() {

View File

@ -600,7 +600,13 @@ angular.module('syncthing.core')
}
$scope.completion[device][folder] = data;
recalcCompletion(device);
}).error($scope.emitHTTPError);
}).error(function(data, status, headers, config) {
if (status === 404) {
console.log("refreshCompletion:", data);
} else {
$scope.emitHTTPError(data, status, headers, config);
}
})
}
function refreshConnectionStats() {

View File

@ -746,7 +746,15 @@ func (s *service) getDBCompletion(w http.ResponseWriter, r *http.Request) {
}
}
sendJSON(w, s.model.Completion(device, folder).Map())
if comp, err := s.model.Completion(device, folder); err != nil {
status := http.StatusInternalServerError
if isFolderNotFound(err) {
status = http.StatusNotFound
}
http.Error(w, err.Error(), status)
} else {
sendJSON(w, comp.Map())
}
}
func (s *service) getDBStatus(w http.ResponseWriter, r *http.Request) {
@ -878,8 +886,25 @@ func (s *service) getDBFile(w http.ResponseWriter, r *http.Request) {
qs := r.URL.Query()
folder := qs.Get("folder")
file := qs.Get("file")
gf, gfOk := s.model.CurrentGlobalFile(folder, file)
lf, lfOk := s.model.CurrentFolderFile(folder, file)
errStatus := http.StatusInternalServerError
gf, gfOk, err := s.model.CurrentGlobalFile(folder, file)
if err != nil {
if isFolderNotFound(err) {
errStatus = http.StatusNotFound
}
http.Error(w, err.Error(), errStatus)
return
}
lf, lfOk, err := s.model.CurrentFolderFile(folder, file)
if err != nil {
if isFolderNotFound(err) {
errStatus = http.StatusNotFound
}
http.Error(w, err.Error(), errStatus)
return
}
if !(gfOk || lfOk) {
// This file for sure does not exist.
@ -887,7 +912,11 @@ func (s *service) getDBFile(w http.ResponseWriter, r *http.Request) {
return
}
av := s.model.Availability(folder, gf, protocol.BlockInfo{})
av, err := s.model.Availability(folder, gf, protocol.BlockInfo{})
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
}
sendJSON(w, map[string]interface{}{
"global": jsonFileInfo(gf),
"local": jsonFileInfo(lf),
@ -1498,7 +1527,12 @@ func (s *service) getPeerCompletion(w http.ResponseWriter, r *http.Request) {
for _, device := range folder.DeviceIDs() {
deviceStr := device.String()
if _, ok := s.model.Connection(device); ok {
tot[deviceStr] += s.model.Completion(device, folder.ID).CompletionPct
comp, err := s.model.Completion(device, folder.ID)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
tot[deviceStr] += comp.CompletionPct
} else {
tot[deviceStr] = 0
}
@ -1860,3 +1894,16 @@ func errorString(err error) *string {
}
return nil
}
// isFolderNotFound reports whether err (or anything it wraps) is one of
// the model errors indicating the folder is unavailable: missing from the
// configuration, paused, or not (yet) running.
func isFolderNotFound(err error) bool {
	return errors.Is(err, model.ErrFolderMissing) ||
		errors.Is(err, model.ErrFolderPaused) ||
		errors.Is(err, model.ErrFolderNotRunning)
}

View File

@ -185,7 +185,7 @@ func BenchmarkNeedHalf(b *testing.B) {
b.ResetTimer()
for i := 0; i < b.N; i++ {
count := 0
snap := benchS.Snapshot()
snap := snapshot(b, benchS)
snap.WithNeed(protocol.LocalDeviceID, func(fi protocol.FileIntf) bool {
count++
return true
@ -209,7 +209,7 @@ func BenchmarkNeedHalfRemote(b *testing.B) {
b.ResetTimer()
for i := 0; i < b.N; i++ {
count := 0
snap := fset.Snapshot()
snap := snapshot(b, fset)
snap.WithNeed(remoteDevice0, func(fi protocol.FileIntf) bool {
count++
return true
@ -230,7 +230,7 @@ func BenchmarkHave(b *testing.B) {
b.ResetTimer()
for i := 0; i < b.N; i++ {
count := 0
snap := benchS.Snapshot()
snap := snapshot(b, benchS)
snap.WithHave(protocol.LocalDeviceID, func(fi protocol.FileIntf) bool {
count++
return true
@ -251,7 +251,7 @@ func BenchmarkGlobal(b *testing.B) {
b.ResetTimer()
for i := 0; i < b.N; i++ {
count := 0
snap := benchS.Snapshot()
snap := snapshot(b, benchS)
snap.WithGlobal(func(fi protocol.FileIntf) bool {
count++
return true
@ -272,7 +272,7 @@ func BenchmarkNeedHalfTruncated(b *testing.B) {
b.ResetTimer()
for i := 0; i < b.N; i++ {
count := 0
snap := benchS.Snapshot()
snap := snapshot(b, benchS)
snap.WithNeedTruncated(protocol.LocalDeviceID, func(fi protocol.FileIntf) bool {
count++
return true
@ -293,7 +293,7 @@ func BenchmarkHaveTruncated(b *testing.B) {
b.ResetTimer()
for i := 0; i < b.N; i++ {
count := 0
snap := benchS.Snapshot()
snap := snapshot(b, benchS)
snap.WithHaveTruncated(protocol.LocalDeviceID, func(fi protocol.FileIntf) bool {
count++
return true
@ -314,7 +314,7 @@ func BenchmarkGlobalTruncated(b *testing.B) {
b.ResetTimer()
for i := 0; i < b.N; i++ {
count := 0
snap := benchS.Snapshot()
snap := snapshot(b, benchS)
snap.WithGlobalTruncated(func(fi protocol.FileIntf) bool {
count++
return true
@ -336,7 +336,7 @@ func BenchmarkNeedCount(b *testing.B) {
b.ResetTimer()
for i := 0; i < b.N; i++ {
snap := benchS.Snapshot()
snap := snapshot(b, benchS)
_ = snap.NeedSize(protocol.LocalDeviceID)
snap.Release()
}

View File

@ -77,7 +77,7 @@ func TestIgnoredFiles(t *testing.T) {
// Local files should have the "ignored" bit in addition to just being
// generally invalid if we want to look at the simulation of that bit.
snap := fs.Snapshot()
snap := snapshot(t, fs)
defer snap.Release()
fi, ok := snap.Get(protocol.LocalDeviceID, "foo")
if !ok {
@ -866,7 +866,7 @@ func TestCheckLocalNeed(t *testing.T) {
fs.Update(remoteDevice0, files)
checkNeed := func() {
snap := fs.Snapshot()
snap := snapshot(t, fs)
defer snap.Release()
c := snap.NeedSize(protocol.LocalDeviceID)
if c.Files != 2 {
@ -974,7 +974,7 @@ func TestNeedAfterDropGlobal(t *testing.T) {
fs.Update(remoteDevice1, files[1:])
// remoteDevice1 needs one file: test
snap := fs.Snapshot()
snap := snapshot(t, fs)
c := snap.NeedSize(remoteDevice1)
if c.Files != 1 {
t.Errorf("Expected 1 needed files initially, got %v", c.Files)
@ -986,7 +986,7 @@ func TestNeedAfterDropGlobal(t *testing.T) {
fs.Drop(remoteDevice0)
// remoteDevice1 still needs test.
snap = fs.Snapshot()
snap = snapshot(t, fs)
c = snap.NeedSize(remoteDevice1)
if c.Files != 1 {
t.Errorf("Expected still 1 needed files, got %v", c.Files)

View File

@ -117,7 +117,7 @@ func TestRecalcMeta(t *testing.T) {
s1.Update(protocol.LocalDeviceID, files)
// Verify local/global size
snap := s1.Snapshot()
snap := snapshot(t, s1)
ls := snap.LocalSize()
gs := snap.GlobalSize()
snap.Release()
@ -149,7 +149,7 @@ func TestRecalcMeta(t *testing.T) {
}
// Verify that our bad data "took"
snap = s1.Snapshot()
snap = snapshot(t, s1)
ls = snap.LocalSize()
gs = snap.GlobalSize()
snap.Release()
@ -164,7 +164,7 @@ func TestRecalcMeta(t *testing.T) {
s2 := newFileSet(t, "test", fs.NewFilesystem(fs.FilesystemTypeFake, "fake"), ldb)
// Verify local/global size
snap = s2.Snapshot()
snap = snapshot(t, s2)
ls = snap.LocalSize()
gs = snap.GlobalSize()
snap.Release()

View File

@ -151,12 +151,13 @@ type Snapshot struct {
fatalError func(error, string)
}
func (s *FileSet) Snapshot() *Snapshot {
func (s *FileSet) Snapshot() (*Snapshot, error) {
opStr := fmt.Sprintf("%s Snapshot()", s.folder)
l.Debugf(opStr)
t, err := s.db.newReadOnlyTransaction()
if err != nil {
fatalError(err, opStr, s.db)
s.db.handleFailure(err)
return nil, err
}
return &Snapshot{
folder: s.folder,
@ -165,7 +166,7 @@ func (s *FileSet) Snapshot() *Snapshot {
fatalError: func(err error, opStr string) {
fatalError(err, opStr, s.db)
},
}
}, nil
}
func (s *Snapshot) Release() {

View File

@ -45,9 +45,9 @@ func genBlocks(n int) []protocol.BlockInfo {
return b
}
func globalList(s *db.FileSet) []protocol.FileInfo {
func globalList(t testing.TB, s *db.FileSet) []protocol.FileInfo {
var fs []protocol.FileInfo
snap := s.Snapshot()
snap := snapshot(t, s)
defer snap.Release()
snap.WithGlobal(func(fi protocol.FileIntf) bool {
f := fi.(protocol.FileInfo)
@ -56,9 +56,9 @@ func globalList(s *db.FileSet) []protocol.FileInfo {
})
return fs
}
func globalListPrefixed(s *db.FileSet, prefix string) []db.FileInfoTruncated {
func globalListPrefixed(t testing.TB, s *db.FileSet, prefix string) []db.FileInfoTruncated {
var fs []db.FileInfoTruncated
snap := s.Snapshot()
snap := snapshot(t, s)
defer snap.Release()
snap.WithPrefixedGlobalTruncated(prefix, func(fi protocol.FileIntf) bool {
f := fi.(db.FileInfoTruncated)
@ -68,9 +68,9 @@ func globalListPrefixed(s *db.FileSet, prefix string) []db.FileInfoTruncated {
return fs
}
func haveList(s *db.FileSet, n protocol.DeviceID) []protocol.FileInfo {
func haveList(t testing.TB, s *db.FileSet, n protocol.DeviceID) []protocol.FileInfo {
var fs []protocol.FileInfo
snap := s.Snapshot()
snap := snapshot(t, s)
defer snap.Release()
snap.WithHave(n, func(fi protocol.FileIntf) bool {
f := fi.(protocol.FileInfo)
@ -80,9 +80,9 @@ func haveList(s *db.FileSet, n protocol.DeviceID) []protocol.FileInfo {
return fs
}
func haveListPrefixed(s *db.FileSet, n protocol.DeviceID, prefix string) []db.FileInfoTruncated {
func haveListPrefixed(t testing.TB, s *db.FileSet, n protocol.DeviceID, prefix string) []db.FileInfoTruncated {
var fs []db.FileInfoTruncated
snap := s.Snapshot()
snap := snapshot(t, s)
defer snap.Release()
snap.WithPrefixedHaveTruncated(n, prefix, func(fi protocol.FileIntf) bool {
f := fi.(db.FileInfoTruncated)
@ -92,9 +92,9 @@ func haveListPrefixed(s *db.FileSet, n protocol.DeviceID, prefix string) []db.Fi
return fs
}
func needList(s *db.FileSet, n protocol.DeviceID) []protocol.FileInfo {
func needList(t testing.TB, s *db.FileSet, n protocol.DeviceID) []protocol.FileInfo {
var fs []protocol.FileInfo
snap := s.Snapshot()
snap := snapshot(t, s)
defer snap.Release()
snap.WithNeed(n, func(fi protocol.FileIntf) bool {
f := fi.(protocol.FileInfo)
@ -221,7 +221,7 @@ func TestGlobalSet(t *testing.T) {
check := func() {
t.Helper()
g := fileList(globalList(m))
g := fileList(globalList(t, m))
sort.Sort(g)
if fmt.Sprint(g) != fmt.Sprint(expectedGlobal) {
@ -244,7 +244,7 @@ func TestGlobalSet(t *testing.T) {
}
globalBytes += f.FileSize()
}
gs := globalSize(m)
gs := globalSize(t, m)
if gs.Files != globalFiles {
t.Errorf("Incorrect GlobalSize files; %d != %d", gs.Files, globalFiles)
}
@ -258,7 +258,7 @@ func TestGlobalSet(t *testing.T) {
t.Errorf("Incorrect GlobalSize bytes; %d != %d", gs.Bytes, globalBytes)
}
h := fileList(haveList(m, protocol.LocalDeviceID))
h := fileList(haveList(t, m, protocol.LocalDeviceID))
sort.Sort(h)
if fmt.Sprint(h) != fmt.Sprint(localTot) {
@ -281,7 +281,7 @@ func TestGlobalSet(t *testing.T) {
}
haveBytes += f.FileSize()
}
ls := localSize(m)
ls := localSize(t, m)
if ls.Files != haveFiles {
t.Errorf("Incorrect LocalSize files; %d != %d", ls.Files, haveFiles)
}
@ -295,14 +295,14 @@ func TestGlobalSet(t *testing.T) {
t.Errorf("Incorrect LocalSize bytes; %d != %d", ls.Bytes, haveBytes)
}
h = fileList(haveList(m, remoteDevice0))
h = fileList(haveList(t, m, remoteDevice0))
sort.Sort(h)
if fmt.Sprint(h) != fmt.Sprint(remoteTot) {
t.Errorf("Have incorrect (remote);\n A: %v !=\n E: %v", h, remoteTot)
}
n := fileList(needList(m, protocol.LocalDeviceID))
n := fileList(needList(t, m, protocol.LocalDeviceID))
sort.Sort(n)
if fmt.Sprint(n) != fmt.Sprint(expectedLocalNeed) {
@ -311,7 +311,7 @@ func TestGlobalSet(t *testing.T) {
checkNeed(t, m, protocol.LocalDeviceID, expectedLocalNeed)
n = fileList(needList(m, remoteDevice0))
n = fileList(needList(t, m, remoteDevice0))
sort.Sort(n)
if fmt.Sprint(n) != fmt.Sprint(expectedRemoteNeed) {
@ -320,7 +320,7 @@ func TestGlobalSet(t *testing.T) {
checkNeed(t, m, remoteDevice0, expectedRemoteNeed)
snap := m.Snapshot()
snap := snapshot(t, m)
defer snap.Release()
f, ok := snap.Get(protocol.LocalDeviceID, "b")
if !ok {
@ -365,7 +365,7 @@ func TestGlobalSet(t *testing.T) {
check()
snap := m.Snapshot()
snap := snapshot(t, m)
av := []protocol.DeviceID{protocol.LocalDeviceID, remoteDevice0}
a := snap.Availability("a")
@ -431,14 +431,14 @@ func TestGlobalSet(t *testing.T) {
check()
h := fileList(haveList(m, remoteDevice1))
h := fileList(haveList(t, m, remoteDevice1))
sort.Sort(h)
if fmt.Sprint(h) != fmt.Sprint(secRemote) {
t.Errorf("Have incorrect (secRemote);\n A: %v !=\n E: %v", h, secRemote)
}
n := fileList(needList(m, remoteDevice1))
n := fileList(needList(t, m, remoteDevice1))
sort.Sort(n)
if fmt.Sprint(n) != fmt.Sprint(expectedSecRemoteNeed) {
@ -478,7 +478,7 @@ func TestNeedWithInvalid(t *testing.T) {
replace(s, remoteDevice0, remote0Have)
replace(s, remoteDevice1, remote1Have)
need := fileList(needList(s, protocol.LocalDeviceID))
need := fileList(needList(t, s, protocol.LocalDeviceID))
sort.Sort(need)
if fmt.Sprint(need) != fmt.Sprint(expectedNeed) {
@ -506,7 +506,7 @@ func TestUpdateToInvalid(t *testing.T) {
replace(s, protocol.LocalDeviceID, localHave)
have := fileList(haveList(s, protocol.LocalDeviceID))
have := fileList(haveList(t, s, protocol.LocalDeviceID))
sort.Sort(have)
if fmt.Sprint(have) != fmt.Sprint(localHave) {
@ -523,7 +523,7 @@ func TestUpdateToInvalid(t *testing.T) {
s.Update(protocol.LocalDeviceID, append(fileList{}, localHave[1], localHave[4]))
have = fileList(haveList(s, protocol.LocalDeviceID))
have = fileList(haveList(t, s, protocol.LocalDeviceID))
sort.Sort(have)
if fmt.Sprint(have) != fmt.Sprint(localHave) {
@ -567,7 +567,7 @@ func TestInvalidAvailability(t *testing.T) {
replace(s, remoteDevice0, remote0Have)
replace(s, remoteDevice1, remote1Have)
snap := s.Snapshot()
snap := snapshot(t, s)
defer snap.Release()
if av := snap.Availability("both"); len(av) != 2 {
@ -608,7 +608,7 @@ func TestGlobalReset(t *testing.T) {
}
replace(m, protocol.LocalDeviceID, local)
g := globalList(m)
g := globalList(t, m)
sort.Sort(fileList(g))
if diff, equal := messagediff.PrettyDiff(local, g); !equal {
@ -618,7 +618,7 @@ func TestGlobalReset(t *testing.T) {
replace(m, remoteDevice0, remote)
replace(m, remoteDevice0, nil)
g = globalList(m)
g = globalList(t, m)
sort.Sort(fileList(g))
if diff, equal := messagediff.PrettyDiff(local, g); !equal {
@ -655,7 +655,7 @@ func TestNeed(t *testing.T) {
replace(m, protocol.LocalDeviceID, local)
replace(m, remoteDevice0, remote)
need := needList(m, protocol.LocalDeviceID)
need := needList(t, m, protocol.LocalDeviceID)
sort.Sort(fileList(need))
sort.Sort(fileList(shouldNeed))
@ -725,10 +725,10 @@ func TestListDropFolder(t *testing.T) {
if diff, equal := messagediff.PrettyDiff(expectedFolderList, actualFolderList); !equal {
t.Fatalf("FolderList mismatch. Diff:\n%s", diff)
}
if l := len(globalList(s0)); l != 3 {
if l := len(globalList(t, s0)); l != 3 {
t.Errorf("Incorrect global length %d != 3 for s0", l)
}
if l := len(globalList(s1)); l != 3 {
if l := len(globalList(t, s1)); l != 3 {
t.Errorf("Incorrect global length %d != 3 for s1", l)
}
@ -741,10 +741,10 @@ func TestListDropFolder(t *testing.T) {
if diff, equal := messagediff.PrettyDiff(expectedFolderList, actualFolderList); !equal {
t.Fatalf("FolderList mismatch. Diff:\n%s", diff)
}
if l := len(globalList(s0)); l != 3 {
if l := len(globalList(t, s0)); l != 3 {
t.Errorf("Incorrect global length %d != 3 for s0", l)
}
if l := len(globalList(s1)); l != 0 {
if l := len(globalList(t, s1)); l != 0 {
t.Errorf("Incorrect global length %d != 0 for s1", l)
}
}
@ -780,13 +780,13 @@ func TestGlobalNeedWithInvalid(t *testing.T) {
protocol.FileInfo{Name: "d", Version: protocol.Vector{Counters: []protocol.Counter{{ID: remoteDevice0.Short(), Value: 1002}}}},
}
need := fileList(needList(s, protocol.LocalDeviceID))
need := fileList(needList(t, s, protocol.LocalDeviceID))
if fmt.Sprint(need) != fmt.Sprint(total) {
t.Errorf("Need incorrect;\n A: %v !=\n E: %v", need, total)
}
checkNeed(t, s, protocol.LocalDeviceID, total)
global := fileList(globalList(s))
global := fileList(globalList(t, s))
if fmt.Sprint(global) != fmt.Sprint(total) {
t.Errorf("Global incorrect;\n A: %v !=\n E: %v", global, total)
}
@ -810,7 +810,7 @@ func TestLongPath(t *testing.T) {
replace(s, protocol.LocalDeviceID, local)
gf := globalList(s)
gf := globalList(t, s)
if l := len(gf); l != 1 {
t.Fatalf("Incorrect len %d != 1 for global list", l)
}
@ -911,17 +911,17 @@ func TestDropFiles(t *testing.T) {
// Check that they're there
h := haveList(m, protocol.LocalDeviceID)
h := haveList(t, m, protocol.LocalDeviceID)
if len(h) != len(local0) {
t.Errorf("Incorrect number of files after update, %d != %d", len(h), len(local0))
}
h = haveList(m, remoteDevice0)
h = haveList(t, m, remoteDevice0)
if len(h) != len(remote0) {
t.Errorf("Incorrect number of files after update, %d != %d", len(h), len(local0))
}
g := globalList(m)
g := globalList(t, m)
if len(g) != len(local0) {
// local0 covers all files
t.Errorf("Incorrect global files after update, %d != %d", len(g), len(local0))
@ -931,17 +931,17 @@ func TestDropFiles(t *testing.T) {
m.Drop(protocol.LocalDeviceID)
h = haveList(m, protocol.LocalDeviceID)
h = haveList(t, m, protocol.LocalDeviceID)
if len(h) != 0 {
t.Errorf("Incorrect number of files after drop, %d != %d", len(h), 0)
}
h = haveList(m, remoteDevice0)
h = haveList(t, m, remoteDevice0)
if len(h) != len(remote0) {
t.Errorf("Incorrect number of files after update, %d != %d", len(h), len(local0))
}
g = globalList(m)
g = globalList(t, m)
if len(g) != len(remote0) {
// the ones in remote0 remain
t.Errorf("Incorrect global files after update, %d != %d", len(g), len(remote0))
@ -961,20 +961,20 @@ func TestIssue4701(t *testing.T) {
s.Update(protocol.LocalDeviceID, localHave)
if c := localSize(s); c.Files != 1 {
if c := localSize(t, s); c.Files != 1 {
t.Errorf("Expected 1 local file, got %v", c.Files)
}
if c := globalSize(s); c.Files != 1 {
if c := globalSize(t, s); c.Files != 1 {
t.Errorf("Expected 1 global file, got %v", c.Files)
}
localHave[1].LocalFlags = 0
s.Update(protocol.LocalDeviceID, localHave)
if c := localSize(s); c.Files != 2 {
if c := localSize(t, s); c.Files != 2 {
t.Errorf("Expected 2 local files, got %v", c.Files)
}
if c := globalSize(s); c.Files != 2 {
if c := globalSize(t, s); c.Files != 2 {
t.Errorf("Expected 2 global files, got %v", c.Files)
}
@ -982,10 +982,10 @@ func TestIssue4701(t *testing.T) {
localHave[1].LocalFlags = protocol.FlagLocalIgnored
s.Update(protocol.LocalDeviceID, localHave)
if c := localSize(s); c.Files != 0 {
if c := localSize(t, s); c.Files != 0 {
t.Errorf("Expected 0 local files, got %v", c.Files)
}
if c := globalSize(s); c.Files != 0 {
if c := globalSize(t, s); c.Files != 0 {
t.Errorf("Expected 0 global files, got %v", c.Files)
}
}
@ -1009,7 +1009,7 @@ func TestWithHaveSequence(t *testing.T) {
replace(s, protocol.LocalDeviceID, localHave)
i := 2
snap := s.Snapshot()
snap := snapshot(t, s)
defer snap.Release()
snap.WithHaveSequence(int64(i), func(fi protocol.FileIntf) bool {
if f := fi.(protocol.FileInfo); !f.IsEquivalent(localHave[i-1], 0) {
@ -1061,7 +1061,7 @@ loop:
break loop
default:
}
snap := s.Snapshot()
snap := snapshot(t, s)
snap.WithHaveSequence(prevSeq+1, func(fi protocol.FileIntf) bool {
if fi.SequenceNo() < prevSeq+1 {
t.Fatal("Skipped ", prevSeq+1, fi.SequenceNo())
@ -1089,11 +1089,11 @@ func TestIssue4925(t *testing.T) {
replace(s, protocol.LocalDeviceID, localHave)
for _, prefix := range []string{"dir", "dir/"} {
pl := haveListPrefixed(s, protocol.LocalDeviceID, prefix)
pl := haveListPrefixed(t, s, protocol.LocalDeviceID, prefix)
if l := len(pl); l != 2 {
t.Errorf("Expected 2, got %v local items below %v", l, prefix)
}
pl = globalListPrefixed(s, prefix)
pl = globalListPrefixed(t, s, prefix)
if l := len(pl); l != 2 {
t.Errorf("Expected 2, got %v global items below %v", l, prefix)
}
@ -1114,24 +1114,24 @@ func TestMoveGlobalBack(t *testing.T) {
s.Update(protocol.LocalDeviceID, localHave)
s.Update(remoteDevice0, remote0Have)
if need := needList(s, protocol.LocalDeviceID); len(need) != 1 {
if need := needList(t, s, protocol.LocalDeviceID); len(need) != 1 {
t.Error("Expected 1 local need, got", need)
} else if !need[0].IsEquivalent(remote0Have[0], 0) {
t.Errorf("Local need incorrect;\n A: %v !=\n E: %v", need[0], remote0Have[0])
}
checkNeed(t, s, protocol.LocalDeviceID, remote0Have[:1])
if need := needList(s, remoteDevice0); len(need) != 0 {
if need := needList(t, s, remoteDevice0); len(need) != 0 {
t.Error("Expected no need for remote 0, got", need)
}
checkNeed(t, s, remoteDevice0, nil)
ls := localSize(s)
ls := localSize(t, s)
if haveBytes := localHave[0].Size; ls.Bytes != haveBytes {
t.Errorf("Incorrect LocalSize bytes; %d != %d", ls.Bytes, haveBytes)
}
gs := globalSize(s)
gs := globalSize(t, s)
if globalBytes := remote0Have[0].Size; gs.Bytes != globalBytes {
t.Errorf("Incorrect GlobalSize bytes; %d != %d", gs.Bytes, globalBytes)
}
@ -1142,24 +1142,24 @@ func TestMoveGlobalBack(t *testing.T) {
remote0Have[0].Version = remote0Have[0].Version.Update(remoteDevice0.Short()).DropOthers(remoteDevice0.Short())
s.Update(remoteDevice0, remote0Have)
if need := needList(s, remoteDevice0); len(need) != 1 {
if need := needList(t, s, remoteDevice0); len(need) != 1 {
t.Error("Expected 1 need for remote 0, got", need)
} else if !need[0].IsEquivalent(localHave[0], 0) {
t.Errorf("Need for remote 0 incorrect;\n A: %v !=\n E: %v", need[0], localHave[0])
}
checkNeed(t, s, remoteDevice0, localHave[:1])
if need := needList(s, protocol.LocalDeviceID); len(need) != 0 {
if need := needList(t, s, protocol.LocalDeviceID); len(need) != 0 {
t.Error("Expected no local need, got", need)
}
checkNeed(t, s, protocol.LocalDeviceID, nil)
ls = localSize(s)
ls = localSize(t, s)
if haveBytes := localHave[0].Size; ls.Bytes != haveBytes {
t.Errorf("Incorrect LocalSize bytes; %d != %d", ls.Bytes, haveBytes)
}
gs = globalSize(s)
gs = globalSize(t, s)
if globalBytes := localHave[0].Size; gs.Bytes != globalBytes {
t.Errorf("Incorrect GlobalSize bytes; %d != %d", gs.Bytes, globalBytes)
}
@ -1181,7 +1181,7 @@ func TestIssue5007(t *testing.T) {
s.Update(remoteDevice0, fs)
if need := needList(s, protocol.LocalDeviceID); len(need) != 1 {
if need := needList(t, s, protocol.LocalDeviceID); len(need) != 1 {
t.Fatal("Expected 1 local need, got", need)
} else if !need[0].IsEquivalent(fs[0], 0) {
t.Fatalf("Local need incorrect;\n A: %v !=\n E: %v", need[0], fs[0])
@ -1191,7 +1191,7 @@ func TestIssue5007(t *testing.T) {
fs[0].LocalFlags = protocol.FlagLocalIgnored
s.Update(protocol.LocalDeviceID, fs)
if need := needList(s, protocol.LocalDeviceID); len(need) != 0 {
if need := needList(t, s, protocol.LocalDeviceID); len(need) != 0 {
t.Fatal("Expected no local need, got", need)
}
checkNeed(t, s, protocol.LocalDeviceID, nil)
@ -1211,7 +1211,7 @@ func TestNeedDeleted(t *testing.T) {
s.Update(remoteDevice0, fs)
if need := needList(s, protocol.LocalDeviceID); len(need) != 0 {
if need := needList(t, s, protocol.LocalDeviceID); len(need) != 0 {
t.Fatal("Expected no local need, got", need)
}
checkNeed(t, s, protocol.LocalDeviceID, nil)
@ -1220,7 +1220,7 @@ func TestNeedDeleted(t *testing.T) {
fs[0].Version = fs[0].Version.Update(remoteDevice0.Short())
s.Update(remoteDevice0, fs)
if need := needList(s, protocol.LocalDeviceID); len(need) != 1 {
if need := needList(t, s, protocol.LocalDeviceID); len(need) != 1 {
t.Fatal("Expected 1 local need, got", need)
} else if !need[0].IsEquivalent(fs[0], 0) {
t.Fatalf("Local need incorrect;\n A: %v !=\n E: %v", need[0], fs[0])
@ -1231,7 +1231,7 @@ func TestNeedDeleted(t *testing.T) {
fs[0].Version = fs[0].Version.Update(remoteDevice0.Short())
s.Update(remoteDevice0, fs)
if need := needList(s, protocol.LocalDeviceID); len(need) != 0 {
if need := needList(t, s, protocol.LocalDeviceID); len(need) != 0 {
t.Fatal("Expected no local need, got", need)
}
checkNeed(t, s, protocol.LocalDeviceID, nil)
@ -1261,22 +1261,22 @@ func TestReceiveOnlyAccounting(t *testing.T) {
replace(s, protocol.LocalDeviceID, files)
replace(s, remote, files)
if n := localSize(s).Files; n != 3 {
if n := localSize(t, s).Files; n != 3 {
t.Fatal("expected 3 local files initially, not", n)
}
if n := localSize(s).Bytes; n != 30 {
if n := localSize(t, s).Bytes; n != 30 {
t.Fatal("expected 30 local bytes initially, not", n)
}
if n := globalSize(s).Files; n != 3 {
if n := globalSize(t, s).Files; n != 3 {
t.Fatal("expected 3 global files initially, not", n)
}
if n := globalSize(s).Bytes; n != 30 {
if n := globalSize(t, s).Bytes; n != 30 {
t.Fatal("expected 30 global bytes initially, not", n)
}
if n := receiveOnlyChangedSize(s).Files; n != 0 {
if n := receiveOnlyChangedSize(t, s).Files; n != 0 {
t.Fatal("expected 0 receive only changed files initially, not", n)
}
if n := receiveOnlyChangedSize(s).Bytes; n != 0 {
if n := receiveOnlyChangedSize(t, s).Bytes; n != 0 {
t.Fatal("expected 0 receive only changed bytes initially, not", n)
}
@ -1291,22 +1291,22 @@ func TestReceiveOnlyAccounting(t *testing.T) {
// Check that we see the files
if n := localSize(s).Files; n != 3 {
if n := localSize(t, s).Files; n != 3 {
t.Fatal("expected 3 local files after local change, not", n)
}
if n := localSize(s).Bytes; n != 120 {
if n := localSize(t, s).Bytes; n != 120 {
t.Fatal("expected 120 local bytes after local change, not", n)
}
if n := globalSize(s).Files; n != 3 {
if n := globalSize(t, s).Files; n != 3 {
t.Fatal("expected 3 global files after local change, not", n)
}
if n := globalSize(s).Bytes; n != 30 {
if n := globalSize(t, s).Bytes; n != 30 {
t.Fatal("expected 30 global files after local change, not", n)
}
if n := receiveOnlyChangedSize(s).Files; n != 1 {
if n := receiveOnlyChangedSize(t, s).Files; n != 1 {
t.Fatal("expected 1 receive only changed file after local change, not", n)
}
if n := receiveOnlyChangedSize(s).Bytes; n != 100 {
if n := receiveOnlyChangedSize(t, s).Bytes; n != 100 {
t.Fatal("expected 100 receive only changed btyes after local change, not", n)
}
@ -1322,22 +1322,22 @@ func TestReceiveOnlyAccounting(t *testing.T) {
// Check that we see the files, same data as initially
if n := localSize(s).Files; n != 3 {
if n := localSize(t, s).Files; n != 3 {
t.Fatal("expected 3 local files after revert, not", n)
}
if n := localSize(s).Bytes; n != 30 {
if n := localSize(t, s).Bytes; n != 30 {
t.Fatal("expected 30 local bytes after revert, not", n)
}
if n := globalSize(s).Files; n != 3 {
if n := globalSize(t, s).Files; n != 3 {
t.Fatal("expected 3 global files after revert, not", n)
}
if n := globalSize(s).Bytes; n != 30 {
if n := globalSize(t, s).Bytes; n != 30 {
t.Fatal("expected 30 global bytes after revert, not", n)
}
if n := receiveOnlyChangedSize(s).Files; n != 0 {
if n := receiveOnlyChangedSize(t, s).Files; n != 0 {
t.Fatal("expected 0 receive only changed files after revert, not", n)
}
if n := receiveOnlyChangedSize(s).Bytes; n != 0 {
if n := receiveOnlyChangedSize(t, s).Bytes; n != 0 {
t.Fatal("expected 0 receive only changed bytes after revert, not", n)
}
}
@ -1366,7 +1366,7 @@ func TestNeedAfterUnignore(t *testing.T) {
local.ModifiedS = 0
s.Update(protocol.LocalDeviceID, fileList{local})
if need := needList(s, protocol.LocalDeviceID); len(need) != 1 {
if need := needList(t, s, protocol.LocalDeviceID); len(need) != 1 {
t.Fatal("Expected one local need, got", need)
} else if !need[0].IsEquivalent(remote, 0) {
t.Fatalf("Got %v, expected %v", need[0], remote)
@ -1387,7 +1387,7 @@ func TestRemoteInvalidNotAccounted(t *testing.T) {
}
s.Update(remoteDevice0, files)
global := globalSize(s)
global := globalSize(t, s)
if global.Files != 1 {
t.Error("Expected one file in global size, not", global.Files)
}
@ -1411,7 +1411,7 @@ func TestNeedWithNewerInvalid(t *testing.T) {
s.Update(remoteDevice0, fileList{file})
s.Update(remoteDevice1, fileList{file})
need := needList(s, protocol.LocalDeviceID)
need := needList(t, s, protocol.LocalDeviceID)
if len(need) != 1 {
t.Fatal("Locally missing file should be needed")
}
@ -1427,7 +1427,7 @@ func TestNeedWithNewerInvalid(t *testing.T) {
s.Update(remoteDevice1, fileList{inv})
// We still have an old file, we need the newest valid file
need = needList(s, protocol.LocalDeviceID)
need = needList(t, s, protocol.LocalDeviceID)
if len(need) != 1 {
t.Fatal("Locally missing file should be needed regardless of invalid files")
}
@ -1452,13 +1452,13 @@ func TestNeedAfterDeviceRemove(t *testing.T) {
s.Update(remoteDevice0, fs)
if need := needList(s, protocol.LocalDeviceID); len(need) != 1 {
if need := needList(t, s, protocol.LocalDeviceID); len(need) != 1 {
t.Fatal("Expected one local need, got", need)
}
s.Drop(remoteDevice0)
if need := needList(s, protocol.LocalDeviceID); len(need) != 0 {
if need := needList(t, s, protocol.LocalDeviceID); len(need) != 0 {
t.Fatal("Expected no local need, got", need)
}
checkNeed(t, s, protocol.LocalDeviceID, nil)
@ -1481,7 +1481,7 @@ func TestCaseSensitive(t *testing.T) {
replace(s, protocol.LocalDeviceID, local)
gf := globalList(s)
gf := globalList(t, s)
if l := len(gf); l != len(local) {
t.Fatalf("Incorrect len %d != %d for global list", l, len(local))
}
@ -1551,7 +1551,7 @@ func TestSequenceIndex(t *testing.T) {
// a subset of those files if we manage to run before a complete
// update has happened since our last iteration.
latest = latest[:0]
snap := s.Snapshot()
snap := snapshot(t, s)
snap.WithHaveSequence(seq+1, func(f protocol.FileIntf) bool {
seen[f.FileName()] = f
latest = append(latest, f)
@ -1617,7 +1617,7 @@ func TestIgnoreAfterReceiveOnly(t *testing.T) {
s.Update(protocol.LocalDeviceID, fs)
snap := s.Snapshot()
snap := snapshot(t, s)
defer snap.Release()
if f, ok := snap.Get(protocol.LocalDeviceID, file); !ok {
t.Error("File missing in db")
@ -1654,7 +1654,7 @@ func TestUpdateWithOneFileTwice(t *testing.T) {
s.Update(protocol.LocalDeviceID, fs)
snap := s.Snapshot()
snap := snapshot(t, s)
defer snap.Release()
count := 0
snap.WithHaveSequence(0, func(f protocol.FileIntf) bool {
@ -1678,7 +1678,7 @@ func TestNeedRemoteOnly(t *testing.T) {
}
s.Update(remoteDevice0, remote0Have)
need := needSize(s, remoteDevice0)
need := needSize(t, s, remoteDevice0)
if !need.Equal(db.Counts{}) {
t.Error("Expected nothing needed, got", need)
}
@ -1697,14 +1697,14 @@ func TestNeedRemoteAfterReset(t *testing.T) {
s.Update(protocol.LocalDeviceID, files)
s.Update(remoteDevice0, files)
need := needSize(s, remoteDevice0)
need := needSize(t, s, remoteDevice0)
if !need.Equal(db.Counts{}) {
t.Error("Expected nothing needed, got", need)
}
s.Drop(remoteDevice0)
need = needSize(s, remoteDevice0)
need = needSize(t, s, remoteDevice0)
if exp := (db.Counts{Files: 1}); !need.Equal(exp) {
t.Errorf("Expected %v, got %v", exp, need)
}
@ -1723,10 +1723,10 @@ func TestIgnoreLocalChanged(t *testing.T) {
}
s.Update(protocol.LocalDeviceID, files)
if c := globalSize(s).Files; c != 0 {
if c := globalSize(t, s).Files; c != 0 {
t.Error("Expected no global file, got", c)
}
if c := localSize(s).Files; c != 1 {
if c := localSize(t, s).Files; c != 1 {
t.Error("Expected one local file, got", c)
}
@ -1734,10 +1734,10 @@ func TestIgnoreLocalChanged(t *testing.T) {
files[0].LocalFlags = protocol.FlagLocalIgnored
s.Update(protocol.LocalDeviceID, files)
if c := globalSize(s).Files; c != 0 {
if c := globalSize(t, s).Files; c != 0 {
t.Error("Expected no global file, got", c)
}
if c := localSize(s).Files; c != 0 {
if c := localSize(t, s).Files; c != 0 {
t.Error("Expected no local file, got", c)
}
}
@ -1789,26 +1789,26 @@ func replace(fs *db.FileSet, device protocol.DeviceID, files []protocol.FileInfo
fs.Update(device, files)
}
func localSize(fs *db.FileSet) db.Counts {
snap := fs.Snapshot()
func localSize(t testing.TB, fs *db.FileSet) db.Counts {
snap := snapshot(t, fs)
defer snap.Release()
return snap.LocalSize()
}
func globalSize(fs *db.FileSet) db.Counts {
snap := fs.Snapshot()
func globalSize(t testing.TB, fs *db.FileSet) db.Counts {
snap := snapshot(t, fs)
defer snap.Release()
return snap.GlobalSize()
}
func needSize(fs *db.FileSet, id protocol.DeviceID) db.Counts {
snap := fs.Snapshot()
func needSize(t testing.TB, fs *db.FileSet, id protocol.DeviceID) db.Counts {
snap := snapshot(t, fs)
defer snap.Release()
return snap.NeedSize(id)
}
func receiveOnlyChangedSize(fs *db.FileSet) db.Counts {
snap := fs.Snapshot()
func receiveOnlyChangedSize(t testing.TB, fs *db.FileSet) db.Counts {
snap := snapshot(t, fs)
defer snap.Release()
return snap.ReceiveOnlyChangedSize()
}
@ -1833,7 +1833,7 @@ func filesToCounts(files []protocol.FileInfo) db.Counts {
func checkNeed(t testing.TB, s *db.FileSet, dev protocol.DeviceID, expected []protocol.FileInfo) {
t.Helper()
counts := needSize(s, dev)
counts := needSize(t, s, dev)
if exp := filesToCounts(expected); !exp.Equal(counts) {
t.Errorf("Count incorrect (%v): expected %v, got %v", dev, exp, counts)
}
@ -1860,3 +1860,12 @@ func newFileSet(t testing.TB, folder string, fs fs.Filesystem, ll *db.Lowlevel)
}
return fset
}
func snapshot(t testing.TB, fset *db.FileSet) *db.Snapshot {
t.Helper()
snap, err := fset.Snapshot()
if err != nil {
t.Fatal(err)
}
return snap
}

View File

@ -92,6 +92,15 @@ func newFileSet(t testing.TB, folder string, fs fs.Filesystem, db *Lowlevel) *Fi
return fset
}
func snapshot(t testing.TB, fset *FileSet) *Snapshot {
t.Helper()
snap, err := fset.Snapshot()
if err != nil {
t.Fatal(err)
}
return snap
}
// The following commented tests were used to generate jsons files to stdout for
// future tests and are kept here for reference (reuse).

View File

@ -27,6 +27,7 @@ import (
"github.com/syncthing/syncthing/lib/protocol"
"github.com/syncthing/syncthing/lib/scanner"
"github.com/syncthing/syncthing/lib/stats"
"github.com/syncthing/syncthing/lib/svcutil"
"github.com/syncthing/syncthing/lib/sync"
"github.com/syncthing/syncthing/lib/util"
"github.com/syncthing/syncthing/lib/versioner"
@ -87,7 +88,7 @@ type syncRequest struct {
}
type puller interface {
pull() bool // true when successful and should not be retried
pull() (bool, error) // true when successful and should not be retried
}
func newFolder(model *model, fset *db.FileSet, ignores *ignore.Matcher, cfg config.FolderConfiguration, evLogger events.Logger, ioLimiter *byteSemaphore, ver versioner.Versioner) folder {
@ -164,16 +165,20 @@ func (f *folder) Serve(ctx context.Context) error {
initialCompleted := f.initialScanFinished
for {
var err error
select {
case <-f.ctx.Done():
close(f.done)
return nil
case <-f.pullScheduled:
f.pull()
_, err = f.pull()
case <-f.pullFailTimer.C:
if !f.pull() && f.pullPause < 60*f.pullBasePause() {
var success bool
success, err = f.pull()
if (err != nil || !success) && f.pullPause < 60*f.pullBasePause() {
// Back off from retrying to pull
f.pullPause *= 2
}
@ -181,18 +186,19 @@ func (f *folder) Serve(ctx context.Context) error {
case <-initialCompleted:
// Initial scan has completed, we should do a pull
initialCompleted = nil // never hit this case again
f.pull()
_, err = f.pull()
case <-f.forcedRescanRequested:
f.handleForcedRescans()
err = f.handleForcedRescans()
case <-f.scanTimer.C:
l.Debugln(f, "Scanning due to timer")
f.scanTimerFired()
err = f.scanTimerFired()
case req := <-f.doInSyncChan:
l.Debugln(f, "Running something due to request")
req.err <- req.fn()
err = req.fn()
req.err <- err
case next := <-f.scanDelay:
l.Debugln(f, "Delaying scan")
@ -200,16 +206,23 @@ func (f *folder) Serve(ctx context.Context) error {
case fsEvents := <-f.watchChan:
l.Debugln(f, "Scan due to watcher")
f.scanSubdirs(fsEvents)
err = f.scanSubdirs(fsEvents)
case <-f.restartWatchChan:
l.Debugln(f, "Restart watcher")
f.restartWatch()
err = f.restartWatch()
case <-f.versionCleanupTimer.C:
l.Debugln(f, "Doing version cleanup")
f.versionCleanupTimerFired()
}
if err != nil {
if svcutil.IsFatal(err) {
return err
}
f.setError(err)
}
}
}
@ -307,7 +320,7 @@ func (f *folder) getHealthErrorWithoutIgnores() error {
return nil
}
func (f *folder) pull() (success bool) {
func (f *folder) pull() (success bool, err error) {
f.pullFailTimer.Stop()
select {
case <-f.pullFailTimer.C:
@ -318,7 +331,7 @@ func (f *folder) pull() (success bool) {
case <-f.initialScanFinished:
default:
// Once the initial scan finished, a pull will be scheduled
return true
return true, nil
}
defer func() {
@ -330,7 +343,10 @@ func (f *folder) pull() (success bool) {
// If there is nothing to do, don't even enter sync-waiting state.
abort := true
snap := f.fset.Snapshot()
snap, err := f.dbSnapshot()
if err != nil {
return false, err
}
snap.WithNeed(protocol.LocalDeviceID, func(intf protocol.FileIntf) bool {
abort = false
return false
@ -341,16 +357,16 @@ func (f *folder) pull() (success bool) {
f.errorsMut.Lock()
f.pullErrors = nil
f.errorsMut.Unlock()
return true
return true, nil
}
// Abort early (before acquiring a token) if there's a folder error
err := f.getHealthErrorWithoutIgnores()
f.setError(err)
err = f.getHealthErrorWithoutIgnores()
if err != nil {
l.Debugln("Skipping pull of", f.Description(), "due to folder error:", err)
return false
return false, err
}
f.setError(nil)
// Send only folder doesn't do any io, it only checks for out-of-sync
// items that differ in metadata and updates those.
@ -358,8 +374,7 @@ func (f *folder) pull() (success bool) {
f.setState(FolderSyncWaiting)
if err := f.ioLimiter.takeWithContext(f.ctx, 1); err != nil {
f.setError(err)
return true
return true, err
}
defer f.ioLimiter.give(1)
}
@ -374,23 +389,23 @@ func (f *folder) pull() (success bool) {
}
}()
err = f.getHealthErrorAndLoadIgnores()
f.setError(err)
if err != nil {
l.Debugln("Skipping pull of", f.Description(), "due to folder error:", err)
return false
return false, err
}
success = f.puller.pull()
success, err = f.puller.pull()
if success {
return true
if success && err == nil {
return true, nil
}
// Pulling failed, try again later.
delay := f.pullPause + time.Since(startTime)
l.Infof("Folder %v isn't making sync progress - retrying in %v.", f.Description(), util.NiceDurationString(delay))
f.pullFailTimer.Reset(delay)
return false
return false, err
}
func (f *folder) scanSubdirs(subDirs []string) error {
@ -399,7 +414,6 @@ func (f *folder) scanSubdirs(subDirs []string) error {
oldHash := f.ignores.Hash()
err := f.getHealthErrorAndLoadIgnores()
f.setError(err)
if err != nil {
// If there is a health error we set it as the folder error. We do not
// clear the folder error if there is no health error, as there might be
@ -407,6 +421,7 @@ func (f *folder) scanSubdirs(subDirs []string) error {
// we do not use the CheckHealth() convenience function here.
return err
}
f.setError(nil)
// Check on the way out if the ignore patterns changed as part of scanning
// this folder. If they did we should schedule a pull of the folder so that
@ -443,7 +458,10 @@ func (f *folder) scanSubdirs(subDirs []string) error {
// Clean the list of subitems to ensure that we start at a known
// directory, and don't scan subdirectories of things we've already
// scanned.
snap := f.fset.Snapshot()
snap, err := f.dbSnapshot()
if err != nil {
return err
}
subDirs = unifySubs(subDirs, func(file string) bool {
_, ok := snap.Get(protocol.LocalDeviceID, file)
return ok
@ -560,7 +578,10 @@ func (f *folder) scanSubdirsBatchAppendFunc(batch *fileInfoBatch) batchAppendFun
func (f *folder) scanSubdirsChangedAndNew(subDirs []string, batch *fileInfoBatch, batchAppend batchAppendFunc) (int, error) {
changes := 0
snap := f.fset.Snapshot()
snap, err := f.dbSnapshot()
if err != nil {
return changes, err
}
defer snap.Release()
// If we return early e.g. due to a folder health error, the scan needs
@ -629,7 +650,10 @@ func (f *folder) scanSubdirsDeletedAndIgnored(subDirs []string, batch *fileInfoB
var toIgnore []db.FileInfoTruncated
ignoredParent := ""
changes := 0
snap := f.fset.Snapshot()
snap, err := f.dbSnapshot()
if err != nil {
return 0, err
}
defer snap.Release()
for _, sub := range subDirs {
@ -821,7 +845,7 @@ func (f *folder) findRename(snap *db.Snapshot, file protocol.FileInfo, alreadyUs
return nf, found
}
func (f *folder) scanTimerFired() {
func (f *folder) scanTimerFired() error {
err := f.scanSubdirs(nil)
select {
@ -836,6 +860,8 @@ func (f *folder) scanTimerFired() {
}
f.Reschedule()
return err
}
func (f *folder) versionCleanupTimerFired() {
@ -884,10 +910,10 @@ func (f *folder) scheduleWatchRestart() {
// restartWatch should only ever be called synchronously. If you want to use
// this asynchronously, you should probably use scheduleWatchRestart instead.
func (f *folder) restartWatch() {
func (f *folder) restartWatch() error {
f.stopWatch()
f.startWatch()
f.scanSubdirs(nil)
return f.scanSubdirs(nil)
}
// startWatch should only ever be called synchronously. If you want to use
@ -1166,7 +1192,7 @@ func (f *folder) emitDiskChangeEvents(fs []protocol.FileInfo, typeOfEvent events
}
}
func (f *folder) handleForcedRescans() {
func (f *folder) handleForcedRescans() error {
f.forcedRescanPathsMut.Lock()
paths := make([]string, 0, len(f.forcedRescanPaths))
for path := range f.forcedRescanPaths {
@ -1175,7 +1201,7 @@ func (f *folder) handleForcedRescans() {
f.forcedRescanPaths = make(map[string]struct{})
f.forcedRescanPathsMut.Unlock()
if len(paths) == 0 {
return
return nil
}
batch := newFileInfoBatch(func(fs []protocol.FileInfo) error {
@ -1183,10 +1209,16 @@ func (f *folder) handleForcedRescans() {
return nil
})
snap := f.fset.Snapshot()
snap, err := f.dbSnapshot()
if err != nil {
return err
}
defer snap.Release()
for _, path := range paths {
_ = batch.flushIfFull()
if err := batch.flushIfFull(); err != nil {
return err
}
fi, ok := snap.Get(protocol.LocalDeviceID, path)
if !ok {
@ -1196,11 +1228,21 @@ func (f *folder) handleForcedRescans() {
batch.append(fi)
}
snap.Release()
if err = batch.flush(); err != nil {
return err
}
_ = batch.flush()
return f.scanSubdirs(paths)
}
_ = f.scanSubdirs(paths)
// dbSnapshot gets a snapshot from the fileset, and wraps any error
// in a svcutil.FatalErr.
func (f *folder) dbSnapshot() (*db.Snapshot, error) {
snap, err := f.fset.Snapshot()
if err != nil {
return nil, svcutil.AsFatalErr(err, svcutil.ExitError)
}
return snap, nil
}
// The exists function is expected to return true for all known paths

View File

@ -32,10 +32,10 @@ func newReceiveEncryptedFolder(model *model, fset *db.FileSet, ignores *ignore.M
}
func (f *receiveEncryptedFolder) Revert() {
f.doInSync(func() error { f.revert(); return nil })
f.doInSync(f.revert)
}
func (f *receiveEncryptedFolder) revert() {
func (f *receiveEncryptedFolder) revert() error {
l.Infof("Reverting unexpected items in folder %v (receive-encrypted)", f.Description())
f.setState(FolderScanning)
@ -46,7 +46,10 @@ func (f *receiveEncryptedFolder) revert() {
return nil
})
snap := f.fset.Snapshot()
snap, err := f.dbSnapshot()
if err != nil {
return err
}
defer snap.Release()
var iterErr error
var dirs []string
@ -85,12 +88,10 @@ func (f *receiveEncryptedFolder) revert() {
f.revertHandleDirs(dirs, snap)
if iterErr == nil {
iterErr = batch.flush()
}
if iterErr != nil {
l.Infoln("Failed to delete unexpected items:", iterErr)
return iterErr
}
return batch.flush()
}
func (f *receiveEncryptedFolder) revertHandleDirs(dirs []string, snap *db.Snapshot) {

View File

@ -63,10 +63,10 @@ func newReceiveOnlyFolder(model *model, fset *db.FileSet, ignores *ignore.Matche
}
func (f *receiveOnlyFolder) Revert() {
f.doInSync(func() error { f.revert(); return nil })
f.doInSync(f.revert)
}
func (f *receiveOnlyFolder) revert() {
func (f *receiveOnlyFolder) revert() error {
l.Infof("Reverting folder %v", f.Description)
f.setState(FolderScanning)
@ -84,7 +84,10 @@ func (f *receiveOnlyFolder) revert() {
batch := make([]protocol.FileInfo, 0, maxBatchSizeFiles)
batchSizeBytes := 0
snap := f.fset.Snapshot()
snap, err := f.dbSnapshot()
if err != nil {
return err
}
defer snap.Release()
snap.WithHave(protocol.LocalDeviceID, func(intf protocol.FileIntf) bool {
fi := intf.(protocol.FileInfo)
@ -161,6 +164,8 @@ func (f *receiveOnlyFolder) revert() {
// pull by itself. Make sure we schedule one so that we start
// downloading files.
f.SchedulePull()
return nil
}
// deleteQueue handles deletes by delegating to a handler and queuing

View File

@ -384,7 +384,7 @@ func TestRecvOnlyRemoteUndoChanges(t *testing.T) {
// Do the same changes on the remote
files := make([]protocol.FileInfo, 0, 2)
snap := f.fset.Snapshot()
snap := fsetSnapshot(t, f.fset)
snap.WithHave(protocol.LocalDeviceID, func(fi protocol.FileIntf) bool {
if n := fi.FileName(); n != file && n != knownFile {
return true

View File

@ -36,11 +36,14 @@ func (f *sendOnlyFolder) PullErrors() []FileError {
}
// pull checks need for files that only differ by metadata (no changes on disk)
func (f *sendOnlyFolder) pull() bool {
func (f *sendOnlyFolder) pull() (bool, error) {
batch := make([]protocol.FileInfo, 0, maxBatchSizeFiles)
batchSizeBytes := 0
snap := f.fset.Snapshot()
snap, err := f.dbSnapshot()
if err != nil {
return false, err
}
defer snap.Release()
snap.WithNeed(protocol.LocalDeviceID, func(intf protocol.FileIntf) bool {
if len(batch) == maxBatchSizeFiles || batchSizeBytes > maxBatchSizeBytes {
@ -83,14 +86,14 @@ func (f *sendOnlyFolder) pull() bool {
f.updateLocalsFromPulling(batch)
}
return true
return true, nil
}
func (f *sendOnlyFolder) Override() {
f.doInSync(func() error { f.override(); return nil })
f.doInSync(f.override)
}
func (f *sendOnlyFolder) override() {
func (f *sendOnlyFolder) override() error {
l.Infoln("Overriding global state on folder", f.Description())
f.setState(FolderScanning)
@ -98,7 +101,10 @@ func (f *sendOnlyFolder) override() {
batch := make([]protocol.FileInfo, 0, maxBatchSizeFiles)
batchSizeBytes := 0
snap := f.fset.Snapshot()
snap, err := f.dbSnapshot()
if err != nil {
return err
}
defer snap.Release()
snap.WithNeed(protocol.LocalDeviceID, func(fi protocol.FileIntf) bool {
need := fi.(protocol.FileInfo)
@ -130,4 +136,5 @@ func (f *sendOnlyFolder) override() {
if len(batch) > 0 {
f.updateLocalsFromScanning(batch)
}
return nil
}

View File

@ -156,7 +156,7 @@ func newSendReceiveFolder(model *model, fset *db.FileSet, ignores *ignore.Matche
// pull returns true if it manages to get all needed items from peers, i.e. get
// the device in sync with the global state.
func (f *sendReceiveFolder) pull() bool {
func (f *sendReceiveFolder) pull() (bool, error) {
l.Debugf("%v pulling", f)
scanChan := make(chan string)
@ -173,10 +173,11 @@ func (f *sendReceiveFolder) pull() bool {
f.pullErrors = nil
f.errorsMut.Unlock()
var err error
for tries := 0; tries < maxPullerIterations; tries++ {
select {
case <-f.ctx.Done():
return false
return false, f.ctx.Err()
default:
}
@ -184,7 +185,10 @@ func (f *sendReceiveFolder) pull() bool {
// it to FolderSyncing during the last iteration.
f.setState(FolderSyncPreparing)
changed = f.pullerIteration(scanChan)
changed, err = f.pullerIteration(scanChan)
if err != nil {
return false, err
}
l.Debugln(f, "changed", changed, "on try", tries+1)
@ -219,19 +223,22 @@ func (f *sendReceiveFolder) pull() bool {
})
}
return changed == 0
return changed == 0, nil
}
// pullerIteration runs a single puller iteration for the given folder and
// returns the number items that should have been synced (even those that
// might have failed). One puller iteration handles all files currently
// flagged as needed in the folder.
func (f *sendReceiveFolder) pullerIteration(scanChan chan<- string) int {
func (f *sendReceiveFolder) pullerIteration(scanChan chan<- string) (int, error) {
f.errorsMut.Lock()
f.tempPullErrors = make(map[string]string)
f.errorsMut.Unlock()
snap := f.fset.Snapshot()
snap, err := f.dbSnapshot()
if err != nil {
return 0, err
}
defer snap.Release()
pullChan := make(chan pullBlockState)
@ -265,7 +272,7 @@ func (f *sendReceiveFolder) pullerIteration(scanChan chan<- string) int {
pullWg.Add(1)
go func() {
// pullerRoutine finishes when pullChan is closed
f.pullerRoutine(pullChan, finisherChan)
f.pullerRoutine(snap, pullChan, finisherChan)
pullWg.Done()
}()
@ -300,7 +307,7 @@ func (f *sendReceiveFolder) pullerIteration(scanChan chan<- string) int {
f.queue.Reset()
return changed
return changed, nil
}
func (f *sendReceiveFolder) processNeeded(snap *db.Snapshot, dbUpdateChan chan<- dbUpdateJob, copyChan chan<- copyBlocksState, scanChan chan<- string) (int, map[string]protocol.FileInfo, []protocol.FileInfo, error) {
@ -582,7 +589,7 @@ func (f *sendReceiveFolder) handleDir(file protocol.FileInfo, snap *db.Snapshot,
// that don't result in a conflict.
case err == nil && !info.IsDir():
// Check that it is what we have in the database.
curFile, hasCurFile := f.model.CurrentFolderFile(f.folderID, file.Name)
curFile, hasCurFile := snap.Get(protocol.LocalDeviceID, file.Name)
if err := f.scanIfItemChanged(file.Name, info, curFile, hasCurFile, scanChan); err != nil {
err = errors.Wrap(err, "handling dir")
f.newPullError(file.Name, err)
@ -766,7 +773,7 @@ func (f *sendReceiveFolder) handleSymlinkCheckExisting(file protocol.FileInfo, s
return err
}
// Check that it is what we have in the database.
curFile, hasCurFile := f.model.CurrentFolderFile(f.folderID, file.Name)
curFile, hasCurFile := snap.Get(protocol.LocalDeviceID, file.Name)
if err := f.scanIfItemChanged(file.Name, info, curFile, hasCurFile, scanChan); err != nil {
return err
}
@ -1427,7 +1434,7 @@ func (f *sendReceiveFolder) verifyBuffer(buf []byte, block protocol.BlockInfo) e
return nil
}
func (f *sendReceiveFolder) pullerRoutine(in <-chan pullBlockState, out chan<- *sharedPullerState) {
func (f *sendReceiveFolder) pullerRoutine(snap *db.Snapshot, in <-chan pullBlockState, out chan<- *sharedPullerState) {
requestLimiter := newByteSemaphore(f.PullerMaxPendingKiB * 1024)
wg := sync.NewWaitGroup()
@ -1458,13 +1465,13 @@ func (f *sendReceiveFolder) pullerRoutine(in <-chan pullBlockState, out chan<- *
defer wg.Done()
defer requestLimiter.give(bytes)
f.pullBlock(state, out)
f.pullBlock(state, snap, out)
}()
}
wg.Wait()
}
func (f *sendReceiveFolder) pullBlock(state pullBlockState, out chan<- *sharedPullerState) {
func (f *sendReceiveFolder) pullBlock(state pullBlockState, snap *db.Snapshot, out chan<- *sharedPullerState) {
// Get an fd to the temporary file. Technically we don't need it until
// after fetching the block, but if we run into an error here there is
// no point in issuing the request to the network.
@ -1483,7 +1490,7 @@ func (f *sendReceiveFolder) pullBlock(state pullBlockState, out chan<- *sharedPu
}
var lastError error
candidates := f.model.Availability(f.folderID, state.file, state.block)
candidates := f.model.availabilityInSnapshot(f.FolderConfiguration, snap, state.file, state.block)
for {
select {
case <-f.ctx.Done():

View File

@ -135,7 +135,7 @@ func TestHandleFile(t *testing.T) {
copyChan := make(chan copyBlocksState, 1)
f.handleFile(requiredFile, f.fset.Snapshot(), copyChan)
f.handleFile(requiredFile, fsetSnapshot(t, f.fset), copyChan)
// Receive the results
toCopy := <-copyChan
@ -181,7 +181,7 @@ func TestHandleFileWithTemp(t *testing.T) {
copyChan := make(chan copyBlocksState, 1)
f.handleFile(requiredFile, f.fset.Snapshot(), copyChan)
f.handleFile(requiredFile, fsetSnapshot(t, f.fset), copyChan)
// Receive the results
toCopy := <-copyChan
@ -245,7 +245,7 @@ func TestCopierFinder(t *testing.T) {
go f.copierRoutine(copyChan, pullChan, finisherChan)
defer close(copyChan)
f.handleFile(requiredFile, f.fset.Snapshot(), copyChan)
f.handleFile(requiredFile, fsetSnapshot(t, f.fset), copyChan)
timeout := time.After(10 * time.Second)
pulls := make([]pullBlockState, 4)
@ -379,7 +379,7 @@ func TestWeakHash(t *testing.T) {
// Test 1 - no weak hashing, file gets fully repulled (`expectBlocks` pulls).
fo.WeakHashThresholdPct = 101
fo.handleFile(desiredFile, fo.fset.Snapshot(), copyChan)
fo.handleFile(desiredFile, fsetSnapshot(t, fo.fset), copyChan)
var pulls []pullBlockState
timeout := time.After(10 * time.Second)
@ -408,7 +408,7 @@ func TestWeakHash(t *testing.T) {
// Test 2 - using weak hash, expectPulls blocks pulled.
fo.WeakHashThresholdPct = -1
fo.handleFile(desiredFile, fo.fset.Snapshot(), copyChan)
fo.handleFile(desiredFile, fsetSnapshot(t, fo.fset), copyChan)
pulls = pulls[:0]
for len(pulls) < expectPulls {
@ -489,7 +489,7 @@ func TestDeregisterOnFailInCopy(t *testing.T) {
finisherBufferChan := make(chan *sharedPullerState, 1)
finisherChan := make(chan *sharedPullerState)
dbUpdateChan := make(chan dbUpdateJob, 1)
snap := f.fset.Snapshot()
snap := fsetSnapshot(t, f.fset)
copyChan, copyWg := startCopier(f, pullChan, finisherBufferChan)
go f.finisherRoutine(snap, finisherChan, dbUpdateChan, make(chan string))
@ -589,13 +589,13 @@ func TestDeregisterOnFailInPull(t *testing.T) {
finisherBufferChan := make(chan *sharedPullerState)
finisherChan := make(chan *sharedPullerState)
dbUpdateChan := make(chan dbUpdateJob, 1)
snap := f.fset.Snapshot()
snap := fsetSnapshot(t, f.fset)
copyChan, copyWg := startCopier(f, pullChan, finisherBufferChan)
pullWg := sync.NewWaitGroup()
pullWg.Add(1)
go func() {
f.pullerRoutine(pullChan, finisherBufferChan)
f.pullerRoutine(snap, pullChan, finisherBufferChan)
pullWg.Done()
}()
go f.finisherRoutine(snap, finisherChan, dbUpdateChan, make(chan string))
@ -696,7 +696,7 @@ func TestIssue3164(t *testing.T) {
dbUpdateChan := make(chan dbUpdateJob, 1)
f.deleteDir(file, f.fset.Snapshot(), dbUpdateChan, make(chan string))
f.deleteDir(file, fsetSnapshot(t, f.fset), dbUpdateChan, make(chan string))
if _, err := ffs.Stat("issue3164"); !fs.IsNotExist(err) {
t.Fatal(err)
@ -828,7 +828,7 @@ func TestCopyOwner(t *testing.T) {
dbUpdateChan := make(chan dbUpdateJob, 1)
scanChan := make(chan string)
defer close(dbUpdateChan)
f.handleDir(dir, f.fset.Snapshot(), dbUpdateChan, scanChan)
f.handleDir(dir, fsetSnapshot(t, f.fset), dbUpdateChan, scanChan)
select {
case <-dbUpdateChan: // empty the channel for later
case toScan := <-scanChan:
@ -858,7 +858,7 @@ func TestCopyOwner(t *testing.T) {
// but it's the way data is passed around. When the database update
// comes the finisher is done.
snap := f.fset.Snapshot()
snap := fsetSnapshot(t, f.fset)
finisherChan := make(chan *sharedPullerState)
copierChan, copyWg := startCopier(f, nil, finisherChan)
go f.finisherRoutine(snap, finisherChan, dbUpdateChan, nil)
@ -926,7 +926,7 @@ func TestSRConflictReplaceFileByDir(t *testing.T) {
dbUpdateChan := make(chan dbUpdateJob, 1)
scanChan := make(chan string, 1)
f.handleDir(file, f.fset.Snapshot(), dbUpdateChan, scanChan)
f.handleDir(file, fsetSnapshot(t, f.fset), dbUpdateChan, scanChan)
if confls := existingConflicts(name, ffs); len(confls) != 1 {
t.Fatal("Expected one conflict, got", len(confls))
@ -959,7 +959,7 @@ func TestSRConflictReplaceFileByLink(t *testing.T) {
dbUpdateChan := make(chan dbUpdateJob, 1)
scanChan := make(chan string, 1)
f.handleSymlink(file, f.fset.Snapshot(), dbUpdateChan, scanChan)
f.handleSymlink(file, fsetSnapshot(t, f.fset), dbUpdateChan, scanChan)
if confls := existingConflicts(name, ffs); len(confls) != 1 {
t.Fatal("Expected one conflict, got", len(confls))
@ -1001,7 +1001,7 @@ func TestDeleteBehindSymlink(t *testing.T) {
fi.Version = fi.Version.Update(device1.Short())
scanChan := make(chan string, 1)
dbUpdateChan := make(chan dbUpdateJob, 1)
f.deleteFile(fi, f.fset.Snapshot(), dbUpdateChan, scanChan)
f.deleteFile(fi, fsetSnapshot(t, f.fset), dbUpdateChan, scanChan)
select {
case f := <-scanChan:
t.Fatalf("Received %v on scanChan", f)
@ -1031,7 +1031,7 @@ func TestPullCtxCancel(t *testing.T) {
var cancel context.CancelFunc
f.ctx, cancel = context.WithCancel(context.Background())
go f.pullerRoutine(pullChan, finisherChan)
go f.pullerRoutine(fsetSnapshot(t, f.fset), pullChan, finisherChan)
defer close(pullChan)
emptyState := func() pullBlockState {
@ -1077,7 +1077,7 @@ func TestPullDeleteUnscannedDir(t *testing.T) {
scanChan := make(chan string, 1)
dbUpdateChan := make(chan dbUpdateJob, 1)
f.deleteDir(fi, f.fset.Snapshot(), dbUpdateChan, scanChan)
f.deleteDir(fi, fsetSnapshot(t, f.fset), dbUpdateChan, scanChan)
if _, err := ffs.Stat(dir); fs.IsNotExist(err) {
t.Error("directory has been deleted")
@ -1226,7 +1226,7 @@ func TestPullTempFileCaseConflict(t *testing.T) {
fd.Close()
}
f.handleFile(file, f.fset.Snapshot(), copyChan)
f.handleFile(file, fsetSnapshot(t, f.fset), copyChan)
cs := <-copyChan
if _, err := cs.tempFile(); err != nil {
@ -1252,7 +1252,7 @@ func TestPullCaseOnlyRename(t *testing.T) {
must(t, f.scanSubdirs(nil))
cur, ok := m.CurrentFolderFile(f.ID, name)
cur, ok := m.testCurrentFolderFile(f.ID, name)
if !ok {
t.Fatal("file missing")
}
@ -1266,7 +1266,7 @@ func TestPullCaseOnlyRename(t *testing.T) {
dbUpdateChan := make(chan dbUpdateJob, 2)
scanChan := make(chan string, 2)
snap := f.fset.Snapshot()
snap := fsetSnapshot(t, f.fset)
defer snap.Release()
if err := f.renameFile(cur, deleted, confl, snap, dbUpdateChan, scanChan); err != nil {
t.Error(err)
@ -1293,7 +1293,7 @@ func TestPullSymlinkOverExistingWindows(t *testing.T) {
must(t, f.scanSubdirs(nil))
file, ok := m.CurrentFolderFile(f.ID, name)
file, ok := m.testCurrentFolderFile(f.ID, name)
if !ok {
t.Fatal("file missing")
}
@ -1301,11 +1301,12 @@ func TestPullSymlinkOverExistingWindows(t *testing.T) {
scanChan := make(chan string)
changed := f.pullerIteration(scanChan)
changed, err := f.pullerIteration(scanChan)
must(t, err)
if changed != 1 {
t.Error("Expected one change in pull, got", changed)
}
if file, ok := m.CurrentFolderFile(f.ID, name); !ok {
if file, ok := m.testCurrentFolderFile(f.ID, name); !ok {
t.Error("symlink entry missing")
} else if !file.IsUnsupported() {
t.Error("symlink entry isn't marked as unsupported")
@ -1341,7 +1342,7 @@ func TestPullDeleteCaseConflict(t *testing.T) {
t.Error("Missing db update for file")
}
snap := f.fset.Snapshot()
snap := fsetSnapshot(t, f.fset)
defer snap.Release()
f.deleteDir(fi, snap, dbUpdateChan, scanChan)
select {
@ -1371,7 +1372,7 @@ func TestPullDeleteIgnoreChildDir(t *testing.T) {
scanChan := make(chan string, 2)
err := f.deleteDirOnDisk(parent, f.fset.Snapshot(), scanChan)
err := f.deleteDirOnDisk(parent, fsetSnapshot(t, f.fset), scanChan)
if err == nil {
t.Error("no error")
}

View File

@ -96,7 +96,7 @@ func (c *folderSummaryService) Summary(folder string) (map[string]interface{}, e
// For API backwards compatibility (SyncTrayzor needs it) an empty folder
// summary is returned for not running folders, an error might actually be
// more appropriate
if err != nil && err != ErrFolderPaused && err != errFolderNotRunning {
if err != nil && err != ErrFolderPaused && err != ErrFolderNotRunning {
return nil, err
}
@ -348,9 +348,14 @@ func (c *folderSummaryService) sendSummary(ctx context.Context, folder string) {
// Get completion percentage of this folder for the
// remote device.
comp := c.model.Completion(devCfg.DeviceID, folder).Map()
comp["folder"] = folder
comp["device"] = devCfg.DeviceID.String()
c.evLogger.Log(events.FolderCompletion, comp)
comp, err := c.model.Completion(devCfg.DeviceID, folder)
if err != nil {
l.Debugf("Error getting completion for folder %v, device %v: %v", folder, devCfg.DeviceID, err)
continue
}
ev := comp.Map()
ev["folder"] = folder
ev["device"] = devCfg.DeviceID.String()
c.evLogger.Log(events.FolderCompletion, ev)
}
}

View File

@ -131,7 +131,10 @@ func (s *indexSender) sendIndexTo(ctx context.Context) error {
var err error
var f protocol.FileInfo
snap := s.fset.Snapshot()
snap, err := s.fset.Snapshot()
if err != nil {
return svcutil.AsFatalErr(err, svcutil.ExitError)
}
defer snap.Release()
previousWasDelete := false
snap.WithHaveSequence(s.prevSequence+1, func(fi protocol.FileIntf) bool {

View File

@ -22,7 +22,7 @@ type Model struct {
arg1 protocol.Connection
arg2 protocol.Hello
}
AvailabilityStub func(string, protocol.FileInfo, protocol.BlockInfo) []model.Availability
AvailabilityStub func(string, protocol.FileInfo, protocol.BlockInfo) ([]model.Availability, error)
availabilityMutex sync.RWMutex
availabilityArgsForCall []struct {
arg1 string
@ -31,9 +31,11 @@ type Model struct {
}
availabilityReturns struct {
result1 []model.Availability
result2 error
}
availabilityReturnsOnCall map[int]struct {
result1 []model.Availability
result2 error
}
BringToFrontStub func(string, string)
bringToFrontMutex sync.RWMutex
@ -59,7 +61,7 @@ type Model struct {
clusterConfigReturnsOnCall map[int]struct {
result1 error
}
CompletionStub func(protocol.DeviceID, string) model.FolderCompletion
CompletionStub func(protocol.DeviceID, string) (model.FolderCompletion, error)
completionMutex sync.RWMutex
completionArgsForCall []struct {
arg1 protocol.DeviceID
@ -67,9 +69,11 @@ type Model struct {
}
completionReturns struct {
result1 model.FolderCompletion
result2 error
}
completionReturnsOnCall map[int]struct {
result1 model.FolderCompletion
result2 error
}
ConnectionStub func(protocol.DeviceID) (protocol.Connection, bool)
connectionMutex sync.RWMutex
@ -94,7 +98,7 @@ type Model struct {
connectionStatsReturnsOnCall map[int]struct {
result1 map[string]interface{}
}
CurrentFolderFileStub func(string, string) (protocol.FileInfo, bool)
CurrentFolderFileStub func(string, string) (protocol.FileInfo, bool, error)
currentFolderFileMutex sync.RWMutex
currentFolderFileArgsForCall []struct {
arg1 string
@ -103,12 +107,14 @@ type Model struct {
currentFolderFileReturns struct {
result1 protocol.FileInfo
result2 bool
result3 error
}
currentFolderFileReturnsOnCall map[int]struct {
result1 protocol.FileInfo
result2 bool
result3 error
}
CurrentGlobalFileStub func(string, string) (protocol.FileInfo, bool)
CurrentGlobalFileStub func(string, string) (protocol.FileInfo, bool, error)
currentGlobalFileMutex sync.RWMutex
currentGlobalFileArgsForCall []struct {
arg1 string
@ -117,10 +123,12 @@ type Model struct {
currentGlobalFileReturns struct {
result1 protocol.FileInfo
result2 bool
result3 error
}
currentGlobalFileReturnsOnCall map[int]struct {
result1 protocol.FileInfo
result2 bool
result3 error
}
CurrentIgnoresStub func(string) ([]string, []string, error)
currentIgnoresMutex sync.RWMutex
@ -577,7 +585,7 @@ func (fake *Model) AddConnectionArgsForCall(i int) (protocol.Connection, protoco
return argsForCall.arg1, argsForCall.arg2
}
func (fake *Model) Availability(arg1 string, arg2 protocol.FileInfo, arg3 protocol.BlockInfo) []model.Availability {
func (fake *Model) Availability(arg1 string, arg2 protocol.FileInfo, arg3 protocol.BlockInfo) ([]model.Availability, error) {
fake.availabilityMutex.Lock()
ret, specificReturn := fake.availabilityReturnsOnCall[len(fake.availabilityArgsForCall)]
fake.availabilityArgsForCall = append(fake.availabilityArgsForCall, struct {
@ -593,9 +601,9 @@ func (fake *Model) Availability(arg1 string, arg2 protocol.FileInfo, arg3 protoc
return stub(arg1, arg2, arg3)
}
if specificReturn {
return ret.result1
return ret.result1, ret.result2
}
return fakeReturns.result1
return fakeReturns.result1, fakeReturns.result2
}
func (fake *Model) AvailabilityCallCount() int {
@ -604,7 +612,7 @@ func (fake *Model) AvailabilityCallCount() int {
return len(fake.availabilityArgsForCall)
}
func (fake *Model) AvailabilityCalls(stub func(string, protocol.FileInfo, protocol.BlockInfo) []model.Availability) {
func (fake *Model) AvailabilityCalls(stub func(string, protocol.FileInfo, protocol.BlockInfo) ([]model.Availability, error)) {
fake.availabilityMutex.Lock()
defer fake.availabilityMutex.Unlock()
fake.AvailabilityStub = stub
@ -617,27 +625,30 @@ func (fake *Model) AvailabilityArgsForCall(i int) (string, protocol.FileInfo, pr
return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3
}
func (fake *Model) AvailabilityReturns(result1 []model.Availability) {
func (fake *Model) AvailabilityReturns(result1 []model.Availability, result2 error) {
fake.availabilityMutex.Lock()
defer fake.availabilityMutex.Unlock()
fake.AvailabilityStub = nil
fake.availabilityReturns = struct {
result1 []model.Availability
}{result1}
result2 error
}{result1, result2}
}
func (fake *Model) AvailabilityReturnsOnCall(i int, result1 []model.Availability) {
func (fake *Model) AvailabilityReturnsOnCall(i int, result1 []model.Availability, result2 error) {
fake.availabilityMutex.Lock()
defer fake.availabilityMutex.Unlock()
fake.AvailabilityStub = nil
if fake.availabilityReturnsOnCall == nil {
fake.availabilityReturnsOnCall = make(map[int]struct {
result1 []model.Availability
result2 error
})
}
fake.availabilityReturnsOnCall[i] = struct {
result1 []model.Availability
}{result1}
result2 error
}{result1, result2}
}
func (fake *Model) BringToFront(arg1 string, arg2 string) {
@ -768,7 +779,7 @@ func (fake *Model) ClusterConfigReturnsOnCall(i int, result1 error) {
}{result1}
}
func (fake *Model) Completion(arg1 protocol.DeviceID, arg2 string) model.FolderCompletion {
func (fake *Model) Completion(arg1 protocol.DeviceID, arg2 string) (model.FolderCompletion, error) {
fake.completionMutex.Lock()
ret, specificReturn := fake.completionReturnsOnCall[len(fake.completionArgsForCall)]
fake.completionArgsForCall = append(fake.completionArgsForCall, struct {
@ -783,9 +794,9 @@ func (fake *Model) Completion(arg1 protocol.DeviceID, arg2 string) model.FolderC
return stub(arg1, arg2)
}
if specificReturn {
return ret.result1
return ret.result1, ret.result2
}
return fakeReturns.result1
return fakeReturns.result1, fakeReturns.result2
}
func (fake *Model) CompletionCallCount() int {
@ -794,7 +805,7 @@ func (fake *Model) CompletionCallCount() int {
return len(fake.completionArgsForCall)
}
func (fake *Model) CompletionCalls(stub func(protocol.DeviceID, string) model.FolderCompletion) {
func (fake *Model) CompletionCalls(stub func(protocol.DeviceID, string) (model.FolderCompletion, error)) {
fake.completionMutex.Lock()
defer fake.completionMutex.Unlock()
fake.CompletionStub = stub
@ -807,27 +818,30 @@ func (fake *Model) CompletionArgsForCall(i int) (protocol.DeviceID, string) {
return argsForCall.arg1, argsForCall.arg2
}
func (fake *Model) CompletionReturns(result1 model.FolderCompletion) {
func (fake *Model) CompletionReturns(result1 model.FolderCompletion, result2 error) {
fake.completionMutex.Lock()
defer fake.completionMutex.Unlock()
fake.CompletionStub = nil
fake.completionReturns = struct {
result1 model.FolderCompletion
}{result1}
result2 error
}{result1, result2}
}
func (fake *Model) CompletionReturnsOnCall(i int, result1 model.FolderCompletion) {
func (fake *Model) CompletionReturnsOnCall(i int, result1 model.FolderCompletion, result2 error) {
fake.completionMutex.Lock()
defer fake.completionMutex.Unlock()
fake.CompletionStub = nil
if fake.completionReturnsOnCall == nil {
fake.completionReturnsOnCall = make(map[int]struct {
result1 model.FolderCompletion
result2 error
})
}
fake.completionReturnsOnCall[i] = struct {
result1 model.FolderCompletion
}{result1}
result2 error
}{result1, result2}
}
func (fake *Model) Connection(arg1 protocol.DeviceID) (protocol.Connection, bool) {
@ -947,7 +961,7 @@ func (fake *Model) ConnectionStatsReturnsOnCall(i int, result1 map[string]interf
}{result1}
}
func (fake *Model) CurrentFolderFile(arg1 string, arg2 string) (protocol.FileInfo, bool) {
func (fake *Model) CurrentFolderFile(arg1 string, arg2 string) (protocol.FileInfo, bool, error) {
fake.currentFolderFileMutex.Lock()
ret, specificReturn := fake.currentFolderFileReturnsOnCall[len(fake.currentFolderFileArgsForCall)]
fake.currentFolderFileArgsForCall = append(fake.currentFolderFileArgsForCall, struct {
@ -962,9 +976,9 @@ func (fake *Model) CurrentFolderFile(arg1 string, arg2 string) (protocol.FileInf
return stub(arg1, arg2)
}
if specificReturn {
return ret.result1, ret.result2
return ret.result1, ret.result2, ret.result3
}
return fakeReturns.result1, fakeReturns.result2
return fakeReturns.result1, fakeReturns.result2, fakeReturns.result3
}
func (fake *Model) CurrentFolderFileCallCount() int {
@ -973,7 +987,7 @@ func (fake *Model) CurrentFolderFileCallCount() int {
return len(fake.currentFolderFileArgsForCall)
}
func (fake *Model) CurrentFolderFileCalls(stub func(string, string) (protocol.FileInfo, bool)) {
func (fake *Model) CurrentFolderFileCalls(stub func(string, string) (protocol.FileInfo, bool, error)) {
fake.currentFolderFileMutex.Lock()
defer fake.currentFolderFileMutex.Unlock()
fake.CurrentFolderFileStub = stub
@ -986,17 +1000,18 @@ func (fake *Model) CurrentFolderFileArgsForCall(i int) (string, string) {
return argsForCall.arg1, argsForCall.arg2
}
func (fake *Model) CurrentFolderFileReturns(result1 protocol.FileInfo, result2 bool) {
func (fake *Model) CurrentFolderFileReturns(result1 protocol.FileInfo, result2 bool, result3 error) {
fake.currentFolderFileMutex.Lock()
defer fake.currentFolderFileMutex.Unlock()
fake.CurrentFolderFileStub = nil
fake.currentFolderFileReturns = struct {
result1 protocol.FileInfo
result2 bool
}{result1, result2}
result3 error
}{result1, result2, result3}
}
func (fake *Model) CurrentFolderFileReturnsOnCall(i int, result1 protocol.FileInfo, result2 bool) {
func (fake *Model) CurrentFolderFileReturnsOnCall(i int, result1 protocol.FileInfo, result2 bool, result3 error) {
fake.currentFolderFileMutex.Lock()
defer fake.currentFolderFileMutex.Unlock()
fake.CurrentFolderFileStub = nil
@ -1004,15 +1019,17 @@ func (fake *Model) CurrentFolderFileReturnsOnCall(i int, result1 protocol.FileIn
fake.currentFolderFileReturnsOnCall = make(map[int]struct {
result1 protocol.FileInfo
result2 bool
result3 error
})
}
fake.currentFolderFileReturnsOnCall[i] = struct {
result1 protocol.FileInfo
result2 bool
}{result1, result2}
result3 error
}{result1, result2, result3}
}
func (fake *Model) CurrentGlobalFile(arg1 string, arg2 string) (protocol.FileInfo, bool) {
func (fake *Model) CurrentGlobalFile(arg1 string, arg2 string) (protocol.FileInfo, bool, error) {
fake.currentGlobalFileMutex.Lock()
ret, specificReturn := fake.currentGlobalFileReturnsOnCall[len(fake.currentGlobalFileArgsForCall)]
fake.currentGlobalFileArgsForCall = append(fake.currentGlobalFileArgsForCall, struct {
@ -1027,9 +1044,9 @@ func (fake *Model) CurrentGlobalFile(arg1 string, arg2 string) (protocol.FileInf
return stub(arg1, arg2)
}
if specificReturn {
return ret.result1, ret.result2
return ret.result1, ret.result2, ret.result3
}
return fakeReturns.result1, fakeReturns.result2
return fakeReturns.result1, fakeReturns.result2, fakeReturns.result3
}
func (fake *Model) CurrentGlobalFileCallCount() int {
@ -1038,7 +1055,7 @@ func (fake *Model) CurrentGlobalFileCallCount() int {
return len(fake.currentGlobalFileArgsForCall)
}
func (fake *Model) CurrentGlobalFileCalls(stub func(string, string) (protocol.FileInfo, bool)) {
func (fake *Model) CurrentGlobalFileCalls(stub func(string, string) (protocol.FileInfo, bool, error)) {
fake.currentGlobalFileMutex.Lock()
defer fake.currentGlobalFileMutex.Unlock()
fake.CurrentGlobalFileStub = stub
@ -1051,17 +1068,18 @@ func (fake *Model) CurrentGlobalFileArgsForCall(i int) (string, string) {
return argsForCall.arg1, argsForCall.arg2
}
func (fake *Model) CurrentGlobalFileReturns(result1 protocol.FileInfo, result2 bool) {
func (fake *Model) CurrentGlobalFileReturns(result1 protocol.FileInfo, result2 bool, result3 error) {
fake.currentGlobalFileMutex.Lock()
defer fake.currentGlobalFileMutex.Unlock()
fake.CurrentGlobalFileStub = nil
fake.currentGlobalFileReturns = struct {
result1 protocol.FileInfo
result2 bool
}{result1, result2}
result3 error
}{result1, result2, result3}
}
func (fake *Model) CurrentGlobalFileReturnsOnCall(i int, result1 protocol.FileInfo, result2 bool) {
func (fake *Model) CurrentGlobalFileReturnsOnCall(i int, result1 protocol.FileInfo, result2 bool, result3 error) {
fake.currentGlobalFileMutex.Lock()
defer fake.currentGlobalFileMutex.Unlock()
fake.CurrentGlobalFileStub = nil
@ -1069,12 +1087,14 @@ func (fake *Model) CurrentGlobalFileReturnsOnCall(i int, result1 protocol.FileIn
fake.currentGlobalFileReturnsOnCall = make(map[int]struct {
result1 protocol.FileInfo
result2 bool
result3 error
})
}
fake.currentGlobalFileReturnsOnCall[i] = struct {
result1 protocol.FileInfo
result2 bool
}{result1, result2}
result3 error
}{result1, result2, result3}
}
func (fake *Model) CurrentIgnores(arg1 string) ([]string, []string, error) {

View File

@ -98,11 +98,11 @@ type Model interface {
LocalChangedFolderFiles(folder string, page, perpage int) ([]db.FileInfoTruncated, error)
FolderProgressBytesCompleted(folder string) int64
CurrentFolderFile(folder string, file string) (protocol.FileInfo, bool)
CurrentGlobalFile(folder string, file string) (protocol.FileInfo, bool)
Availability(folder string, file protocol.FileInfo, block protocol.BlockInfo) []Availability
CurrentFolderFile(folder string, file string) (protocol.FileInfo, bool, error)
CurrentGlobalFile(folder string, file string) (protocol.FileInfo, bool, error)
Availability(folder string, file protocol.FileInfo, block protocol.BlockInfo) ([]Availability, error)
Completion(device protocol.DeviceID, folder string) FolderCompletion
Completion(device protocol.DeviceID, folder string) (FolderCompletion, error)
ConnectionStats() map[string]interface{}
DeviceStatistics() (map[protocol.DeviceID]stats.DeviceStatistics, error)
FolderStatistics() (map[string]stats.FolderStatistics, error)
@ -179,8 +179,8 @@ var (
errDeviceIgnored = errors.New("device is ignored")
errDeviceRemoved = errors.New("device has been removed")
ErrFolderPaused = errors.New("folder is paused")
errFolderNotRunning = errors.New("folder is not running")
errFolderMissing = errors.New("no such folder")
ErrFolderNotRunning = errors.New("folder is not running")
ErrFolderMissing = errors.New("no such folder")
errNetworkNotAllowed = errors.New("network not allowed")
errNoVersioner = errors.New("folder has no versioner")
// errors about why a connection is closed
@ -875,7 +875,7 @@ func (comp FolderCompletion) Map() map[string]interface{} {
// (including the local device) or explicitly protocol.LocalDeviceID. An
// empty folder string means the aggregate of all folders shared with the
// given device.
func (m *model) Completion(device protocol.DeviceID, folder string) FolderCompletion {
func (m *model) Completion(device protocol.DeviceID, folder string) (FolderCompletion, error) {
// The user specifically asked for our own device ID. Internally that is
// known as protocol.LocalDeviceID so translate.
if device == m.id {
@ -891,21 +891,29 @@ func (m *model) Completion(device protocol.DeviceID, folder string) FolderComple
var comp FolderCompletion
for _, fcfg := range m.cfg.FolderList() {
if device == protocol.LocalDeviceID || fcfg.SharedWith(device) {
comp.add(m.folderCompletion(device, fcfg.ID))
folderComp, err := m.folderCompletion(device, fcfg.ID)
if err != nil {
return FolderCompletion{}, err
}
comp.add(folderComp)
}
}
return comp
return comp, nil
}
func (m *model) folderCompletion(device protocol.DeviceID, folder string) FolderCompletion {
func (m *model) folderCompletion(device protocol.DeviceID, folder string) (FolderCompletion, error) {
m.fmut.RLock()
rf, ok := m.folderFiles[folder]
err := m.checkFolderRunningLocked(folder)
rf := m.folderFiles[folder]
m.fmut.RUnlock()
if !ok {
return FolderCompletion{} // Folder doesn't exist, so we hardly have any of it
if err != nil {
return FolderCompletion{}, err
}
snap := rf.Snapshot()
snap, err := rf.Snapshot()
if err != nil {
return FolderCompletion{}, err
}
defer snap.Release()
m.pmut.RLock()
@ -922,7 +930,7 @@ func (m *model) folderCompletion(device protocol.DeviceID, folder string) Folder
comp := newFolderCompletion(snap.GlobalSize(), need, snap.Sequence(device))
l.Debugf("%v Completion(%s, %q): %v", m, device, folder, comp.Map())
return comp
return comp, nil
}
// DBSnapshot returns a snapshot of the database content relevant to the given folder.
@ -934,7 +942,7 @@ func (m *model) DBSnapshot(folder string) (*db.Snapshot, error) {
if err != nil {
return nil, err
}
return rf.Snapshot(), nil
return rf.Snapshot()
}
func (m *model) FolderProgressBytesCompleted(folder string) int64 {
@ -951,10 +959,13 @@ func (m *model) NeedFolderFiles(folder string, page, perpage int) ([]db.FileInfo
m.fmut.RUnlock()
if !rfOk {
return nil, nil, nil, errFolderMissing
return nil, nil, nil, ErrFolderMissing
}
snap := rf.Snapshot()
snap, err := rf.Snapshot()
if err != nil {
return nil, nil, nil, err
}
defer snap.Release()
var progress, queued, rest []db.FileInfoTruncated
var seen map[string]struct{}
@ -1018,10 +1029,13 @@ func (m *model) RemoteNeedFolderFiles(folder string, device protocol.DeviceID, p
m.fmut.RUnlock()
if !ok {
return nil, errFolderMissing
return nil, ErrFolderMissing
}
snap := rf.Snapshot()
snap, err := rf.Snapshot()
if err != nil {
return nil, err
}
defer snap.Release()
files := make([]db.FileInfoTruncated, 0, perpage)
@ -1043,10 +1057,13 @@ func (m *model) LocalChangedFolderFiles(folder string, page, perpage int) ([]db.
m.fmut.RUnlock()
if !ok {
return nil, errFolderMissing
return nil, ErrFolderMissing
}
snap := rf.Snapshot()
snap, err := rf.Snapshot()
if err != nil {
return nil, err
}
defer snap.Release()
if snap.ReceiveOnlyChangedSize().TotalItems() == 0 {
@ -1120,7 +1137,7 @@ func (m *model) handleIndex(deviceID protocol.DeviceID, folder string, fs []prot
if cfg, ok := m.cfg.Folder(folder); !ok || !cfg.SharedWith(deviceID) {
l.Infof("%v for unexpected folder ID %q sent from device %q; ensure that the folder exists and that this device is selected under \"Share With\" in the folder configuration.", op, folder, deviceID)
return errors.Wrap(errFolderMissing, folder)
return errors.Wrap(ErrFolderMissing, folder)
} else if cfg.Paused {
l.Debugf("%v for paused folder (ID %q) sent from device %q.", op, folder, deviceID)
return errors.Wrap(ErrFolderPaused, folder)
@ -1133,7 +1150,7 @@ func (m *model) handleIndex(deviceID protocol.DeviceID, folder string, fs []prot
if !existing {
l.Infof("%v for nonexistent folder %q", op, folder)
return errors.Wrap(errFolderMissing, folder)
return errors.Wrap(ErrFolderMissing, folder)
}
if running {
@ -1930,7 +1947,11 @@ func newLimitedRequestResponse(size int, limiters ...*byteSemaphore) *requestRes
}
func (m *model) recheckFile(deviceID protocol.DeviceID, folder, name string, offset int64, hash []byte, weakHash uint32) {
cf, ok := m.CurrentFolderFile(folder, name)
cf, ok, err := m.CurrentFolderFile(folder, name)
if err != nil {
l.Debugf("%v recheckFile: %s: %q / %q: current file error: %v", m, deviceID, folder, name, err)
return
}
if !ok {
l.Debugf("%v recheckFile: %s: %q / %q: no current file", m, deviceID, folder, name)
return
@ -1976,28 +1997,36 @@ func (m *model) recheckFile(deviceID protocol.DeviceID, folder, name string, off
l.Debugf("%v recheckFile: %s: %q / %q", m, deviceID, folder, name)
}
func (m *model) CurrentFolderFile(folder string, file string) (protocol.FileInfo, bool) {
func (m *model) CurrentFolderFile(folder string, file string) (protocol.FileInfo, bool, error) {
m.fmut.RLock()
fs, ok := m.folderFiles[folder]
m.fmut.RUnlock()
if !ok {
return protocol.FileInfo{}, false
return protocol.FileInfo{}, false, ErrFolderMissing
}
snap := fs.Snapshot()
defer snap.Release()
return snap.Get(protocol.LocalDeviceID, file)
snap, err := fs.Snapshot()
if err != nil {
return protocol.FileInfo{}, false, err
}
f, ok := snap.Get(protocol.LocalDeviceID, file)
snap.Release()
return f, ok, nil
}
func (m *model) CurrentGlobalFile(folder string, file string) (protocol.FileInfo, bool) {
func (m *model) CurrentGlobalFile(folder string, file string) (protocol.FileInfo, bool, error) {
m.fmut.RLock()
fs, ok := m.folderFiles[folder]
m.fmut.RUnlock()
if !ok {
return protocol.FileInfo{}, false
return protocol.FileInfo{}, false, ErrFolderMissing
}
snap := fs.Snapshot()
defer snap.Release()
return snap.GetGlobal(file)
snap, err := fs.Snapshot()
if err != nil {
return protocol.FileInfo{}, false, err
}
f, ok := snap.GetGlobal(file)
snap.Release()
return f, ok, nil
}
// Connection returns the current connection for device, and a boolean whether a connection was found.
@ -2552,7 +2581,7 @@ func (m *model) GlobalDirectoryTree(folder, prefix string, levels int, dirsOnly
files, ok := m.folderFiles[folder]
m.fmut.RUnlock()
if !ok {
return nil, errFolderMissing
return nil, ErrFolderMissing
}
root := &TreeEntry{
@ -2565,9 +2594,11 @@ func (m *model) GlobalDirectoryTree(folder, prefix string, levels int, dirsOnly
prefix = prefix + sep
}
snap := files.Snapshot()
snap, err := files.Snapshot()
if err != nil {
return nil, err
}
defer snap.Release()
var err error
snap.WithPrefixedGlobalTruncated(prefix, func(fi protocol.FileIntf) bool {
f := fi.(db.FileInfoTruncated)
@ -2661,7 +2692,7 @@ func (m *model) RestoreFolderVersions(folder string, versions map[string]time.Ti
return restoreErrors, nil
}
func (m *model) Availability(folder string, file protocol.FileInfo, block protocol.BlockInfo) []Availability {
func (m *model) Availability(folder string, file protocol.FileInfo, block protocol.BlockInfo) ([]Availability, error) {
// The slightly unusual locking sequence here is because we need to hold
// pmut for the duration (as the value returned from foldersFiles can
// get heavily modified on Close()), but also must acquire fmut before
@ -2675,17 +2706,25 @@ func (m *model) Availability(folder string, file protocol.FileInfo, block protoc
m.fmut.RUnlock()
if !ok {
return nil
return nil, ErrFolderMissing
}
var availabilities []Availability
snap := fs.Snapshot()
snap, err := fs.Snapshot()
if err != nil {
return nil, err
}
defer snap.Release()
return m.availabilityInSnapshot(cfg, snap, file, block), nil
}
func (m *model) availabilityInSnapshot(cfg config.FolderConfiguration, snap *db.Snapshot, file protocol.FileInfo, block protocol.BlockInfo) []Availability {
var availabilities []Availability
for _, device := range snap.Availability(file.Name) {
if _, ok := m.remotePausedFolders[device]; !ok {
continue
}
if _, ok := m.remotePausedFolders[device][folder]; ok {
if _, ok := m.remotePausedFolders[device][cfg.ID]; ok {
continue
}
_, ok := m.conn[device]
@ -2695,7 +2734,7 @@ func (m *model) Availability(folder string, file protocol.FileInfo, block protoc
}
for _, device := range cfg.Devices {
if m.deviceDownloads[device.DeviceID].Has(folder, file.Name, file.Version, int(block.Offset/int64(file.BlockSize()))) {
if m.deviceDownloads[device.DeviceID].Has(cfg.ID, file.Name, file.Version, int(block.Offset/int64(file.BlockSize()))) {
availabilities = append(availabilities, Availability{ID: device.DeviceID, FromTemporary: true})
}
}
@ -2960,12 +2999,12 @@ func (m *model) checkFolderRunningLocked(folder string) error {
}
if cfg, ok := m.cfg.Folder(folder); !ok {
return errFolderMissing
return ErrFolderMissing
} else if cfg.Paused {
return ErrFolderPaused
}
return errFolderNotRunning
return ErrFolderNotRunning
}
// PendingDevices lists unknown devices that tried to connect.

View File

@ -2300,7 +2300,7 @@ func TestIssue3496(t *testing.T) {
fs := m.folderFiles["default"]
m.fmut.RUnlock()
var localFiles []protocol.FileInfo
snap := fs.Snapshot()
snap := fsetSnapshot(t, fs)
snap.WithHave(protocol.LocalDeviceID, func(i protocol.FileIntf) bool {
localFiles = append(localFiles, i.(protocol.FileInfo))
return true
@ -2329,7 +2329,7 @@ func TestIssue3496(t *testing.T) {
// Check that the completion percentage for us makes sense
comp := m.Completion(protocol.LocalDeviceID, "default")
comp := m.testCompletion(protocol.LocalDeviceID, "default")
if comp.NeedBytes > comp.GlobalBytes {
t.Errorf("Need more bytes than exist, not possible: %d > %d", comp.NeedBytes, comp.GlobalBytes)
}
@ -2393,7 +2393,7 @@ func TestNoRequestsFromPausedDevices(t *testing.T) {
files.Update(device1, []protocol.FileInfo{file})
files.Update(device2, []protocol.FileInfo{file})
avail := m.Availability("default", file, file.Blocks[0])
avail := m.testAvailability("default", file, file.Blocks[0])
if len(avail) != 0 {
t.Errorf("should not be available, no connections")
}
@ -2403,7 +2403,7 @@ func TestNoRequestsFromPausedDevices(t *testing.T) {
// !!! This is not what I'd expect to happen, as we don't even know if the peer has the original index !!!
avail = m.Availability("default", file, file.Blocks[0])
avail = m.testAvailability("default", file, file.Blocks[0])
if len(avail) != 2 {
t.Errorf("should have two available")
}
@ -2423,7 +2423,7 @@ func TestNoRequestsFromPausedDevices(t *testing.T) {
m.ClusterConfig(device1, cc)
m.ClusterConfig(device2, cc)
avail = m.Availability("default", file, file.Blocks[0])
avail = m.testAvailability("default", file, file.Blocks[0])
if len(avail) != 2 {
t.Errorf("should have two available")
}
@ -2431,7 +2431,7 @@ func TestNoRequestsFromPausedDevices(t *testing.T) {
m.Closed(newFakeConnection(device1, m), errDeviceUnknown)
m.Closed(newFakeConnection(device2, m), errDeviceUnknown)
avail = m.Availability("default", file, file.Blocks[0])
avail = m.testAvailability("default", file, file.Blocks[0])
if len(avail) != 0 {
t.Errorf("should have no available")
}
@ -2446,7 +2446,7 @@ func TestNoRequestsFromPausedDevices(t *testing.T) {
ccp.Folders[0].Paused = true
m.ClusterConfig(device1, ccp)
avail = m.Availability("default", file, file.Blocks[0])
avail = m.testAvailability("default", file, file.Blocks[0])
if len(avail) != 1 {
t.Errorf("should have one available")
}
@ -2479,12 +2479,12 @@ func TestIssue2571(t *testing.T) {
m.ScanFolder("default")
if dir, ok := m.CurrentFolderFile("default", "toLink"); !ok {
if dir, ok := m.testCurrentFolderFile("default", "toLink"); !ok {
t.Fatalf("Dir missing in db")
} else if !dir.IsSymlink() {
t.Errorf("Dir wasn't changed to symlink")
}
if file, ok := m.CurrentFolderFile("default", filepath.Join("toLink", "a")); !ok {
if file, ok := m.testCurrentFolderFile("default", filepath.Join("toLink", "a")); !ok {
t.Fatalf("File missing in db")
} else if !file.Deleted {
t.Errorf("File below symlink has not been marked as deleted")
@ -2517,7 +2517,7 @@ func TestIssue4573(t *testing.T) {
m.ScanFolder("default")
if file, ok := m.CurrentFolderFile("default", file); !ok {
if file, ok := m.testCurrentFolderFile("default", file); !ok {
t.Fatalf("File missing in db")
} else if file.Deleted {
t.Errorf("Inaccessible file has been marked as deleted.")
@ -2577,7 +2577,7 @@ func TestInternalScan(t *testing.T) {
m.ScanFolder("default")
for path, cond := range testCases {
if f, ok := m.CurrentFolderFile("default", path); !ok {
if f, ok := m.testCurrentFolderFile("default", path); !ok {
t.Fatalf("%v missing in db", path)
} else if cond(f) {
t.Errorf("Incorrect db entry for %v", path)
@ -2638,14 +2638,14 @@ func TestRemoveDirWithContent(t *testing.T) {
m := setupModel(t, defaultCfgWrapper)
defer cleanupModel(m)
dir, ok := m.CurrentFolderFile("default", "dirwith")
dir, ok := m.testCurrentFolderFile("default", "dirwith")
if !ok {
t.Fatalf("Can't get dir \"dirwith\" after initial scan")
}
dir.Deleted = true
dir.Version = dir.Version.Update(device1.Short()).Update(device1.Short())
file, ok := m.CurrentFolderFile("default", content)
file, ok := m.testCurrentFolderFile("default", content)
if !ok {
t.Fatalf("Can't get file \"%v\" after initial scan", content)
}
@ -2657,11 +2657,11 @@ func TestRemoveDirWithContent(t *testing.T) {
// Is there something we could trigger on instead of just waiting?
timeout := time.NewTimer(5 * time.Second)
for {
dir, ok := m.CurrentFolderFile("default", "dirwith")
dir, ok := m.testCurrentFolderFile("default", "dirwith")
if !ok {
t.Fatalf("Can't get dir \"dirwith\" after index update")
}
file, ok := m.CurrentFolderFile("default", content)
file, ok := m.testCurrentFolderFile("default", content)
if !ok {
t.Fatalf("Can't get file \"%v\" after index update", content)
}
@ -2713,11 +2713,11 @@ func TestIssue4475(t *testing.T) {
created := false
for {
if !created {
if _, ok := m.CurrentFolderFile("default", fileName); ok {
if _, ok := m.testCurrentFolderFile("default", fileName); ok {
created = true
}
} else {
dir, ok := m.CurrentFolderFile("default", "delDir")
dir, ok := m.testCurrentFolderFile("default", "delDir")
if !ok {
t.Fatalf("can't get dir from db")
}
@ -2954,7 +2954,7 @@ func TestPausedFolders(t *testing.T) {
t.Errorf("Expected folder paused error, received: %v", err)
}
if err := m.ScanFolder("nonexistent"); err != errFolderMissing {
if err := m.ScanFolder("nonexistent"); err != ErrFolderMissing {
t.Errorf("Expected missing folder error, received: %v", err)
}
}
@ -3035,7 +3035,7 @@ func TestIssue5002(t *testing.T) {
t.Error(err)
}
file, ok := m.CurrentFolderFile("default", "foo")
file, ok := m.testCurrentFolderFile("default", "foo")
if !ok {
t.Fatal("test file should exist")
}
@ -3054,7 +3054,7 @@ func TestParentOfUnignored(t *testing.T) {
m.SetIgnores("default", []string{"!quux", "*"})
if parent, ok := m.CurrentFolderFile("default", "baz"); !ok {
if parent, ok := m.testCurrentFolderFile("default", "baz"); !ok {
t.Errorf(`Directory "baz" missing in db`)
} else if parent.IsIgnored() {
t.Errorf(`Directory "baz" is ignored`)
@ -3222,7 +3222,7 @@ func TestModTimeWindow(t *testing.T) {
// Get current version
fi, ok := m.CurrentFolderFile("default", name)
fi, ok := m.testCurrentFolderFile("default", name)
if !ok {
t.Fatal("File missing")
}
@ -3237,7 +3237,7 @@ func TestModTimeWindow(t *testing.T) {
// No change due to within window
fi, _ = m.CurrentFolderFile("default", name)
fi, _ = m.testCurrentFolderFile("default", name)
if !fi.Version.Equal(v) {
t.Fatalf("Got version %v, expected %v", fi.Version, v)
}
@ -3251,7 +3251,7 @@ func TestModTimeWindow(t *testing.T) {
// Version should have updated
fi, _ = m.CurrentFolderFile("default", name)
fi, _ = m.testCurrentFolderFile("default", name)
if fi.Version.Compare(v) != protocol.Greater {
t.Fatalf("Got result %v, expected %v", fi.Version.Compare(v), protocol.Greater)
}
@ -3368,8 +3368,8 @@ func TestFolderAPIErrors(t *testing.T) {
if err := method(fcfg.ID); err != ErrFolderPaused {
t.Errorf(`Expected "%v", got "%v" (method no %v)`, ErrFolderPaused, err, i)
}
if err := method("notexisting"); err != errFolderMissing {
t.Errorf(`Expected "%v", got "%v" (method no %v)`, errFolderMissing, err, i)
if err := method("notexisting"); err != ErrFolderMissing {
t.Errorf(`Expected "%v", got "%v" (method no %v)`, ErrFolderMissing, err, i)
}
}
}
@ -3776,7 +3776,7 @@ func TestScanDeletedROChangedOnSR(t *testing.T) {
must(t, writeFile(ffs, name, []byte(name), 0644))
m.ScanFolders()
file, ok := m.CurrentFolderFile(fcfg.ID, name)
file, ok := m.testCurrentFolderFile(fcfg.ID, name)
if !ok {
t.Fatal("file missing in db")
}
@ -3927,7 +3927,7 @@ func TestIssue6961(t *testing.T) {
pauseFolder(t, wcfg, fcfg.ID, true)
pauseFolder(t, wcfg, fcfg.ID, false)
if comp := m.Completion(device2, fcfg.ID); comp.NeedDeletes != 0 {
if comp := m.testCompletion(device2, fcfg.ID); comp.NeedDeletes != 0 {
t.Error("Expected 0 needed deletes, got", comp.NeedDeletes)
} else {
t.Log(comp)
@ -3946,7 +3946,7 @@ func TestCompletionEmptyGlobal(t *testing.T) {
files[0].Deleted = true
files[0].Version = files[0].Version.Update(device1.Short())
m.IndexUpdate(device1, fcfg.ID, files)
comp := m.Completion(protocol.LocalDeviceID, fcfg.ID)
comp := m.testCompletion(protocol.LocalDeviceID, fcfg.ID)
if comp.CompletionPct != 95 {
t.Error("Expected completion of 95%, got", comp.CompletionPct)
}
@ -3978,13 +3978,13 @@ func TestNeedMetaAfterIndexReset(t *testing.T) {
files[0].Sequence = seq
m.IndexUpdate(device1, fcfg.ID, files)
if comp := m.Completion(device2, fcfg.ID); comp.NeedItems != 1 {
if comp := m.testCompletion(device2, fcfg.ID); comp.NeedItems != 1 {
t.Error("Expected one needed item for device2, got", comp.NeedItems)
}
// Pretend we had an index reset on device 1
m.Index(device1, fcfg.ID, files)
if comp := m.Completion(device2, fcfg.ID); comp.NeedItems != 1 {
if comp := m.testCompletion(device2, fcfg.ID); comp.NeedItems != 1 {
t.Error("Expected one needed item for device2, got", comp.NeedItems)
}
}

View File

@ -152,6 +152,7 @@ func setupModel(t testing.TB, w config.Wrapper) *testModel {
type testModel struct {
*model
t testing.TB
cancel context.CancelFunc
evCancel context.CancelFunc
stopped chan struct{}
@ -171,6 +172,7 @@ func newModel(t testing.TB, cfg config.Wrapper, id protocol.DeviceID, clientName
model: m,
evCancel: cancel,
stopped: make(chan struct{}),
t: t,
}
}
@ -184,6 +186,24 @@ func (m *testModel) ServeBackground() {
<-m.started
}
// testAvailability wraps model.Availability, turning any returned error
// into an immediate test failure so call sites stay single-line.
func (m *testModel) testAvailability(folder string, file protocol.FileInfo, block protocol.BlockInfo) []Availability {
	res, err := m.model.Availability(folder, file, block)
	must(m.t, err)
	return res
}
// testCurrentFolderFile wraps model.CurrentFolderFile, turning any
// returned error into an immediate test failure.
func (m *testModel) testCurrentFolderFile(folder string, file string) (protocol.FileInfo, bool) {
	fi, ok, err := m.model.CurrentFolderFile(folder, file)
	must(m.t, err)
	return fi, ok
}
// testCompletion wraps model.Completion for the given device and folder,
// turning any returned error into an immediate test failure.
func (m *testModel) testCompletion(device protocol.DeviceID, folder string) FolderCompletion {
	// Use the supplied arguments: the previous body hard-coded
	// protocol.LocalDeviceID and "default", silently ignoring both
	// parameters, so callers querying other devices/folders got the
	// wrong completion data.
	comp, err := m.Completion(device, folder)
	must(m.t, err)
	return comp
}
func cleanupModel(m *testModel) {
if m.cancel != nil {
m.cancel()
@ -277,6 +297,15 @@ func dbSnapshot(t *testing.T, m Model, folder string) *db.Snapshot {
return snap
}
// fsetSnapshot takes a snapshot of the given file set, aborting the
// test immediately if obtaining the snapshot fails.
func fsetSnapshot(t *testing.T, fset *db.FileSet) *db.Snapshot {
	t.Helper()
	s, err := fset.Snapshot()
	if err != nil {
		t.Fatal(err)
	}
	return s
}
// Reach in and update the ignore matcher to one that always does
// reloads when asked to, instead of checking file mtimes. This is
// because we will be changing the files on disk often enough that the

View File

@ -38,6 +38,11 @@ func AsFatalErr(err error, status ExitStatus) *FatalErr {
}
}
// IsFatal reports whether err is, or wraps, a *FatalErr.
func IsFatal(err error) bool {
	// A nil typed pointer is sufficient as the errors.As target;
	// no need to allocate a FatalErr value up front.
	var target *FatalErr
	return errors.As(err, &target)
}
// Error implements the error interface by delegating to the message of
// the wrapped underlying error.
func (e *FatalErr) Error() string {
	return e.Err.Error()
}