all: Handle errors opening db/creating file-set (ref #5907) (#7150)

Simon Frei 2020-12-21 12:59:22 +01:00 committed by GitHub
parent b5de49917c
commit 78bd0341a8
25 changed files with 542 additions and 396 deletions
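
In short, this commit turns database-open and file-set-creation failures into returned errors instead of panics: db.NewLowlevel gains an events.Logger parameter and now returns (*Lowlevel, error), db.NewFileSet returns (*FileSet, error), and syncthing.New propagates the failure up to main. A minimal sketch of the resulting call pattern, based only on the signatures visible in the diff below; the in-memory backend, no-op event logger and fake filesystem are illustrative choices, not the commit's own setup:

package main

import (
    "log"

    "github.com/syncthing/syncthing/lib/db"
    "github.com/syncthing/syncthing/lib/db/backend"
    "github.com/syncthing/syncthing/lib/events"
    "github.com/syncthing/syncthing/lib/fs"
)

func main() {
    // NewLowlevel now takes an event logger and returns an error instead
    // of panicking when the database cannot be opened or repaired.
    ldb, err := db.NewLowlevel(backend.OpenMemory(), events.NoopLogger)
    if err != nil {
        log.Fatalln("opening database:", err)
    }
    defer ldb.Close()

    // NewFileSet likewise returns an error when loading the folder
    // metadata fails, instead of panicking deep inside the db layer.
    fset, err := db.NewFileSet("default", fs.NewFilesystem(fs.FilesystemTypeFake, ""), ldb)
    if err != nil {
        log.Fatalln("creating file set:", err)
    }
    _ = fset
}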

View File

@@ -681,7 +681,11 @@ func syncthingMain(runtimeOptions RuntimeOptions) {
 		appOpts.DBIndirectGCInterval = dur
 	}
-	app := syncthing.New(cfg, ldb, evLogger, cert, appOpts)
+	app, err := syncthing.New(cfg, ldb, evLogger, cert, appOpts)
+	if err != nil {
+		l.Warnln("Failed to start Syncthing:", err)
+		os.Exit(util.ExitError.AsInt())
+	}
 	if autoUpgradePossible {
 		go autoUpgrade(cfg, app, evLogger)

View File

@@ -11,7 +11,6 @@ import (
 	"testing"
 	"github.com/syncthing/syncthing/lib/db"
-	"github.com/syncthing/syncthing/lib/db/backend"
 	"github.com/syncthing/syncthing/lib/fs"
 	"github.com/syncthing/syncthing/lib/protocol"
 )
@@ -44,11 +43,11 @@ func lazyInitBenchFiles() {
 	}
 }
-func getBenchFileSet() (*db.Lowlevel, *db.FileSet) {
+func getBenchFileSet(b testing.TB) (*db.Lowlevel, *db.FileSet) {
 	lazyInitBenchFiles()
-	ldb := db.NewLowlevel(backend.OpenMemory())
-	benchS := db.NewFileSet("test)", fs.NewFilesystem(fs.FilesystemTypeBasic, "."), ldb)
+	ldb := newLowlevelMemory(b)
+	benchS := newFileSet(b, "test)", fs.NewFilesystem(fs.FilesystemTypeBasic, "."), ldb)
 	replace(benchS, remoteDevice0, files)
 	replace(benchS, protocol.LocalDeviceID, firstHalf)
@@ -56,12 +55,12 @@ func getBenchFileSet() (*db.Lowlevel, *db.FileSet) {
 }
 func BenchmarkReplaceAll(b *testing.B) {
-	ldb := db.NewLowlevel(backend.OpenMemory())
+	ldb := newLowlevelMemory(b)
 	defer ldb.Close()
 	b.ResetTimer()
 	for i := 0; i < b.N; i++ {
-		m := db.NewFileSet("test)", fs.NewFilesystem(fs.FilesystemTypeBasic, "."), ldb)
+		m := newFileSet(b, "test)", fs.NewFilesystem(fs.FilesystemTypeBasic, "."), ldb)
 		replace(m, protocol.LocalDeviceID, files)
 	}
@@ -69,7 +68,7 @@ func BenchmarkReplaceAll(b *testing.B) {
 }
 func BenchmarkUpdateOneChanged(b *testing.B) {
-	ldb, benchS := getBenchFileSet()
+	ldb, benchS := getBenchFileSet(b)
 	defer ldb.Close()
 	changed := make([]protocol.FileInfo, 1)
@@ -89,7 +88,7 @@ func BenchmarkUpdateOneChanged(b *testing.B) {
 }
 func BenchmarkUpdate100Changed(b *testing.B) {
-	ldb, benchS := getBenchFileSet()
+	ldb, benchS := getBenchFileSet(b)
 	defer ldb.Close()
 	b.ResetTimer()
@@ -118,7 +117,7 @@ func setup10Remotes(benchS *db.FileSet) {
 }
 func BenchmarkUpdate100Changed10Remotes(b *testing.B) {
-	ldb, benchS := getBenchFileSet()
+	ldb, benchS := getBenchFileSet(b)
 	defer ldb.Close()
 	setup10Remotes(benchS)
@@ -136,7 +135,7 @@ func BenchmarkUpdate100Changed10Remotes(b *testing.B) {
 }
 func BenchmarkUpdate100ChangedRemote(b *testing.B) {
-	ldb, benchS := getBenchFileSet()
+	ldb, benchS := getBenchFileSet(b)
 	defer ldb.Close()
 	b.ResetTimer()
@@ -152,7 +151,7 @@ func BenchmarkUpdate100ChangedRemote(b *testing.B) {
 }
 func BenchmarkUpdate100ChangedRemote10Remotes(b *testing.B) {
-	ldb, benchS := getBenchFileSet()
+	ldb, benchS := getBenchFileSet(b)
 	defer ldb.Close()
 	b.ResetTimer()
@@ -168,7 +167,7 @@ func BenchmarkUpdate100ChangedRemote10Remotes(b *testing.B) {
 }
 func BenchmarkUpdateOneUnchanged(b *testing.B) {
-	ldb, benchS := getBenchFileSet()
+	ldb, benchS := getBenchFileSet(b)
 	defer ldb.Close()
 	b.ResetTimer()
@@ -180,7 +179,7 @@ func BenchmarkUpdateOneUnchanged(b *testing.B) {
 }
 func BenchmarkNeedHalf(b *testing.B) {
-	ldb, benchS := getBenchFileSet()
+	ldb, benchS := getBenchFileSet(b)
 	defer ldb.Close()
 	b.ResetTimer()
@@ -201,9 +200,9 @@ func BenchmarkNeedHalf(b *testing.B) {
 }
 func BenchmarkNeedHalfRemote(b *testing.B) {
-	ldb := db.NewLowlevel(backend.OpenMemory())
+	ldb := newLowlevelMemory(b)
 	defer ldb.Close()
-	fset := db.NewFileSet("test)", fs.NewFilesystem(fs.FilesystemTypeBasic, "."), ldb)
+	fset := newFileSet(b, "test)", fs.NewFilesystem(fs.FilesystemTypeBasic, "."), ldb)
 	replace(fset, remoteDevice0, firstHalf)
 	replace(fset, protocol.LocalDeviceID, files)
@@ -225,7 +224,7 @@ func BenchmarkNeedHalfRemote(b *testing.B) {
 }
 func BenchmarkHave(b *testing.B) {
-	ldb, benchS := getBenchFileSet()
+	ldb, benchS := getBenchFileSet(b)
 	defer ldb.Close()
 	b.ResetTimer()
@@ -246,7 +245,7 @@ func BenchmarkHave(b *testing.B) {
 }
 func BenchmarkGlobal(b *testing.B) {
-	ldb, benchS := getBenchFileSet()
+	ldb, benchS := getBenchFileSet(b)
 	defer ldb.Close()
 	b.ResetTimer()
@@ -267,7 +266,7 @@ func BenchmarkGlobal(b *testing.B) {
 }
 func BenchmarkNeedHalfTruncated(b *testing.B) {
-	ldb, benchS := getBenchFileSet()
+	ldb, benchS := getBenchFileSet(b)
 	defer ldb.Close()
 	b.ResetTimer()
@@ -288,7 +287,7 @@ func BenchmarkNeedHalfTruncated(b *testing.B) {
 }
 func BenchmarkHaveTruncated(b *testing.B) {
-	ldb, benchS := getBenchFileSet()
+	ldb, benchS := getBenchFileSet(b)
 	defer ldb.Close()
 	b.ResetTimer()
@@ -309,7 +308,7 @@ func BenchmarkHaveTruncated(b *testing.B) {
 }
 func BenchmarkGlobalTruncated(b *testing.B) {
-	ldb, benchS := getBenchFileSet()
+	ldb, benchS := getBenchFileSet(b)
 	defer ldb.Close()
 	b.ResetTimer()
@@ -330,7 +329,7 @@ func BenchmarkGlobalTruncated(b *testing.B) {
 }
 func BenchmarkNeedCount(b *testing.B) {
-	ldb, benchS := getBenchFileSet()
+	ldb, benchS := getBenchFileSet(b)
 	defer ldb.Close()
 	benchS.Update(protocol.LocalDeviceID, changed100)
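
The benchmarks above now go through newLowlevelMemory and newFileSet test helpers whose definitions are not part of this excerpt; presumably they wrap the new error-returning constructors and abort the test or benchmark on failure. A hypothetical sketch of such helpers (names, package and placement are assumptions, not taken from this diff):

package db_test

import (
    "testing"

    "github.com/syncthing/syncthing/lib/db"
    "github.com/syncthing/syncthing/lib/db/backend"
    "github.com/syncthing/syncthing/lib/events"
    "github.com/syncthing/syncthing/lib/fs"
)

// newLowlevelMemory opens an in-memory database and fails the test or
// benchmark if db.NewLowlevel returns an error.
func newLowlevelMemory(tb testing.TB) *db.Lowlevel {
    tb.Helper()
    ldb, err := db.NewLowlevel(backend.OpenMemory(), events.NoopLogger)
    if err != nil {
        tb.Fatal(err)
    }
    return ldb
}

// newFileSet creates a FileSet and fails the test or benchmark if
// db.NewFileSet returns an error.
func newFileSet(tb testing.TB, folder string, filesystem fs.Filesystem, ldb *db.Lowlevel) *db.FileSet {
    tb.Helper()
    fset, err := db.NewFileSet(folder, filesystem, ldb)
    if err != nil {
        tb.Fatal(err)
    }
    return fset
}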

View File

@@ -10,7 +10,6 @@ import (
 	"encoding/binary"
 	"testing"
-	"github.com/syncthing/syncthing/lib/db/backend"
 	"github.com/syncthing/syncthing/lib/protocol"
 )
@@ -36,10 +35,9 @@ func init() {
 	}
 }
-func setup() (*Lowlevel, *BlockFinder) {
-	// Setup
-	db := NewLowlevel(backend.OpenMemory())
+func setup(t testing.TB) (*Lowlevel, *BlockFinder) {
+	t.Helper()
+	db := newLowlevelMemory(t)
 	return db, NewBlockFinder(db)
 }
@@ -105,7 +103,7 @@ func discardFromBlockMap(db *Lowlevel, folder []byte, fs []protocol.FileInfo) er
 }
 func TestBlockMapAddUpdateWipe(t *testing.T) {
-	db, f := setup()
+	db, f := setup(t)
 	defer db.Close()
 	if !dbEmpty(db) {
@@ -193,7 +191,7 @@ func TestBlockMapAddUpdateWipe(t *testing.T) {
 }
 func TestBlockFinderLookup(t *testing.T) {
-	db, f := setup()
+	db, f := setup(t)
 	defer db.Close()
 	folder1 := []byte("folder1")

View File

@@ -13,6 +13,7 @@ import (
 	"testing"
 	"github.com/syncthing/syncthing/lib/db/backend"
+	"github.com/syncthing/syncthing/lib/events"
 	"github.com/syncthing/syncthing/lib/fs"
 	"github.com/syncthing/syncthing/lib/protocol"
 )
@@ -35,17 +36,17 @@ func TestIgnoredFiles(t *testing.T) {
 	if err != nil {
 		t.Fatal(err)
 	}
-	db := NewLowlevel(ldb)
+	db := newLowlevel(t, ldb)
 	defer db.Close()
 	if err := UpdateSchema(db); err != nil {
 		t.Fatal(err)
 	}
-	fs := NewFileSet("test", fs.NewFilesystem(fs.FilesystemTypeBasic, "."), db)
+	fs := newFileSet(t, "test", fs.NewFilesystem(fs.FilesystemTypeBasic, "."), db)
 	// The contents of the database are like this:
 	//
-	// fs := NewFileSet("test", fs.NewFilesystem(fs.FilesystemTypeBasic, "."), db)
+	// fs := newFileSet(t, "test", fs.NewFilesystem(fs.FilesystemTypeBasic, "."), db)
 	// fs.Update(protocol.LocalDeviceID, []protocol.FileInfo{
 	// 	{ // invalid (ignored) file
 	// 		Name: "foo",
@@ -164,7 +165,7 @@ func TestUpdate0to3(t *testing.T) {
 		t.Fatal(err)
 	}
-	db := NewLowlevel(ldb)
+	db := newLowlevel(t, ldb)
 	defer db.Close()
 	updater := schemaUpdater{db}
@@ -293,7 +294,7 @@ func TestUpdate0to3(t *testing.T) {
 // TestRepairSequence checks that a few hand-crafted messed-up sequence entries get fixed.
 func TestRepairSequence(t *testing.T) {
-	db := NewLowlevel(backend.OpenMemory())
+	db := newLowlevelMemory(t)
 	defer db.Close()
 	folderStr := "test"
@@ -397,7 +398,7 @@ func TestRepairSequence(t *testing.T) {
 	// Loading the metadata for the first time means a "re"calculation happens,
 	// along which the sequences get repaired too.
 	db.gcMut.RLock()
-	_ = db.loadMetadataTracker(folderStr)
+	_, err = db.loadMetadataTracker(folderStr)
 	db.gcMut.RUnlock()
 	if err != nil {
 		t.Fatal(err)
@@ -466,7 +467,7 @@ func TestRepairSequence(t *testing.T) {
 }
 func TestDowngrade(t *testing.T) {
-	db := NewLowlevel(backend.OpenMemory())
+	db := newLowlevelMemory(t)
 	defer db.Close()
 	// sets the min version etc
 	if err := UpdateSchema(db); err != nil {
@@ -491,10 +492,10 @@ func TestDowngrade(t *testing.T) {
 }
 func TestCheckGlobals(t *testing.T) {
-	db := NewLowlevel(backend.OpenMemory())
+	db := newLowlevelMemory(t)
 	defer db.Close()
-	fs := NewFileSet("test", fs.NewFilesystem(fs.FilesystemTypeFake, ""), db)
+	fs := newFileSet(t, "test", fs.NewFilesystem(fs.FilesystemTypeFake, ""), db)
 	// Add any file
 	name := "foo"
@@ -532,14 +533,17 @@ func TestUpdateTo10(t *testing.T) {
 	if err != nil {
 		t.Fatal(err)
 	}
-	db := NewLowlevel(ldb)
+	db := newLowlevel(t, ldb)
 	defer db.Close()
 	UpdateSchema(db)
 	folder := "test"
-	meta := db.getMetaAndCheck(folder)
+	meta, err := db.getMetaAndCheck(folder)
+	if err != nil {
+		t.Fatal(err)
+	}
 	empty := Counts{}
@@ -643,9 +647,9 @@ func TestDropDuplicates(t *testing.T) {
 func TestGCIndirect(t *testing.T) {
 	// Verify that the gcIndirect run actually removes block lists.
-	db := NewLowlevel(backend.OpenMemory())
+	db := newLowlevelMemory(t)
 	defer db.Close()
-	meta := newMetadataTracker(db.keyer)
+	meta := newMetadataTracker(db.keyer, events.NoopLogger)
 	// Add three files with different block lists
@@ -731,7 +735,7 @@ func TestGCIndirect(t *testing.T) {
 }
 func TestUpdateTo14(t *testing.T) {
-	db := NewLowlevel(backend.OpenMemory())
+	db := newLowlevelMemory(t)
 	defer db.Close()
 	folderStr := "default"
@@ -741,7 +745,10 @@ func TestUpdateTo14(t *testing.T) {
 	file.BlocksHash = protocol.BlocksHash(file.Blocks)
 	fileWOBlocks := file
 	fileWOBlocks.Blocks = nil
-	meta := db.loadMetadataTracker(folderStr)
+	meta, err := db.loadMetadataTracker(folderStr)
+	if err != nil {
+		t.Fatal(err)
+	}
 	// Initally add the correct file the usual way, all good here.
 	if err := db.updateLocalFiles(folder, []protocol.FileInfo{file}, meta); err != nil {
@@ -800,7 +807,7 @@ func TestFlushRecursion(t *testing.T) {
 	// Verify that a commit hook can write to the transaction without
 	// causing another flush and thus recursion.
-	db := NewLowlevel(backend.OpenMemory())
+	db := newLowlevelMemory(t)
 	defer db.Close()
 	// A commit hook that writes a small piece of data to the transaction.
@@ -838,11 +845,11 @@ func TestFlushRecursion(t *testing.T) {
 }
 func TestCheckLocalNeed(t *testing.T) {
-	db := NewLowlevel(backend.OpenMemory())
+	db := newLowlevelMemory(t)
 	defer db.Close()
 	folderStr := "test"
-	fs := NewFileSet(folderStr, fs.NewFilesystem(fs.FilesystemTypeFake, ""), db)
+	fs := newFileSet(t, folderStr, fs.NewFilesystem(fs.FilesystemTypeFake, ""), db)
 	// Add files such that we are in sync for a and b, and need c and d.
 	files := []protocol.FileInfo{
@@ -913,13 +920,13 @@ func TestCheckLocalNeed(t *testing.T) {
 }
 func TestDuplicateNeedCount(t *testing.T) {
-	db := NewLowlevel(backend.OpenMemory())
+	db := newLowlevelMemory(t)
 	defer db.Close()
 	folder := "test"
 	testFs := fs.NewFilesystem(fs.FilesystemTypeFake, "")
-	fs := NewFileSet(folder, testFs, db)
+	fs := newFileSet(t, folder, testFs, db)
 	files := []protocol.FileInfo{{Name: "foo", Version: protocol.Vector{}.Update(myID), Sequence: 1}}
 	fs.Update(protocol.LocalDeviceID, files)
 	files[0].Version = files[0].Version.Update(remoteDevice0.Short())
@@ -927,7 +934,7 @@ func TestDuplicateNeedCount(t *testing.T) {
 	db.checkRepair()
-	fs = NewFileSet(folder, testFs, db)
+	fs = newFileSet(t, folder, testFs, db)
 	found := false
 	for _, c := range fs.meta.counts.Counts {
 		if bytes.Equal(protocol.LocalDeviceID[:], c.DeviceID) && c.LocalFlags == needFlag {

View File

@@ -9,8 +9,6 @@ package db
 import (
 	"bytes"
 	"testing"
-	"github.com/syncthing/syncthing/lib/db/backend"
 )
 func TestDeviceKey(t *testing.T) {
@@ -18,7 +16,7 @@ func TestDeviceKey(t *testing.T) {
 	dev := []byte("device67890123456789012345678901")
 	name := []byte("name")
-	db := NewLowlevel(backend.OpenMemory())
+	db := newLowlevelMemory(t)
 	defer db.Close()
 	key, err := db.keyer.GenerateDeviceFileKey(nil, fld, dev, name)
@@ -50,7 +48,7 @@ func TestGlobalKey(t *testing.T) {
 	fld := []byte("folder6789012345678901234567890123456789012345678901234567890123")
 	name := []byte("name")
-	db := NewLowlevel(backend.OpenMemory())
+	db := newLowlevelMemory(t)
 	defer db.Close()
 	key, err := db.keyer.GenerateGlobalVersionKey(nil, fld, name)
@@ -67,7 +65,7 @@ func TestGlobalKey(t *testing.T) {
 func TestSequenceKey(t *testing.T) {
 	fld := []byte("folder6789012345678901234567890123456789012345678901234567890123")
-	db := NewLowlevel(backend.OpenMemory())
+	db := newLowlevelMemory(t)
 	defer db.Close()
 	const seq = 1234567890

View File

@@ -10,6 +10,7 @@ import (
 	"bytes"
 	"context"
 	"encoding/binary"
+	"errors"
 	"fmt"
 	"io"
 	"os"
@@ -19,6 +20,7 @@ import (
 	"github.com/dchest/siphash"
 	"github.com/greatroar/blobloom"
 	"github.com/syncthing/syncthing/lib/db/backend"
+	"github.com/syncthing/syncthing/lib/events"
 	"github.com/syncthing/syncthing/lib/fs"
 	"github.com/syncthing/syncthing/lib/protocol"
 	"github.com/syncthing/syncthing/lib/rand"
@@ -66,9 +68,10 @@ type Lowlevel struct {
 	indirectGCInterval time.Duration
 	recheckInterval    time.Duration
 	oneFileSetCreated  chan struct{}
+	evLogger           events.Logger
 }
-func NewLowlevel(backend backend.Backend, opts ...Option) *Lowlevel {
+func NewLowlevel(backend backend.Backend, evLogger events.Logger, opts ...Option) (*Lowlevel, error) {
 	// Only log restarts in debug mode.
 	spec := util.SpecWithDebugLogger(l)
 	db := &Lowlevel{
@@ -80,6 +83,7 @@ func NewLowlevel(backend backend.Backend, opts ...Option) *Lowlevel {
 		indirectGCInterval: indirectGCDefaultInterval,
 		recheckInterval:    recheckDefaultInterval,
 		oneFileSetCreated:  make(chan struct{}),
+		evLogger:           evLogger,
 	}
 	for _, opt := range opts {
 		opt(db)
@@ -89,11 +93,14 @@ func NewLowlevel(backend backend.Backend, opts ...Option) *Lowlevel {
 	if path := db.needsRepairPath(); path != "" {
 		if _, err := os.Lstat(path); err == nil {
 			l.Infoln("Database was marked for repair - this may take a while")
-			db.checkRepair()
+			if err := db.checkRepair(); err != nil {
+				db.handleFailure(err)
+				return nil, err
+			}
 			os.Remove(path)
 		}
 	}
-	return db
+	return db, nil
 }
 type Option func(*Lowlevel)
@@ -822,29 +829,22 @@ func (b *bloomFilter) hash(id []byte) uint64 {
 }
 // checkRepair checks folder metadata and sequences for miscellaneous errors.
-func (db *Lowlevel) checkRepair() {
+func (db *Lowlevel) checkRepair() error {
 	for _, folder := range db.ListFolders() {
-		_ = db.getMetaAndCheck(folder)
+		if _, err := db.getMetaAndCheck(folder); err != nil {
+			return err
+		}
 	}
+	return nil
 }
-func (db *Lowlevel) getMetaAndCheck(folder string) *metadataTracker {
+func (db *Lowlevel) getMetaAndCheck(folder string) (*metadataTracker, error) {
 	db.gcMut.RLock()
 	defer db.gcMut.RUnlock()
-	var err error
-	defer func() {
-		if err != nil && !backend.IsClosed(err) {
-			l.Warnf("Fatal error: %v", err)
-			obfuscateAndPanic(err)
-		}
-	}()
-	var fixed int
-	fixed, err = db.checkLocalNeed([]byte(folder))
+	fixed, err := db.checkLocalNeed([]byte(folder))
 	if err != nil {
-		err = fmt.Errorf("checking local need: %w", err)
-		return nil
+		return nil, fmt.Errorf("checking local need: %w", err)
 	}
 	if fixed != 0 {
 		l.Infof("Repaired %d local need entries for folder %v in database", fixed, folder)
@@ -852,24 +852,22 @@ func (db *Lowlevel) getMetaAndCheck(folder string) *metadataTracker {
 	meta, err := db.recalcMeta(folder)
 	if err != nil {
-		err = fmt.Errorf("recalculating metadata: %w", err)
-		return nil
+		return nil, fmt.Errorf("recalculating metadata: %w", err)
 	}
 	fixed, err = db.repairSequenceGCLocked(folder, meta)
 	if err != nil {
-		err = fmt.Errorf("repairing sequences: %w", err)
-		return nil
+		return nil, fmt.Errorf("repairing sequences: %w", err)
 	}
 	if fixed != 0 {
 		l.Infof("Repaired %d sequence entries for folder %v in database", fixed, folder)
 	}
-	return meta
+	return meta, nil
 }
-func (db *Lowlevel) loadMetadataTracker(folder string) *metadataTracker {
-	meta := newMetadataTracker(db.keyer)
+func (db *Lowlevel) loadMetadataTracker(folder string) (*metadataTracker, error) {
+	meta := newMetadataTracker(db.keyer, db.evLogger)
 	if err := meta.fromDB(db, []byte(folder)); err != nil {
 		if err == errMetaInconsistent {
 			l.Infof("Stored folder metadata for %q is inconsistent; recalculating", folder)
@@ -881,7 +879,9 @@ func (db *Lowlevel) loadMetadataTracker(folder string) *metadataTracker {
 	}
 	curSeq := meta.Sequence(protocol.LocalDeviceID)
-	if metaOK := db.verifyLocalSequence(curSeq, folder); !metaOK {
+	if metaOK, err := db.verifyLocalSequence(curSeq, folder); err != nil {
+		return nil, fmt.Errorf("verifying sequences: %w", err)
+	} else if !metaOK {
 		l.Infof("Stored folder metadata for %q is out of date after crash; recalculating", folder)
 		return db.getMetaAndCheck(folder)
 	}
@@ -891,13 +891,13 @@ func (db *Lowlevel) loadMetadataTracker(folder string) *metadataTracker {
 		return db.getMetaAndCheck(folder)
 	}
-	return meta
+	return meta, nil
 }
 func (db *Lowlevel) recalcMeta(folderStr string) (*metadataTracker, error) {
 	folder := []byte(folderStr)
-	meta := newMetadataTracker(db.keyer)
+	meta := newMetadataTracker(db.keyer, db.evLogger)
 	if err := db.checkGlobals(folder); err != nil {
 		return nil, fmt.Errorf("checking globals: %w", err)
 	}
@@ -951,7 +951,7 @@ func (db *Lowlevel) recalcMeta(folderStr string) (*metadataTracker, error) {
 // Verify the local sequence number from actual sequence entries. Returns
 // true if it was all good, or false if a fixup was necessary.
-func (db *Lowlevel) verifyLocalSequence(curSeq int64, folder string) bool {
+func (db *Lowlevel) verifyLocalSequence(curSeq int64, folder string) (bool, error) {
 	// Walk the sequence index from the current (supposedly) highest
 	// sequence number and raise the alarm if we get anything. This recovers
 	// from the occasion where we have written sequence entries to disk but
@@ -964,20 +964,18 @@ func (db *Lowlevel) verifyLocalSequence(curSeq int64, folder string) bool {
 	t, err := db.newReadOnlyTransaction()
 	if err != nil {
-		l.Warnf("Fatal error: %v", err)
-		obfuscateAndPanic(err)
+		return false, err
 	}
 	ok := true
 	if err := t.withHaveSequence([]byte(folder), curSeq+1, func(fi protocol.FileIntf) bool {
 		ok = false // we got something, which we should not have
 		return false
-	}); err != nil && !backend.IsClosed(err) {
-		l.Warnf("Fatal error: %v", err)
-		obfuscateAndPanic(err)
+	}); err != nil {
+		return false, err
 	}
 	t.close()
-	return ok
+	return ok, nil
 }
 // repairSequenceGCLocked makes sure the sequence numbers in the sequence keys
@@ -1177,6 +1175,17 @@ func (db *Lowlevel) needsRepairPath() string {
 	return path + needsRepairSuffix
 }
+func (db *Lowlevel) checkErrorForRepair(err error) {
+	if errors.Is(err, errEntryFromGlobalMissing) || errors.Is(err, errEmptyGlobal) {
+		// Inconsistency error, mark db for repair on next start.
+		if path := db.needsRepairPath(); path != "" {
+			if fd, err := os.Create(path); err == nil {
+				fd.Close()
+			}
+		}
+	}
+}
 // unchanged checks if two files are the same and thus don't need to be updated.
 // Local flags or the invalid bit might change without the version
 // being bumped.
@@ -1184,8 +1193,15 @@ func unchanged(nf, ef protocol.FileIntf) bool {
 	return ef.FileVersion().Equal(nf.FileVersion()) && ef.IsInvalid() == nf.IsInvalid() && ef.FileLocalFlags() == nf.FileLocalFlags()
 }
+func (db *Lowlevel) handleFailure(err error) {
+	db.checkErrorForRepair(err)
+	if shouldReportFailure(err) {
+		db.evLogger.Log(events.Failure, err)
+	}
+}
 var ldbPathRe = regexp.MustCompile(`(open|write|read) .+[\\/].+[\\/]index[^\\/]+[\\/][^\\/]+: `)
-func obfuscateAndPanic(err error) {
-	panic(ldbPathRe.ReplaceAllString(err.Error(), "$1 x: "))
+func shouldReportFailure(err error) bool {
+	return !ldbPathRe.MatchString(err.Error())
 }
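
With obfuscateAndPanic replaced by shouldReportFailure, errors whose text embeds a local database path are kept out of failure reporting instead of being obfuscated before a panic. A small standalone illustration of that decision, reusing the regexp shown above; the sample error strings are made up:

package main

import (
    "fmt"
    "regexp"
)

// Same pattern as ldbPathRe above: it matches error text that contains a
// path into the on-disk index database.
var ldbPathRe = regexp.MustCompile(`(open|write|read) .+[\\/].+[\\/]index[^\\/]+[\\/][^\\/]+: `)

// shouldReportFailure mirrors the function added in this commit: errors that
// reveal a database path are not sent as failure events.
func shouldReportFailure(err error) bool {
    return !ldbPathRe.MatchString(err.Error())
}

func main() {
    withPath := fmt.Errorf("open /home/user/.config/syncthing/index-v0.14.0.db/000123.ldb: permission denied")
    generic := fmt.Errorf("leveldb: datablock checksum mismatch")

    fmt.Println(shouldReportFailure(withPath)) // false: message contains a db path
    fmt.Println(shouldReportFailure(generic))  // true: safe to report
}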

View File

@@ -9,10 +9,12 @@ package db
 import (
 	"bytes"
 	"errors"
+	"fmt"
 	"math/bits"
 	"time"
 	"github.com/syncthing/syncthing/lib/db/backend"
+	"github.com/syncthing/syncthing/lib/events"
 	"github.com/syncthing/syncthing/lib/protocol"
 	"github.com/syncthing/syncthing/lib/sync"
 )
@@ -28,8 +30,9 @@ type countsMap struct {
 type metadataTracker struct {
 	keyer keyer
 	countsMap
 	mut      sync.RWMutex
 	dirty    bool
+	evLogger events.Logger
 }
 type metaKey struct {
@@ -39,13 +42,14 @@ type metaKey struct {
 const needFlag uint32 = 1 << 31 // Last bit, as early ones are local flags
-func newMetadataTracker(keyer keyer) *metadataTracker {
+func newMetadataTracker(keyer keyer, evLogger events.Logger) *metadataTracker {
 	return &metadataTracker{
 		keyer: keyer,
 		mut:   sync.NewRWMutex(),
 		countsMap: countsMap{
 			indexes: make(map[metaKey]int),
 		},
+		evLogger: evLogger,
 	}
 }
@@ -296,18 +300,22 @@ func (m *metadataTracker) removeFileLocked(dev protocol.DeviceID, flag uint32, f
 	// the created timestamp to zero. Next time we start up the metadata
 	// will be seen as infinitely old and recalculated from scratch.
 	if cp.Deleted < 0 {
+		m.evLogger.Log(events.Failure, fmt.Sprintf("meta deleted count for flag 0x%x dropped below zero", flag))
 		cp.Deleted = 0
 		m.counts.Created = 0
 	}
 	if cp.Files < 0 {
+		m.evLogger.Log(events.Failure, fmt.Sprintf("meta files count for flag 0x%x dropped below zero", flag))
 		cp.Files = 0
 		m.counts.Created = 0
 	}
 	if cp.Directories < 0 {
+		m.evLogger.Log(events.Failure, fmt.Sprintf("meta directories count for flag 0x%x dropped below zero", flag))
 		cp.Directories = 0
 		m.counts.Created = 0
 	}
 	if cp.Symlinks < 0 {
+		m.evLogger.Log(events.Failure, fmt.Sprintf("meta deleted count for flag 0x%x dropped below zero", flag))
 		cp.Symlinks = 0
 		m.counts.Created = 0
 	}

View File

@@ -11,7 +11,7 @@ import (
 	"sort"
 	"testing"
-	"github.com/syncthing/syncthing/lib/db/backend"
+	"github.com/syncthing/syncthing/lib/events"
 	"github.com/syncthing/syncthing/lib/fs"
 	"github.com/syncthing/syncthing/lib/protocol"
 )
@@ -52,7 +52,7 @@ func TestEachFlagBit(t *testing.T) {
 func TestMetaDevices(t *testing.T) {
 	d1 := protocol.DeviceID{1}
 	d2 := protocol.DeviceID{2}
-	meta := newMetadataTracker(nil)
+	meta := newMetadataTracker(nil, events.NoopLogger)
 	meta.addFile(d1, protocol.FileInfo{Sequence: 1})
 	meta.addFile(d1, protocol.FileInfo{Sequence: 2, LocalFlags: 1})
@@ -85,7 +85,7 @@ func TestMetaDevices(t *testing.T) {
 func TestMetaSequences(t *testing.T) {
 	d1 := protocol.DeviceID{1}
-	meta := newMetadataTracker(nil)
+	meta := newMetadataTracker(nil, events.NoopLogger)
 	meta.addFile(d1, protocol.FileInfo{Sequence: 1})
 	meta.addFile(d1, protocol.FileInfo{Sequence: 2, RawInvalid: true})
@@ -105,11 +105,11 @@ func TestMetaSequences(t *testing.T) {
 }
 func TestRecalcMeta(t *testing.T) {
-	ldb := NewLowlevel(backend.OpenMemory())
+	ldb := newLowlevelMemory(t)
 	defer ldb.Close()
 	// Add some files
-	s1 := NewFileSet("test", fs.NewFilesystem(fs.FilesystemTypeFake, "fake"), ldb)
+	s1 := newFileSet(t, "test", fs.NewFilesystem(fs.FilesystemTypeFake, "fake"), ldb)
 	files := []protocol.FileInfo{
 		{Name: "a", Size: 1000},
 		{Name: "b", Size: 2000},
@@ -161,7 +161,7 @@ func TestRecalcMeta(t *testing.T) {
 	}
 	// Create a new fileset, which will realize the inconsistency and recalculate
-	s2 := NewFileSet("test", fs.NewFilesystem(fs.FilesystemTypeFake, "fake"), ldb)
+	s2 := newFileSet(t, "test", fs.NewFilesystem(fs.FilesystemTypeFake, "fake"), ldb)
 	// Verify local/global size
 	snap = s2.Snapshot()

View File

@@ -9,12 +9,10 @@ package db
 import (
 	"testing"
 	"time"
-	"github.com/syncthing/syncthing/lib/db/backend"
 )
 func TestNamespacedInt(t *testing.T) {
-	ldb := NewLowlevel(backend.OpenMemory())
+	ldb := newLowlevelMemory(t)
 	defer ldb.Close()
 	n1 := NewNamespacedKV(ldb, "foo")
@@ -62,7 +60,7 @@ func TestNamespacedInt(t *testing.T) {
 }
 func TestNamespacedTime(t *testing.T) {
-	ldb := NewLowlevel(backend.OpenMemory())
+	ldb := newLowlevelMemory(t)
 	defer ldb.Close()
 	n1 := NewNamespacedKV(ldb, "foo")
@@ -86,7 +84,7 @@ func TestNamespacedTime(t *testing.T) {
 }
 func TestNamespacedString(t *testing.T) {
-	ldb := NewLowlevel(backend.OpenMemory())
+	ldb := newLowlevelMemory(t)
 	defer ldb.Close()
 	n1 := NewNamespacedKV(ldb, "foo")
@@ -109,7 +107,7 @@ func TestNamespacedString(t *testing.T) {
 }
 func TestNamespacedReset(t *testing.T) {
-	ldb := NewLowlevel(backend.OpenMemory())
+	ldb := newLowlevelMemory(t)
 	defer ldb.Close()
 	n1 := NewNamespacedKV(ldb, "foo")

View File

@@ -719,7 +719,7 @@ func (db *schemaUpdater) updateSchemaTo14(_ int) error {
 	var key, gk []byte
 	for _, folderStr := range db.ListFolders() {
 		folder := []byte(folderStr)
-		meta := newMetadataTracker(db.keyer)
+		meta := newMetadataTracker(db.keyer, db.evLogger)
 		meta.counts.Created = 0 // Recalculate metadata afterwards
 		t, err := db.newReadWriteTransaction(meta.CommitHook(folder))

View File

@@ -13,9 +13,7 @@
 package db
 import (
-	"errors"
 	"fmt"
-	"os"
 	"github.com/syncthing/syncthing/lib/db/backend"
 	"github.com/syncthing/syncthing/lib/fs"
@@ -38,17 +36,22 @@ type FileSet struct {
 // continue iteration, false to stop.
 type Iterator func(f protocol.FileIntf) bool
-func NewFileSet(folder string, fs fs.Filesystem, db *Lowlevel) *FileSet {
+func NewFileSet(folder string, fs fs.Filesystem, db *Lowlevel) (*FileSet, error) {
 	select {
 	case <-db.oneFileSetCreated:
 	default:
 		close(db.oneFileSetCreated)
 	}
+	meta, err := db.loadMetadataTracker(folder)
+	if err != nil {
+		db.handleFailure(err)
+		return nil, err
+	}
 	s := &FileSet{
 		folder:      folder,
 		fs:          fs,
 		db:          db,
-		meta:        db.loadMetadataTracker(folder),
+		meta:        meta,
 		updateMutex: sync.NewMutex(),
 	}
 	if id := s.IndexID(protocol.LocalDeviceID); id == 0 {
@@ -59,7 +62,7 @@ func NewFileSet(folder string, fs fs.Filesystem, db *Lowlevel) *FileSet {
 			fatalError(err, fmt.Sprintf("%s Creating new IndexID", s.folder), s.db)
 		}
 	}
-	return s
+	return s, nil
 }
 func (s *FileSet) Drop(device protocol.DeviceID) {
@@ -500,14 +503,7 @@ func nativeFileIterator(fn Iterator) Iterator {
 }
 func fatalError(err error, opStr string, db *Lowlevel) {
-	if errors.Is(err, errEntryFromGlobalMissing) || errors.Is(err, errEmptyGlobal) {
-		// Inconsistency error, mark db for repair on next start.
-		if path := db.needsRepairPath(); path != "" {
-			if fd, err := os.Create(path); err == nil {
-				fd.Close()
-			}
-		}
-	}
+	db.checkErrorForRepair(err)
 	l.Warnf("Fatal error: %v: %v", opStr, err)
-	obfuscateAndPanic(err)
+	panic(ldbPathRe.ReplaceAllString(err.Error(), "$1 x: "))
 }

View File

@@ -18,6 +18,7 @@ import (
 	"github.com/d4l3k/messagediff"
 	"github.com/syncthing/syncthing/lib/db"
 	"github.com/syncthing/syncthing/lib/db/backend"
+	"github.com/syncthing/syncthing/lib/events"
 	"github.com/syncthing/syncthing/lib/fs"
 	"github.com/syncthing/syncthing/lib/protocol"
 )
@@ -142,10 +143,10 @@ func setBlocksHash(files fileList) {
 }
 func TestGlobalSet(t *testing.T) {
-	ldb := db.NewLowlevel(backend.OpenMemory())
+	ldb := newLowlevelMemory(t)
 	defer ldb.Close()
-	m := db.NewFileSet("test", fs.NewFilesystem(fs.FilesystemTypeBasic, "."), ldb)
+	m := newFileSet(t, "test", fs.NewFilesystem(fs.FilesystemTypeBasic, "."), ldb)
 	local0 := fileList{
 		protocol.FileInfo{Name: "a", Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1000}}}, Blocks: genBlocks(1)},
@@ -448,10 +449,10 @@ func TestGlobalSet(t *testing.T) {
 }
 func TestNeedWithInvalid(t *testing.T) {
-	ldb := db.NewLowlevel(backend.OpenMemory())
+	ldb := newLowlevelMemory(t)
 	defer ldb.Close()
-	s := db.NewFileSet("test", fs.NewFilesystem(fs.FilesystemTypeBasic, "."), ldb)
+	s := newFileSet(t, "test", fs.NewFilesystem(fs.FilesystemTypeBasic, "."), ldb)
 	localHave := fileList{
 		protocol.FileInfo{Name: "a", Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1000}}}, Blocks: genBlocks(1)},
@@ -488,11 +489,11 @@ func TestNeedWithInvalid(t *testing.T) {
 }
 func TestUpdateToInvalid(t *testing.T) {
-	ldb := db.NewLowlevel(backend.OpenMemory())
+	ldb := newLowlevelMemory(t)
 	defer ldb.Close()
 	folder := "test"
-	s := db.NewFileSet(folder, fs.NewFilesystem(fs.FilesystemTypeBasic, "."), ldb)
+	s := newFileSet(t, folder, fs.NewFilesystem(fs.FilesystemTypeBasic, "."), ldb)
 	f := db.NewBlockFinder(ldb)
 	localHave := fileList{
@@ -545,10 +546,10 @@ func TestUpdateToInvalid(t *testing.T) {
 }
 func TestInvalidAvailability(t *testing.T) {
-	ldb := db.NewLowlevel(backend.OpenMemory())
+	ldb := newLowlevelMemory(t)
 	defer ldb.Close()
-	s := db.NewFileSet("test", fs.NewFilesystem(fs.FilesystemTypeBasic, "."), ldb)
+	s := newFileSet(t, "test", fs.NewFilesystem(fs.FilesystemTypeBasic, "."), ldb)
 	remote0Have := fileList{
 		protocol.FileInfo{Name: "both", Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1001}}}, Blocks: genBlocks(2)},
@@ -587,10 +588,10 @@ func TestInvalidAvailability(t *testing.T) {
 }
 func TestGlobalReset(t *testing.T) {
-	ldb := db.NewLowlevel(backend.OpenMemory())
+	ldb := newLowlevelMemory(t)
 	defer ldb.Close()
-	m := db.NewFileSet("test", fs.NewFilesystem(fs.FilesystemTypeBasic, "."), ldb)
+	m := newFileSet(t, "test", fs.NewFilesystem(fs.FilesystemTypeBasic, "."), ldb)
 	local := []protocol.FileInfo{
 		{Name: "a", Sequence: 1, Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1000}}}},
@@ -626,10 +627,10 @@ func TestGlobalReset(t *testing.T) {
 }
 func TestNeed(t *testing.T) {
-	ldb := db.NewLowlevel(backend.OpenMemory())
+	ldb := newLowlevelMemory(t)
 	defer ldb.Close()
-	m := db.NewFileSet("test", fs.NewFilesystem(fs.FilesystemTypeBasic, "."), ldb)
+	m := newFileSet(t, "test", fs.NewFilesystem(fs.FilesystemTypeBasic, "."), ldb)
 	local := []protocol.FileInfo{
 		{Name: "b", Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1000}}}},
@@ -667,10 +668,10 @@ func TestNeed(t *testing.T) {
 }
 func TestSequence(t *testing.T) {
-	ldb := db.NewLowlevel(backend.OpenMemory())
+	ldb := newLowlevelMemory(t)
 	defer ldb.Close()
-	m := db.NewFileSet("test", fs.NewFilesystem(fs.FilesystemTypeBasic, "."), ldb)
+	m := newFileSet(t, "test", fs.NewFilesystem(fs.FilesystemTypeBasic, "."), ldb)
 	local1 := []protocol.FileInfo{
 		{Name: "a", Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1000}}}},
@@ -698,10 +699,10 @@ func TestSequence(t *testing.T) {
 }
 func TestListDropFolder(t *testing.T) {
-	ldb := db.NewLowlevel(backend.OpenMemory())
+	ldb := newLowlevelMemory(t)
 	defer ldb.Close()
-	s0 := db.NewFileSet("test0", fs.NewFilesystem(fs.FilesystemTypeBasic, "."), ldb)
+	s0 := newFileSet(t, "test0", fs.NewFilesystem(fs.FilesystemTypeBasic, "."), ldb)
 	local1 := []protocol.FileInfo{
 		{Name: "a", Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1000}}}},
 		{Name: "b", Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1000}}}},
@@ -709,7 +710,7 @@ func TestListDropFolder(t *testing.T) {
 	}
 	replace(s0, protocol.LocalDeviceID, local1)
-	s1 := db.NewFileSet("test1", fs.NewFilesystem(fs.FilesystemTypeBasic, "."), ldb)
+	s1 := newFileSet(t, "test1", fs.NewFilesystem(fs.FilesystemTypeBasic, "."), ldb)
 	local2 := []protocol.FileInfo{
 		{Name: "d", Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1002}}}},
 		{Name: "e", Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1002}}}},
@@ -749,10 +750,10 @@ func TestListDropFolder(t *testing.T) {
 }
 func TestGlobalNeedWithInvalid(t *testing.T) {
-	ldb := db.NewLowlevel(backend.OpenMemory())
+	ldb := newLowlevelMemory(t)
 	defer ldb.Close()
-	s := db.NewFileSet("test1", fs.NewFilesystem(fs.FilesystemTypeBasic, "."), ldb)
+	s := newFileSet(t, "test1", fs.NewFilesystem(fs.FilesystemTypeBasic, "."), ldb)
 	rem0 := fileList{
 		protocol.FileInfo{Name: "a", Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1002}}}, Blocks: genBlocks(4)},
@@ -792,10 +793,10 @@ func TestGlobalNeedWithInvalid(t *testing.T) {
 }
 func TestLongPath(t *testing.T) {
-	ldb := db.NewLowlevel(backend.OpenMemory())
+	ldb := newLowlevelMemory(t)
 	defer ldb.Close()
-	s := db.NewFileSet("test", fs.NewFilesystem(fs.FilesystemTypeBasic, "."), ldb)
+	s := newFileSet(t, "test", fs.NewFilesystem(fs.FilesystemTypeBasic, "."), ldb)
 	var b bytes.Buffer
 	for i := 0; i < 100; i++ {
@@ -833,13 +834,13 @@ func BenchmarkUpdateOneFile(b *testing.B) {
 	if err != nil {
 		b.Fatal(err)
 	}
-	ldb := db.NewLowlevel(be)
+	ldb := newLowlevel(b, be)
 	defer func() {
 		ldb.Close()
 		os.RemoveAll("testdata/benchmarkupdate.db")
 	}()
-	m := db.NewFileSet("test", fs.NewFilesystem(fs.FilesystemTypeBasic, "."), ldb)
+	m := newFileSet(b, "test", fs.NewFilesystem(fs.FilesystemTypeBasic, "."), ldb)
 	replace(m, protocol.LocalDeviceID, local0)
 	l := local0[4:5]
@@ -852,10 +853,10 @@ func BenchmarkUpdateOneFile(b *testing.B) {
 }
 func TestIndexID(t *testing.T) {
-	ldb := db.NewLowlevel(backend.OpenMemory())
+	ldb := newLowlevelMemory(t)
 	defer ldb.Close()
-	s := db.NewFileSet("test", fs.NewFilesystem(fs.FilesystemTypeBasic, "."), ldb)
+	s := newFileSet(t, "test", fs.NewFilesystem(fs.FilesystemTypeBasic, "."), ldb)
 	// The Index ID for some random device is zero by default.
 	id := s.IndexID(remoteDevice0)
@@ -885,9 +886,9 @@ func TestIndexID(t *testing.T) {
 }
 func TestDropFiles(t *testing.T) {
-	ldb := db.NewLowlevel(backend.OpenMemory())
-	m := db.NewFileSet("test", fs.NewFilesystem(fs.FilesystemTypeBasic, "."), ldb)
+	ldb := newLowlevelMemory(t)
+	m := newFileSet(t, "test", fs.NewFilesystem(fs.FilesystemTypeBasic, "."), ldb)
 	local0 := fileList{
 		protocol.FileInfo{Name: "a", Sequence: 1, Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1000}}}, Blocks: genBlocks(1)},
@@ -948,10 +949,10 @@ func TestDropFiles(t *testing.T) {
 }
 func TestIssue4701(t *testing.T) {
-	ldb := db.NewLowlevel(backend.OpenMemory())
+	ldb := newLowlevelMemory(t)
 	defer ldb.Close()
-	s := db.NewFileSet("test", fs.NewFilesystem(fs.FilesystemTypeBasic, "."), ldb)
+	s := newFileSet(t, "test", fs.NewFilesystem(fs.FilesystemTypeBasic, "."), ldb)
 	localHave := fileList{
 		protocol.FileInfo{Name: "a", Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1000}}}},
@@ -990,11 +991,11 @@ func TestIssue4701(t *testing.T) {
 }
 func TestWithHaveSequence(t *testing.T) {
-	ldb := db.NewLowlevel(backend.OpenMemory())
+	ldb := newLowlevelMemory(t)
 	defer ldb.Close()
 	folder := "test"
-	s := db.NewFileSet(folder, fs.NewFilesystem(fs.FilesystemTypeBasic, "."), ldb)
+	s := newFileSet(t, folder, fs.NewFilesystem(fs.FilesystemTypeBasic, "."), ldb)
 	// The files must not be in alphabetical order
 	localHave := fileList{
@@ -1028,11 +1029,11 @@ func TestStressWithHaveSequence(t *testing.T) {
 		t.Skip("Takes a long time")
 	}
-	ldb := db.NewLowlevel(backend.OpenMemory())
+	ldb := newLowlevelMemory(t)
 	defer ldb.Close()
 	folder := "test"
-	s := db.NewFileSet(folder, fs.NewFilesystem(fs.FilesystemTypeBasic, "."), ldb)
+	s := newFileSet(t, folder, fs.NewFilesystem(fs.FilesystemTypeBasic, "."), ldb)
 	var localHave []protocol.FileInfo
 	for i := 0; i < 100; i++ {
@ -1073,11 +1074,11 @@ loop:
} }
func TestIssue4925(t *testing.T) { func TestIssue4925(t *testing.T) {
ldb := db.NewLowlevel(backend.OpenMemory()) ldb := newLowlevelMemory(t)
defer ldb.Close() defer ldb.Close()
folder := "test" folder := "test"
s := db.NewFileSet(folder, fs.NewFilesystem(fs.FilesystemTypeBasic, "."), ldb) s := newFileSet(t, folder, fs.NewFilesystem(fs.FilesystemTypeBasic, "."), ldb)
localHave := fileList{ localHave := fileList{
protocol.FileInfo{Name: "dir"}, protocol.FileInfo{Name: "dir"},
@ -1100,12 +1101,12 @@ func TestIssue4925(t *testing.T) {
} }
func TestMoveGlobalBack(t *testing.T) { func TestMoveGlobalBack(t *testing.T) {
ldb := db.NewLowlevel(backend.OpenMemory()) ldb := newLowlevelMemory(t)
defer ldb.Close() defer ldb.Close()
folder := "test" folder := "test"
file := "foo" file := "foo"
s := db.NewFileSet(folder, fs.NewFilesystem(fs.FilesystemTypeBasic, "."), ldb) s := newFileSet(t, folder, fs.NewFilesystem(fs.FilesystemTypeBasic, "."), ldb)
localHave := fileList{{Name: file, Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1}}}, Blocks: genBlocks(1), ModifiedS: 10, Size: 1}} localHave := fileList{{Name: file, Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1}}}, Blocks: genBlocks(1), ModifiedS: 10, Size: 1}}
remote0Have := fileList{{Name: file, Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1}, {ID: remoteDevice0.Short(), Value: 1}}}, Blocks: genBlocks(2), ModifiedS: 0, Size: 2}} remote0Have := fileList{{Name: file, Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1}, {ID: remoteDevice0.Short(), Value: 1}}}, Blocks: genBlocks(2), ModifiedS: 0, Size: 2}}
@ -1169,12 +1170,12 @@ func TestMoveGlobalBack(t *testing.T) {
// needed files. // needed files.
// https://github.com/syncthing/syncthing/issues/5007 // https://github.com/syncthing/syncthing/issues/5007
func TestIssue5007(t *testing.T) { func TestIssue5007(t *testing.T) {
ldb := db.NewLowlevel(backend.OpenMemory()) ldb := newLowlevelMemory(t)
defer ldb.Close() defer ldb.Close()
folder := "test" folder := "test"
file := "foo" file := "foo"
s := db.NewFileSet(folder, fs.NewFilesystem(fs.FilesystemTypeBasic, "."), ldb) s := newFileSet(t, folder, fs.NewFilesystem(fs.FilesystemTypeBasic, "."), ldb)
fs := fileList{{Name: file, Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1}}}}} fs := fileList{{Name: file, Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1}}}}}
@ -1199,12 +1200,12 @@ func TestIssue5007(t *testing.T) {
// TestNeedDeleted checks that a file that doesn't exist locally isn't needed // TestNeedDeleted checks that a file that doesn't exist locally isn't needed
// when the global file is deleted. // when the global file is deleted.
func TestNeedDeleted(t *testing.T) { func TestNeedDeleted(t *testing.T) {
ldb := db.NewLowlevel(backend.OpenMemory()) ldb := newLowlevelMemory(t)
defer ldb.Close() defer ldb.Close()
folder := "test" folder := "test"
file := "foo" file := "foo"
s := db.NewFileSet(folder, fs.NewFilesystem(fs.FilesystemTypeBasic, "."), ldb) s := newFileSet(t, folder, fs.NewFilesystem(fs.FilesystemTypeBasic, "."), ldb)
fs := fileList{{Name: file, Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1}}}, Deleted: true}} fs := fileList{{Name: file, Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1}}}, Deleted: true}}
@ -1237,11 +1238,11 @@ func TestNeedDeleted(t *testing.T) {
} }
func TestReceiveOnlyAccounting(t *testing.T) { func TestReceiveOnlyAccounting(t *testing.T) {
ldb := db.NewLowlevel(backend.OpenMemory()) ldb := newLowlevelMemory(t)
defer ldb.Close() defer ldb.Close()
folder := "test" folder := "test"
s := db.NewFileSet(folder, fs.NewFilesystem(fs.FilesystemTypeBasic, "."), ldb) s := newFileSet(t, folder, fs.NewFilesystem(fs.FilesystemTypeBasic, "."), ldb)
local := protocol.DeviceID{1} local := protocol.DeviceID{1}
remote := protocol.DeviceID{2} remote := protocol.DeviceID{2}
@ -1342,12 +1343,12 @@ func TestReceiveOnlyAccounting(t *testing.T) {
} }
func TestNeedAfterUnignore(t *testing.T) { func TestNeedAfterUnignore(t *testing.T) {
ldb := db.NewLowlevel(backend.OpenMemory()) ldb := newLowlevelMemory(t)
defer ldb.Close() defer ldb.Close()
folder := "test" folder := "test"
file := "foo" file := "foo"
s := db.NewFileSet(folder, fs.NewFilesystem(fs.FilesystemTypeBasic, "."), ldb) s := newFileSet(t, folder, fs.NewFilesystem(fs.FilesystemTypeBasic, "."), ldb)
remID := remoteDevice0.Short() remID := remoteDevice0.Short()
@ -1376,9 +1377,9 @@ func TestNeedAfterUnignore(t *testing.T) {
func TestRemoteInvalidNotAccounted(t *testing.T) { func TestRemoteInvalidNotAccounted(t *testing.T) {
// Remote files with the invalid bit should not count. // Remote files with the invalid bit should not count.
ldb := db.NewLowlevel(backend.OpenMemory()) ldb := newLowlevelMemory(t)
defer ldb.Close() defer ldb.Close()
s := db.NewFileSet("test", fs.NewFilesystem(fs.FilesystemTypeBasic, "."), ldb) s := newFileSet(t, "test", fs.NewFilesystem(fs.FilesystemTypeBasic, "."), ldb)
files := []protocol.FileInfo{ files := []protocol.FileInfo{
{Name: "a", Size: 1234, Sequence: 42, Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1003}}}}, // valid, should count {Name: "a", Size: 1234, Sequence: 42, Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1003}}}}, // valid, should count
@ -1396,10 +1397,10 @@ func TestRemoteInvalidNotAccounted(t *testing.T) {
} }
func TestNeedWithNewerInvalid(t *testing.T) { func TestNeedWithNewerInvalid(t *testing.T) {
ldb := db.NewLowlevel(backend.OpenMemory()) ldb := newLowlevelMemory(t)
defer ldb.Close() defer ldb.Close()
s := db.NewFileSet("default", fs.NewFilesystem(fs.FilesystemTypeBasic, "."), ldb) s := newFileSet(t, "default", fs.NewFilesystem(fs.FilesystemTypeBasic, "."), ldb)
rem0ID := remoteDevice0.Short() rem0ID := remoteDevice0.Short()
rem1ID := remoteDevice1.Short() rem1ID := remoteDevice1.Short()
@ -1437,11 +1438,11 @@ func TestNeedWithNewerInvalid(t *testing.T) {
} }
func TestNeedAfterDeviceRemove(t *testing.T) { func TestNeedAfterDeviceRemove(t *testing.T) {
ldb := db.NewLowlevel(backend.OpenMemory()) ldb := newLowlevelMemory(t)
defer ldb.Close() defer ldb.Close()
file := "foo" file := "foo"
s := db.NewFileSet("test", fs.NewFilesystem(fs.FilesystemTypeBasic, "."), ldb) s := newFileSet(t, "test", fs.NewFilesystem(fs.FilesystemTypeBasic, "."), ldb)
fs := fileList{{Name: file, Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1}}}}} fs := fileList{{Name: file, Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1}}}}}
@ -1466,9 +1467,9 @@ func TestNeedAfterDeviceRemove(t *testing.T) {
func TestCaseSensitive(t *testing.T) { func TestCaseSensitive(t *testing.T) {
// Normal case sensitive lookup should work // Normal case sensitive lookup should work
ldb := db.NewLowlevel(backend.OpenMemory()) ldb := newLowlevelMemory(t)
defer ldb.Close() defer ldb.Close()
s := db.NewFileSet("test", fs.NewFilesystem(fs.FilesystemTypeBasic, "."), ldb) s := newFileSet(t, "test", fs.NewFilesystem(fs.FilesystemTypeBasic, "."), ldb)
local := []protocol.FileInfo{ local := []protocol.FileInfo{
{Name: filepath.FromSlash("D1/f1"), Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1000}}}}, {Name: filepath.FromSlash("D1/f1"), Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1000}}}},
@ -1504,9 +1505,9 @@ func TestSequenceIndex(t *testing.T) {
// Set up a db and a few files that we will manipulate. // Set up a db and a few files that we will manipulate.
ldb := db.NewLowlevel(backend.OpenMemory()) ldb := newLowlevelMemory(t)
defer ldb.Close() defer ldb.Close()
s := db.NewFileSet("test", fs.NewFilesystem(fs.FilesystemTypeBasic, "."), ldb) s := newFileSet(t, "test", fs.NewFilesystem(fs.FilesystemTypeBasic, "."), ldb)
local := []protocol.FileInfo{ local := []protocol.FileInfo{
{Name: filepath.FromSlash("banana"), Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1000}}}}, {Name: filepath.FromSlash("banana"), Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1000}}}},
@ -1598,11 +1599,11 @@ func TestSequenceIndex(t *testing.T) {
} }
func TestIgnoreAfterReceiveOnly(t *testing.T) { func TestIgnoreAfterReceiveOnly(t *testing.T) {
ldb := db.NewLowlevel(backend.OpenMemory()) ldb := newLowlevelMemory(t)
defer ldb.Close() defer ldb.Close()
file := "foo" file := "foo"
s := db.NewFileSet("test", fs.NewFilesystem(fs.FilesystemTypeBasic, "."), ldb) s := newFileSet(t, "test", fs.NewFilesystem(fs.FilesystemTypeBasic, "."), ldb)
fs := fileList{{ fs := fileList{{
Name: file, Name: file,
@ -1629,11 +1630,11 @@ func TestIgnoreAfterReceiveOnly(t *testing.T) {
// https://github.com/syncthing/syncthing/issues/6650 // https://github.com/syncthing/syncthing/issues/6650
func TestUpdateWithOneFileTwice(t *testing.T) { func TestUpdateWithOneFileTwice(t *testing.T) {
ldb := db.NewLowlevel(backend.OpenMemory()) ldb := newLowlevelMemory(t)
defer ldb.Close() defer ldb.Close()
file := "foo" file := "foo"
s := db.NewFileSet("test", fs.NewFilesystem(fs.FilesystemTypeFake, ""), ldb) s := newFileSet(t, "test", fs.NewFilesystem(fs.FilesystemTypeFake, ""), ldb)
fs := fileList{{ fs := fileList{{
Name: file, Name: file,
@ -1667,10 +1668,10 @@ func TestUpdateWithOneFileTwice(t *testing.T) {
// https://github.com/syncthing/syncthing/issues/6668 // https://github.com/syncthing/syncthing/issues/6668
func TestNeedRemoteOnly(t *testing.T) { func TestNeedRemoteOnly(t *testing.T) {
ldb := db.NewLowlevel(backend.OpenMemory()) ldb := newLowlevelMemory(t)
defer ldb.Close() defer ldb.Close()
s := db.NewFileSet("test", fs.NewFilesystem(fs.FilesystemTypeFake, ""), ldb) s := newFileSet(t, "test", fs.NewFilesystem(fs.FilesystemTypeFake, ""), ldb)
remote0Have := fileList{ remote0Have := fileList{
protocol.FileInfo{Name: "b", Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1001}}}, Blocks: genBlocks(2)}, protocol.FileInfo{Name: "b", Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1001}}}, Blocks: genBlocks(2)},
@ -1685,10 +1686,10 @@ func TestNeedRemoteOnly(t *testing.T) {
// https://github.com/syncthing/syncthing/issues/6784 // https://github.com/syncthing/syncthing/issues/6784
func TestNeedRemoteAfterReset(t *testing.T) { func TestNeedRemoteAfterReset(t *testing.T) {
ldb := db.NewLowlevel(backend.OpenMemory()) ldb := newLowlevelMemory(t)
defer ldb.Close() defer ldb.Close()
s := db.NewFileSet("test", fs.NewFilesystem(fs.FilesystemTypeFake, ""), ldb) s := newFileSet(t, "test", fs.NewFilesystem(fs.FilesystemTypeFake, ""), ldb)
files := fileList{ files := fileList{
protocol.FileInfo{Name: "b", Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1001}}}, Blocks: genBlocks(2)}, protocol.FileInfo{Name: "b", Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1001}}}, Blocks: genBlocks(2)},
@ -1711,10 +1712,10 @@ func TestNeedRemoteAfterReset(t *testing.T) {
// https://github.com/syncthing/syncthing/issues/6850 // https://github.com/syncthing/syncthing/issues/6850
func TestIgnoreLocalChanged(t *testing.T) { func TestIgnoreLocalChanged(t *testing.T) {
ldb := db.NewLowlevel(backend.OpenMemory()) ldb := newLowlevelMemory(t)
defer ldb.Close() defer ldb.Close()
s := db.NewFileSet("test", fs.NewFilesystem(fs.FilesystemTypeFake, ""), ldb) s := newFileSet(t, "test", fs.NewFilesystem(fs.FilesystemTypeFake, ""), ldb)
// Add locally changed file // Add locally changed file
files := fileList{ files := fileList{
@ -1745,10 +1746,10 @@ func TestIgnoreLocalChanged(t *testing.T) {
// an Index (as opposed to an IndexUpdate), and we don't want to lose the index // an Index (as opposed to an IndexUpdate), and we don't want to lose the index
// ID when that happens. // ID when that happens.
func TestNoIndexIDResetOnDrop(t *testing.T) { func TestNoIndexIDResetOnDrop(t *testing.T) {
ldb := db.NewLowlevel(backend.OpenMemory()) ldb := newLowlevelMemory(t)
defer ldb.Close() defer ldb.Close()
s := db.NewFileSet("test", fs.NewFilesystem(fs.FilesystemTypeFake, ""), ldb) s := newFileSet(t, "test", fs.NewFilesystem(fs.FilesystemTypeFake, ""), ldb)
s.SetIndexID(remoteDevice0, 1) s.SetIndexID(remoteDevice0, 1)
s.Drop(remoteDevice0) s.Drop(remoteDevice0)
@ -1770,8 +1771,8 @@ func TestConcurrentIndexID(t *testing.T) {
max = 10 max = 10
} }
for i := 0; i < max; i++ { for i := 0; i < max; i++ {
ldb := db.NewLowlevel(backend.OpenMemory()) ldb := newLowlevelMemory(t)
s := db.NewFileSet("test", fs.NewFilesystem(fs.FilesystemTypeFake, ""), ldb) s := newFileSet(t, "test", fs.NewFilesystem(fs.FilesystemTypeFake, ""), ldb)
go setID(s, 0) go setID(s, 0)
go setID(s, 1) go setID(s, 1)
<-done <-done
@ -1837,3 +1838,25 @@ func checkNeed(t testing.TB, s *db.FileSet, dev protocol.DeviceID, expected []pr
t.Errorf("Count incorrect (%v): expected %v, got %v", dev, exp, counts) t.Errorf("Count incorrect (%v): expected %v, got %v", dev, exp, counts)
} }
} }
func newLowlevel(t testing.TB, backend backend.Backend) *db.Lowlevel {
t.Helper()
ll, err := db.NewLowlevel(backend, events.NoopLogger)
if err != nil {
t.Fatal(err)
}
return ll
}
func newLowlevelMemory(t testing.TB) *db.Lowlevel {
return newLowlevel(t, backend.OpenMemory())
}
func newFileSet(t testing.TB, folder string, fs fs.Filesystem, ll *db.Lowlevel) *db.FileSet {
t.Helper()
fset, err := db.NewFileSet(folder, fs, ll)
if err != nil {
t.Fatal(err)
}
return fset
}
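For illustration, a minimal sketch of how a test in this package can use these helpers; the test name and file are hypothetical, but the calls only use APIs visible in this diff:

package db_test

import (
	"testing"

	"github.com/syncthing/syncthing/lib/fs"
	"github.com/syncthing/syncthing/lib/protocol"
)

// Hypothetical example: a failed Lowlevel or FileSet constructor now
// fails the test immediately instead of being silently ignored.
func TestHelpersSketch(t *testing.T) {
	ldb := newLowlevelMemory(t)
	defer ldb.Close()

	s := newFileSet(t, "example", fs.NewFilesystem(fs.FilesystemTypeFake, ""), ldb)
	s.Update(protocol.LocalDeviceID, []protocol.FileInfo{{Name: "dummyfile"}})
}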
View File
@ -8,12 +8,10 @@ package db
import ( import (
"testing" "testing"
"github.com/syncthing/syncthing/lib/db/backend"
) )
func TestSmallIndex(t *testing.T) { func TestSmallIndex(t *testing.T) {
db := NewLowlevel(backend.OpenMemory()) db := newLowlevelMemory(t)
idx := newSmallIndex(db, []byte{12, 34}) idx := newSmallIndex(db, []byte{12, 34})
// ID zero should be unallocated // ID zero should be unallocated
View File
@ -12,6 +12,7 @@ import (
"fmt" "fmt"
"github.com/syncthing/syncthing/lib/db/backend" "github.com/syncthing/syncthing/lib/db/backend"
"github.com/syncthing/syncthing/lib/events"
"github.com/syncthing/syncthing/lib/osutil" "github.com/syncthing/syncthing/lib/osutil"
"github.com/syncthing/syncthing/lib/protocol" "github.com/syncthing/syncthing/lib/protocol"
) )
@ -25,7 +26,8 @@ var (
// A readOnlyTransaction represents a database snapshot. // A readOnlyTransaction represents a database snapshot.
type readOnlyTransaction struct { type readOnlyTransaction struct {
backend.ReadTransaction backend.ReadTransaction
keyer keyer keyer keyer
evLogger events.Logger
} }
func (db *Lowlevel) newReadOnlyTransaction() (readOnlyTransaction, error) { func (db *Lowlevel) newReadOnlyTransaction() (readOnlyTransaction, error) {
@ -36,6 +38,7 @@ func (db *Lowlevel) newReadOnlyTransaction() (readOnlyTransaction, error) {
return readOnlyTransaction{ return readOnlyTransaction{
ReadTransaction: tran, ReadTransaction: tran,
keyer: db.keyer, keyer: db.keyer,
evLogger: db.evLogger,
}, nil }, nil
} }
@ -800,6 +803,7 @@ func (t readWriteTransaction) removeFromGlobal(gk, keyBuf, folder, device, file
if !haveOldGlobal { if !haveOldGlobal {
// Shouldn't ever happen, but doesn't hurt to handle. // Shouldn't ever happen, but doesn't hurt to handle.
t.evLogger.Log(events.Failure, "encountered empty global while removing item")
return keyBuf, t.Delete(gk) return keyBuf, t.Delete(gk)
} }
View File
@ -10,10 +10,11 @@ import (
"encoding/json" "encoding/json"
"io" "io"
"os" "os"
// "testing" "testing"
"github.com/syncthing/syncthing/lib/db/backend" "github.com/syncthing/syncthing/lib/db/backend"
// "github.com/syncthing/syncthing/lib/fs" "github.com/syncthing/syncthing/lib/events"
"github.com/syncthing/syncthing/lib/fs"
// "github.com/syncthing/syncthing/lib/protocol" // "github.com/syncthing/syncthing/lib/protocol"
) )
@ -69,6 +70,28 @@ func openJSONS(file string) (backend.Backend, error) {
return db, nil return db, nil
} }
func newLowlevel(t testing.TB, backend backend.Backend) *Lowlevel {
t.Helper()
ll, err := NewLowlevel(backend, events.NoopLogger)
if err != nil {
t.Fatal(err)
}
return ll
}
func newLowlevelMemory(t testing.TB) *Lowlevel {
return newLowlevel(t, backend.OpenMemory())
}
func newFileSet(t testing.TB, folder string, fs fs.Filesystem, db *Lowlevel) *FileSet {
t.Helper()
fset, err := NewFileSet(folder, fs, db)
if err != nil {
t.Fatal(err)
}
return fset
}
// The following commented tests were used to generate jsons files to stdout for // The following commented tests were used to generate jsons files to stdout for
// future tests and are kept here for reference (reuse). // future tests and are kept here for reference (reuse).
@ -76,7 +99,7 @@ func openJSONS(file string) (backend.Backend, error) {
// local and remote, in the format used in 0.14.48. // local and remote, in the format used in 0.14.48.
// func TestGenerateIgnoredFilesDB(t *testing.T) { // func TestGenerateIgnoredFilesDB(t *testing.T) {
// db := OpenMemory() // db := OpenMemory()
// fs := NewFileSet("test", fs.NewFilesystem(fs.FilesystemTypeBasic, "."), db) // fs := newFileSet(t, "test", fs.NewFilesystem(fs.FilesystemTypeBasic, "."), db)
// fs.Update(protocol.LocalDeviceID, []protocol.FileInfo{ // fs.Update(protocol.LocalDeviceID, []protocol.FileInfo{
// { // invalid (ignored) file // { // invalid (ignored) file
// Name: "foo", // Name: "foo",
@ -111,7 +134,7 @@ func openJSONS(file string) (backend.Backend, error) {
// format used in 0.14.45. // format used in 0.14.45.
// func TestGenerateUpdate0to3DB(t *testing.T) { // func TestGenerateUpdate0to3DB(t *testing.T) {
// db := OpenMemory() // db := OpenMemory()
// fs := NewFileSet(update0to3Folder, fs.NewFilesystem(fs.FilesystemTypeBasic, "."), db) // fs := newFileSet(t, update0to3Folder, fs.NewFilesystem(fs.FilesystemTypeBasic, "."), db)
// for devID, files := range haveUpdate0to3 { // for devID, files := range haveUpdate0to3 {
// fs.Update(devID, files) // fs.Update(devID, files)
// } // }
@ -119,14 +142,14 @@ func openJSONS(file string) (backend.Backend, error) {
// } // }
// func TestGenerateUpdateTo10(t *testing.T) { // func TestGenerateUpdateTo10(t *testing.T) {
// db := NewLowlevel(backend.OpenMemory()) // db := newLowlevelMemory(t)
// defer db.Close() // defer db.Close()
// if err := UpdateSchema(db); err != nil { // if err := UpdateSchema(db); err != nil {
// t.Fatal(err) // t.Fatal(err)
// } // }
// fs := NewFileSet("test", fs.NewFilesystem(fs.FilesystemTypeFake, ""), db) // fs := newFileSet(t, "test", fs.NewFilesystem(fs.FilesystemTypeFake, ""), db)
// files := []protocol.FileInfo{ // files := []protocol.FileInfo{
// {Name: "a", Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1000}}}, Deleted: true, Sequence: 1}, // {Name: "a", Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1000}}}, Deleted: true, Sequence: 1},
View File
@ -34,6 +34,7 @@ const (
AuditLog LocationEnum = "auditLog" AuditLog LocationEnum = "auditLog"
GUIAssets LocationEnum = "GUIAssets" GUIAssets LocationEnum = "GUIAssets"
DefFolder LocationEnum = "defFolder" DefFolder LocationEnum = "defFolder"
FailuresFile LocationEnum = "FailuresFile"
) )
type BaseDirEnum string type BaseDirEnum string
@ -97,6 +98,7 @@ var locationTemplates = map[LocationEnum]string{
AuditLog: "${data}/audit-${timestamp}.log", AuditLog: "${data}/audit-${timestamp}.log",
GUIAssets: "${config}/gui", GUIAssets: "${config}/gui",
DefFolder: "${userHome}/Sync", DefFolder: "${userHome}/Sync",
FailuresFile: "${data}/failures-unreported.txt",
} }
var locations = make(map[LocationEnum]string) var locations = make(map[LocationEnum]string)
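As a minimal sketch, the new location can be resolved like any other entry, assuming the package's existing Get accessor (the one used for the other LocationEnum values); the program below only prints the expanded path:

package main

import (
	"fmt"

	"github.com/syncthing/syncthing/lib/locations"
)

func main() {
	// Assumption: locations.Get expands FailuresFile via the template
	// above, i.e. to <data dir>/failures-unreported.txt.
	fmt.Println(locations.Get(locations.FailuresFile))
}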
View File
@ -14,8 +14,6 @@ import (
"time" "time"
"github.com/syncthing/syncthing/lib/config" "github.com/syncthing/syncthing/lib/config"
"github.com/syncthing/syncthing/lib/db"
"github.com/syncthing/syncthing/lib/db/backend"
"github.com/syncthing/syncthing/lib/fs" "github.com/syncthing/syncthing/lib/fs"
"github.com/syncthing/syncthing/lib/protocol" "github.com/syncthing/syncthing/lib/protocol"
"github.com/syncthing/syncthing/lib/scanner" "github.com/syncthing/syncthing/lib/scanner"
@ -457,7 +455,7 @@ func setupROFolder(t *testing.T) (*testModel, *receiveOnlyFolder) {
cfg.Folders = []config.FolderConfiguration{fcfg} cfg.Folders = []config.FolderConfiguration{fcfg}
w.Replace(cfg) w.Replace(cfg)
m := newModel(w, myID, "syncthing", "dev", db.NewLowlevel(backend.OpenMemory()), nil) m := newModel(t, w, myID, "syncthing", "dev", nil)
m.ServeBackground() m.ServeBackground()
<-m.started <-m.started
must(t, m.ScanFolder("ro")) must(t, m.ScanFolder("ro"))
View File
@ -91,10 +91,10 @@ func createFile(t *testing.T, name string, fs fs.Filesystem) protocol.FileInfo {
} }
// Sets up a folder and model, but makes sure the services aren't actually running. // Sets up a folder and model, but makes sure the services aren't actually running.
func setupSendReceiveFolder(files ...protocol.FileInfo) (*testModel, *sendReceiveFolder) { func setupSendReceiveFolder(t testing.TB, files ...protocol.FileInfo) (*testModel, *sendReceiveFolder) {
w, fcfg := tmpDefaultWrapper() w, fcfg := tmpDefaultWrapper()
// Initialise model and stop immediately. // Initialise model and stop immediately.
model := setupModel(w) model := setupModel(t, w)
model.cancel() model.cancel()
<-model.stopped <-model.stopped
f := model.folderRunners[fcfg.ID].(*sendReceiveFolder) f := model.folderRunners[fcfg.ID].(*sendReceiveFolder)
@ -129,7 +129,7 @@ func TestHandleFile(t *testing.T) {
requiredFile := existingFile requiredFile := existingFile
requiredFile.Blocks = blocks[1:] requiredFile.Blocks = blocks[1:]
m, f := setupSendReceiveFolder(existingFile) m, f := setupSendReceiveFolder(t, existingFile)
defer cleanupSRFolder(f, m) defer cleanupSRFolder(f, m)
copyChan := make(chan copyBlocksState, 1) copyChan := make(chan copyBlocksState, 1)
@ -171,7 +171,7 @@ func TestHandleFileWithTemp(t *testing.T) {
requiredFile := existingFile requiredFile := existingFile
requiredFile.Blocks = blocks[1:] requiredFile.Blocks = blocks[1:]
m, f := setupSendReceiveFolder(existingFile) m, f := setupSendReceiveFolder(t, existingFile)
defer cleanupSRFolder(f, m) defer cleanupSRFolder(f, m)
if _, err := prepareTmpFile(f.Filesystem()); err != nil { if _, err := prepareTmpFile(f.Filesystem()); err != nil {
@ -227,7 +227,7 @@ func TestCopierFinder(t *testing.T) {
requiredFile.Blocks = blocks[1:] requiredFile.Blocks = blocks[1:]
requiredFile.Name = "file2" requiredFile.Name = "file2"
m, f := setupSendReceiveFolder(existingFile) m, f := setupSendReceiveFolder(t, existingFile)
f.CopyRangeMethod = method f.CopyRangeMethod = method
defer cleanupSRFolder(f, m) defer cleanupSRFolder(f, m)
@ -308,7 +308,7 @@ func TestCopierFinder(t *testing.T) {
func TestWeakHash(t *testing.T) { func TestWeakHash(t *testing.T) {
// Setup the model/pull environment // Setup the model/pull environment
model, fo := setupSendReceiveFolder() model, fo := setupSendReceiveFolder(t)
defer cleanupSRFolder(fo, model) defer cleanupSRFolder(fo, model)
ffs := fo.Filesystem() ffs := fo.Filesystem()
@ -437,7 +437,7 @@ func TestCopierCleanup(t *testing.T) {
// Create a file // Create a file
file := setupFile("test", []int{0}) file := setupFile("test", []int{0})
file.Size = 1 file.Size = 1
m, f := setupSendReceiveFolder(file) m, f := setupSendReceiveFolder(t, file)
defer cleanupSRFolder(f, m) defer cleanupSRFolder(f, m)
file.Blocks = []protocol.BlockInfo{blocks[1]} file.Blocks = []protocol.BlockInfo{blocks[1]}
@ -470,7 +470,7 @@ func TestCopierCleanup(t *testing.T) {
func TestDeregisterOnFailInCopy(t *testing.T) { func TestDeregisterOnFailInCopy(t *testing.T) {
file := setupFile("filex", []int{0, 2, 0, 0, 5, 0, 0, 8}) file := setupFile("filex", []int{0, 2, 0, 0, 5, 0, 0, 8})
m, f := setupSendReceiveFolder() m, f := setupSendReceiveFolder(t)
defer cleanupSRFolder(f, m) defer cleanupSRFolder(f, m)
// Set up our event subscription early // Set up our event subscription early
@ -570,7 +570,7 @@ func TestDeregisterOnFailInCopy(t *testing.T) {
func TestDeregisterOnFailInPull(t *testing.T) { func TestDeregisterOnFailInPull(t *testing.T) {
file := setupFile("filex", []int{0, 2, 0, 0, 5, 0, 0, 8}) file := setupFile("filex", []int{0, 2, 0, 0, 5, 0, 0, 8})
m, f := setupSendReceiveFolder() m, f := setupSendReceiveFolder(t)
defer cleanupSRFolder(f, m) defer cleanupSRFolder(f, m)
// Set up our event subscription early // Set up our event subscription early
@ -673,7 +673,7 @@ func TestDeregisterOnFailInPull(t *testing.T) {
} }
func TestIssue3164(t *testing.T) { func TestIssue3164(t *testing.T) {
m, f := setupSendReceiveFolder() m, f := setupSendReceiveFolder(t)
defer cleanupSRFolder(f, m) defer cleanupSRFolder(f, m)
ffs := f.Filesystem() ffs := f.Filesystem()
tmpDir := ffs.URI() tmpDir := ffs.URI()
@ -764,7 +764,7 @@ func TestDiffEmpty(t *testing.T) {
// option is true and the permissions do not match between the file on disk and // option is true and the permissions do not match between the file on disk and
// in the db. // in the db.
func TestDeleteIgnorePerms(t *testing.T) { func TestDeleteIgnorePerms(t *testing.T) {
m, f := setupSendReceiveFolder() m, f := setupSendReceiveFolder(t)
defer cleanupSRFolder(f, m) defer cleanupSRFolder(f, m)
ffs := f.Filesystem() ffs := f.Filesystem()
f.IgnorePerms = true f.IgnorePerms = true
@ -802,7 +802,7 @@ func TestCopyOwner(t *testing.T) {
// Set up a folder with the CopyParentOwner bit and backed by a fake // Set up a folder with the CopyParentOwner bit and backed by a fake
// filesystem. // filesystem.
m, f := setupSendReceiveFolder() m, f := setupSendReceiveFolder(t)
defer cleanupSRFolder(f, m) defer cleanupSRFolder(f, m)
f.folder.FolderConfiguration = config.NewFolderConfiguration(m.id, f.ID, f.Label, fs.FilesystemTypeFake, "/TestCopyOwner") f.folder.FolderConfiguration = config.NewFolderConfiguration(m.id, f.ID, f.Label, fs.FilesystemTypeFake, "/TestCopyOwner")
f.folder.FolderConfiguration.CopyOwnershipFromParent = true f.folder.FolderConfiguration.CopyOwnershipFromParent = true
@ -904,7 +904,7 @@ func TestCopyOwner(t *testing.T) {
// TestSRConflictReplaceFileByDir checks that a conflict is created when an existing file // TestSRConflictReplaceFileByDir checks that a conflict is created when an existing file
// is replaced with a directory and versions are conflicting // is replaced with a directory and versions are conflicting
func TestSRConflictReplaceFileByDir(t *testing.T) { func TestSRConflictReplaceFileByDir(t *testing.T) {
m, f := setupSendReceiveFolder() m, f := setupSendReceiveFolder(t)
defer cleanupSRFolder(f, m) defer cleanupSRFolder(f, m)
ffs := f.Filesystem() ffs := f.Filesystem()
@ -936,7 +936,7 @@ func TestSRConflictReplaceFileByDir(t *testing.T) {
// TestSRConflictReplaceFileByLink checks that a conflict is created when an existing file // TestSRConflictReplaceFileByLink checks that a conflict is created when an existing file
// is replaced with a link and versions are conflicting // is replaced with a link and versions are conflicting
func TestSRConflictReplaceFileByLink(t *testing.T) { func TestSRConflictReplaceFileByLink(t *testing.T) {
m, f := setupSendReceiveFolder() m, f := setupSendReceiveFolder(t)
defer cleanupSRFolder(f, m) defer cleanupSRFolder(f, m)
ffs := f.Filesystem() ffs := f.Filesystem()
@ -969,7 +969,7 @@ func TestSRConflictReplaceFileByLink(t *testing.T) {
// TestDeleteBehindSymlink checks that we don't delete or schedule a scan // TestDeleteBehindSymlink checks that we don't delete or schedule a scan
// when trying to delete a file behind a symlink. // when trying to delete a file behind a symlink.
func TestDeleteBehindSymlink(t *testing.T) { func TestDeleteBehindSymlink(t *testing.T) {
m, f := setupSendReceiveFolder() m, f := setupSendReceiveFolder(t)
defer cleanupSRFolder(f, m) defer cleanupSRFolder(f, m)
ffs := f.Filesystem() ffs := f.Filesystem()
@ -1020,7 +1020,7 @@ func TestDeleteBehindSymlink(t *testing.T) {
// Reproduces https://github.com/syncthing/syncthing/issues/6559 // Reproduces https://github.com/syncthing/syncthing/issues/6559
func TestPullCtxCancel(t *testing.T) { func TestPullCtxCancel(t *testing.T) {
m, f := setupSendReceiveFolder() m, f := setupSendReceiveFolder(t)
defer cleanupSRFolder(f, m) defer cleanupSRFolder(f, m)
pullChan := make(chan pullBlockState) pullChan := make(chan pullBlockState)
@ -1062,7 +1062,7 @@ func TestPullCtxCancel(t *testing.T) {
} }
func TestPullDeleteUnscannedDir(t *testing.T) { func TestPullDeleteUnscannedDir(t *testing.T) {
m, f := setupSendReceiveFolder() m, f := setupSendReceiveFolder(t)
defer cleanupSRFolder(f, m) defer cleanupSRFolder(f, m)
ffs := f.Filesystem() ffs := f.Filesystem()
@ -1091,7 +1091,7 @@ func TestPullDeleteUnscannedDir(t *testing.T) {
} }
func TestPullCaseOnlyPerformFinish(t *testing.T) { func TestPullCaseOnlyPerformFinish(t *testing.T) {
m, f := setupSendReceiveFolder() m, f := setupSendReceiveFolder(t)
defer cleanupSRFolder(f, m) defer cleanupSRFolder(f, m)
ffs := f.Filesystem() ffs := f.Filesystem()
@ -1152,7 +1152,7 @@ func TestPullCaseOnlySymlink(t *testing.T) {
} }
func testPullCaseOnlyDirOrSymlink(t *testing.T, dir bool) { func testPullCaseOnlyDirOrSymlink(t *testing.T, dir bool) {
m, f := setupSendReceiveFolder() m, f := setupSendReceiveFolder(t)
defer cleanupSRFolder(f, m) defer cleanupSRFolder(f, m)
ffs := f.Filesystem() ffs := f.Filesystem()
@ -1207,7 +1207,7 @@ func testPullCaseOnlyDirOrSymlink(t *testing.T, dir bool) {
} }
func TestPullTempFileCaseConflict(t *testing.T) { func TestPullTempFileCaseConflict(t *testing.T) {
m, f := setupSendReceiveFolder() m, f := setupSendReceiveFolder(t)
defer cleanupSRFolder(f, m) defer cleanupSRFolder(f, m)
copyChan := make(chan copyBlocksState, 1) copyChan := make(chan copyBlocksState, 1)
@ -1233,7 +1233,7 @@ func TestPullTempFileCaseConflict(t *testing.T) {
} }
func TestPullCaseOnlyRename(t *testing.T) { func TestPullCaseOnlyRename(t *testing.T) {
m, f := setupSendReceiveFolder() m, f := setupSendReceiveFolder(t)
defer cleanupSRFolder(f, m) defer cleanupSRFolder(f, m)
// tempNameConfl := fs.TempName(confl) // tempNameConfl := fs.TempName(confl)
@ -1276,7 +1276,7 @@ func TestPullSymlinkOverExistingWindows(t *testing.T) {
t.Skip() t.Skip()
} }
m, f := setupSendReceiveFolder() m, f := setupSendReceiveFolder(t)
defer cleanupSRFolder(f, m) defer cleanupSRFolder(f, m)
name := "foo" name := "foo"
@ -1316,7 +1316,7 @@ func TestPullSymlinkOverExistingWindows(t *testing.T) {
} }
func TestPullDeleteCaseConflict(t *testing.T) { func TestPullDeleteCaseConflict(t *testing.T) {
m, f := setupSendReceiveFolder() m, f := setupSendReceiveFolder(t)
defer cleanupSRFolder(f, m) defer cleanupSRFolder(f, m)
name := "foo" name := "foo"
View File
@ -134,6 +134,7 @@ type model struct {
// folderIOLimiter limits the number of concurrent I/O heavy operations, // folderIOLimiter limits the number of concurrent I/O heavy operations,
// such as scans and pulls. // such as scans and pulls.
folderIOLimiter *byteSemaphore folderIOLimiter *byteSemaphore
fatalChan chan error
// fields protected by fmut // fields protected by fmut
fmut sync.RWMutex fmut sync.RWMutex
@ -217,6 +218,7 @@ func NewModel(cfg config.Wrapper, id protocol.DeviceID, clientName, clientVersio
shortID: id.Short(), shortID: id.Short(),
globalRequestLimiter: newByteSemaphore(1024 * cfg.Options().MaxConcurrentIncomingRequestKiB()), globalRequestLimiter: newByteSemaphore(1024 * cfg.Options().MaxConcurrentIncomingRequestKiB()),
folderIOLimiter: newByteSemaphore(cfg.Options().MaxFolderConcurrency()), folderIOLimiter: newByteSemaphore(cfg.Options().MaxFolderConcurrency()),
fatalChan: make(chan error),
// fields protected by fmut // fields protected by fmut
fmut: sync.NewRWMutex(), fmut: sync.NewRWMutex(),
@ -253,7 +255,27 @@ func NewModel(cfg config.Wrapper, id protocol.DeviceID, clientName, clientVersio
} }
func (m *model) serve(ctx context.Context) error { func (m *model) serve(ctx context.Context) error {
// Add and start folders defer m.closeAllConnectionsAndWait()
m.cfg.Subscribe(m)
defer m.cfg.Unsubscribe(m)
if err := m.initFolders(); err != nil {
close(m.started)
return util.AsFatalErr(err, util.ExitError)
}
close(m.started)
select {
case <-ctx.Done():
return ctx.Err()
case err := <-m.fatalChan:
return util.AsFatalErr(err, util.ExitError)
}
}
func (m *model) initFolders() error {
cacheIgnoredFiles := m.cfg.Options().CacheIgnoredFiles cacheIgnoredFiles := m.cfg.Options().CacheIgnoredFiles
existingDevices := m.cfg.Devices() existingDevices := m.cfg.Devices()
existingFolders := m.cfg.Folders() existingFolders := m.cfg.Folders()
@ -263,7 +285,10 @@ func (m *model) serve(ctx context.Context) error {
folderCfg.CreateRoot() folderCfg.CreateRoot()
continue continue
} }
m.newFolder(folderCfg, cacheIgnoredFiles) err := m.newFolder(folderCfg, cacheIgnoredFiles)
if err != nil {
return err
}
clusterConfigDevices.add(folderCfg.DeviceIDs()) clusterConfigDevices.add(folderCfg.DeviceIDs())
} }
@ -271,12 +296,10 @@ func (m *model) serve(ctx context.Context) error {
m.cleanPending(existingDevices, existingFolders, ignoredDevices, nil) m.cleanPending(existingDevices, existingFolders, ignoredDevices, nil)
m.resendClusterConfig(clusterConfigDevices.AsSlice()) m.resendClusterConfig(clusterConfigDevices.AsSlice())
m.cfg.Subscribe(m) return nil
}
close(m.started) func (m *model) closeAllConnectionsAndWait() {
<-ctx.Done()
m.cfg.Unsubscribe(m)
m.pmut.RLock() m.pmut.RLock()
closed := make([]chan struct{}, 0, len(m.conn)) closed := make([]chan struct{}, 0, len(m.conn))
for id, conn := range m.conn { for id, conn := range m.conn {
@ -287,7 +310,13 @@ func (m *model) serve(ctx context.Context) error {
for _, c := range closed { for _, c := range closed {
<-c <-c
} }
return nil }
func (m *model) fatal(err error) {
select {
case m.fatalChan <- err:
default:
}
} }
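The non-blocking send in fatal means an error is only acted on while serve is waiting for one; otherwise it is dropped because a shutdown is already under way. A standalone sketch of the same pattern, with an illustrative svc type that is not Syncthing code:

package main

import (
	"context"
	"errors"
	"fmt"
)

type svc struct {
	fatalChan chan error
}

// fatal mirrors the pattern above: hand the error to serve if it is
// listening, otherwise drop it.
func (s *svc) fatal(err error) {
	select {
	case s.fatalChan <- err:
	default:
	}
}

// serve mirrors the tail of the serve loop: return on context
// cancellation or on the first fatal error.
func (s *svc) serve(ctx context.Context) error {
	select {
	case <-ctx.Done():
		return ctx.Err()
	case err := <-s.fatalChan:
		return err
	}
}

func main() {
	s := &svc{fatalChan: make(chan error)}
	ctx, cancel := context.WithCancel(context.Background())

	done := make(chan error, 1)
	go func() { done <- s.serve(ctx) }()

	// May be dropped if serve has not reached its select yet; that is the
	// intended behaviour when nothing is left to act on the error.
	s.fatal(errors.New("creating folder fileset failed"))

	cancel() // ensure serve returns even if the error was dropped
	fmt.Println(<-done)
}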
// StartDeadlockDetector starts a deadlock detector on the models locks which // StartDeadlockDetector starts a deadlock detector on the models locks which
@ -472,7 +501,7 @@ func (m *model) cleanupFolderLocked(cfg config.FolderConfiguration) {
delete(m.folderVersioners, cfg.ID) delete(m.folderVersioners, cfg.ID)
} }
func (m *model) restartFolder(from, to config.FolderConfiguration, cacheIgnoredFiles bool) { func (m *model) restartFolder(from, to config.FolderConfiguration, cacheIgnoredFiles bool) error {
if len(to.ID) == 0 { if len(to.ID) == 0 {
panic("bug: cannot restart empty folder ID") panic("bug: cannot restart empty folder ID")
} }
@ -512,7 +541,11 @@ func (m *model) restartFolder(from, to config.FolderConfiguration, cacheIgnoredF
// Create a new fset. Might take a while and we do it under // Create a new fset. Might take a while and we do it under
// locking, but it's unsafe to create fset:s concurrently so // locking, but it's unsafe to create fset:s concurrently so
// that's the price we pay. // that's the price we pay.
fset = db.NewFileSet(folder, to.Filesystem(), m.db) var err error
fset, err = db.NewFileSet(folder, to.Filesystem(), m.db)
if err != nil {
return fmt.Errorf("restarting %v: %w", to.Description(), err)
}
} }
m.addAndStartFolderLocked(to, fset, cacheIgnoredFiles) m.addAndStartFolderLocked(to, fset, cacheIgnoredFiles)
} }
@ -547,12 +580,17 @@ func (m *model) restartFolder(from, to config.FolderConfiguration, cacheIgnoredF
infoMsg = "Restarted" infoMsg = "Restarted"
} }
l.Infof("%v folder %v (%v)", infoMsg, to.Description(), to.Type) l.Infof("%v folder %v (%v)", infoMsg, to.Description(), to.Type)
return nil
} }
func (m *model) newFolder(cfg config.FolderConfiguration, cacheIgnoredFiles bool) { func (m *model) newFolder(cfg config.FolderConfiguration, cacheIgnoredFiles bool) error {
// Creating the fileset can take a long time (metadata calculation) so // Creating the fileset can take a long time (metadata calculation) so
// we do it outside of the lock. // we do it outside of the lock.
fset := db.NewFileSet(cfg.ID, cfg.Filesystem(), m.db) fset, err := db.NewFileSet(cfg.ID, cfg.Filesystem(), m.db)
if err != nil {
return fmt.Errorf("adding %v: %w", cfg.Description(), err)
}
m.fmut.Lock() m.fmut.Lock()
defer m.fmut.Unlock() defer m.fmut.Unlock()
@ -569,6 +607,7 @@ func (m *model) newFolder(cfg config.FolderConfiguration, cacheIgnoredFiles bool
m.pmut.RUnlock() m.pmut.RUnlock()
m.addAndStartFolderLocked(cfg, fset, cacheIgnoredFiles) m.addAndStartFolderLocked(cfg, fset, cacheIgnoredFiles)
return nil
} }
func (m *model) UsageReportingStats(report *contract.Report, version int, preview bool) { func (m *model) UsageReportingStats(report *contract.Report, version int, preview bool) {
@ -2579,7 +2618,10 @@ func (m *model) CommitConfiguration(from, to config.Configuration) bool {
l.Infoln("Paused folder", cfg.Description()) l.Infoln("Paused folder", cfg.Description())
} else { } else {
l.Infoln("Adding folder", cfg.Description()) l.Infoln("Adding folder", cfg.Description())
m.newFolder(cfg, to.Options.CacheIgnoredFiles) if err := m.newFolder(cfg, to.Options.CacheIgnoredFiles); err != nil {
m.fatal(err)
return true
}
} }
clusterConfigDevices.add(cfg.DeviceIDs()) clusterConfigDevices.add(cfg.DeviceIDs())
} }
@ -2603,7 +2645,10 @@ func (m *model) CommitConfiguration(from, to config.Configuration) bool {
// This folder exists on both sides. Settings might have changed. // This folder exists on both sides. Settings might have changed.
// Check if anything differs that requires a restart. // Check if anything differs that requires a restart.
if !reflect.DeepEqual(fromCfg.RequiresRestartOnly(), toCfg.RequiresRestartOnly()) || from.Options.CacheIgnoredFiles != to.Options.CacheIgnoredFiles { if !reflect.DeepEqual(fromCfg.RequiresRestartOnly(), toCfg.RequiresRestartOnly()) || from.Options.CacheIgnoredFiles != to.Options.CacheIgnoredFiles {
m.restartFolder(fromCfg, toCfg, to.Options.CacheIgnoredFiles) if err := m.restartFolder(fromCfg, toCfg, to.Options.CacheIgnoredFiles); err != nil {
m.fatal(err)
return true
}
clusterConfigDevices.add(fromCfg.DeviceIDs()) clusterConfigDevices.add(fromCfg.DeviceIDs())
clusterConfigDevices.add(toCfg.DeviceIDs()) clusterConfigDevices.add(toCfg.DeviceIDs())
} }
View File
@ -118,10 +118,10 @@ func createTmpWrapper(cfg config.Configuration) config.Wrapper {
return wrapper return wrapper
} }
func newState(cfg config.Configuration) *testModel { func newState(t testing.TB, cfg config.Configuration) *testModel {
wcfg := createTmpWrapper(cfg) wcfg := createTmpWrapper(cfg)
m := setupModel(wcfg) m := setupModel(t, wcfg)
for _, dev := range cfg.Devices { for _, dev := range cfg.Devices {
m.AddConnection(&fakeConnection{id: dev.DeviceID, model: m}, protocol.Hello{}) m.AddConnection(&fakeConnection{id: dev.DeviceID, model: m}, protocol.Hello{})
@ -154,7 +154,7 @@ func addFolderDevicesToClusterConfig(cc protocol.ClusterConfig, remote protocol.
} }
func TestRequest(t *testing.T) { func TestRequest(t *testing.T) {
m := setupModel(defaultCfgWrapper) m := setupModel(t, defaultCfgWrapper)
defer cleanupModel(m) defer cleanupModel(m)
// Existing, shared file // Existing, shared file
@ -227,7 +227,7 @@ func BenchmarkIndex_100(b *testing.B) {
} }
func benchmarkIndex(b *testing.B, nfiles int) { func benchmarkIndex(b *testing.B, nfiles int) {
m := setupModel(defaultCfgWrapper) m := setupModel(b, defaultCfgWrapper)
defer cleanupModel(m) defer cleanupModel(m)
files := genFiles(nfiles) files := genFiles(nfiles)
@ -253,7 +253,7 @@ func BenchmarkIndexUpdate_10000_1(b *testing.B) {
} }
func benchmarkIndexUpdate(b *testing.B, nfiles, nufiles int) { func benchmarkIndexUpdate(b *testing.B, nfiles, nufiles int) {
m := setupModel(defaultCfgWrapper) m := setupModel(b, defaultCfgWrapper)
defer cleanupModel(m) defer cleanupModel(m)
files := genFiles(nfiles) files := genFiles(nfiles)
@ -269,7 +269,7 @@ func benchmarkIndexUpdate(b *testing.B, nfiles, nufiles int) {
} }
func BenchmarkRequestOut(b *testing.B) { func BenchmarkRequestOut(b *testing.B) {
m := setupModel(defaultCfgWrapper) m := setupModel(b, defaultCfgWrapper)
defer cleanupModel(m) defer cleanupModel(m)
const n = 1000 const n = 1000
@ -295,7 +295,7 @@ func BenchmarkRequestOut(b *testing.B) {
} }
func BenchmarkRequestInSingleFile(b *testing.B) { func BenchmarkRequestInSingleFile(b *testing.B) {
m := setupModel(defaultCfgWrapper) m := setupModel(b, defaultCfgWrapper)
defer cleanupModel(m) defer cleanupModel(m)
buf := make([]byte, 128<<10) buf := make([]byte, 128<<10)
@ -331,8 +331,7 @@ func TestDeviceRename(t *testing.T) {
} }
cfg := config.Wrap("testdata/tmpconfig.xml", rawCfg, device1, events.NoopLogger) cfg := config.Wrap("testdata/tmpconfig.xml", rawCfg, device1, events.NoopLogger)
db := db.NewLowlevel(backend.OpenMemory()) m := newModel(t, cfg, myID, "syncthing", "dev", nil)
m := newModel(cfg, myID, "syncthing", "dev", db, nil)
if cfg.Devices()[device1].Name != "" { if cfg.Devices()[device1].Name != "" {
t.Errorf("Device already has a name") t.Errorf("Device already has a name")
@ -427,10 +426,8 @@ func TestClusterConfig(t *testing.T) {
}, },
} }
db := db.NewLowlevel(backend.OpenMemory())
wrapper := createTmpWrapper(cfg) wrapper := createTmpWrapper(cfg)
m := newModel(wrapper, myID, "syncthing", "dev", db, nil) m := newModel(t, wrapper, myID, "syncthing", "dev", nil)
m.ServeBackground() m.ServeBackground()
defer cleanupModel(m) defer cleanupModel(m)
@ -497,7 +494,7 @@ func TestIntroducer(t *testing.T) {
return false return false
} }
m := newState(config.Configuration{ m := newState(t, config.Configuration{
Devices: []config.DeviceConfiguration{ Devices: []config.DeviceConfiguration{
{ {
DeviceID: device1, DeviceID: device1,
@ -538,7 +535,7 @@ func TestIntroducer(t *testing.T) {
} }
cleanupModel(m) cleanupModel(m)
m = newState(config.Configuration{ m = newState(t, config.Configuration{
Devices: []config.DeviceConfiguration{ Devices: []config.DeviceConfiguration{
{ {
DeviceID: device1, DeviceID: device1,
@ -589,7 +586,7 @@ func TestIntroducer(t *testing.T) {
} }
cleanupModel(m) cleanupModel(m)
m = newState(config.Configuration{ m = newState(t, config.Configuration{
Devices: []config.DeviceConfiguration{ Devices: []config.DeviceConfiguration{
{ {
DeviceID: device1, DeviceID: device1,
@ -637,7 +634,7 @@ func TestIntroducer(t *testing.T) {
// 1. Introducer flag no longer set on device // 1. Introducer flag no longer set on device
cleanupModel(m) cleanupModel(m)
m = newState(config.Configuration{ m = newState(t, config.Configuration{
Devices: []config.DeviceConfiguration{ Devices: []config.DeviceConfiguration{
{ {
DeviceID: device1, DeviceID: device1,
@ -684,7 +681,7 @@ func TestIntroducer(t *testing.T) {
// 2. SkipIntroductionRemovals is set // 2. SkipIntroductionRemovals is set
cleanupModel(m) cleanupModel(m)
m = newState(config.Configuration{ m = newState(t, config.Configuration{
Devices: []config.DeviceConfiguration{ Devices: []config.DeviceConfiguration{
{ {
DeviceID: device1, DeviceID: device1,
@ -737,7 +734,7 @@ func TestIntroducer(t *testing.T) {
// Test device not being removed as it's shared without an introducer. // Test device not being removed as it's shared without an introducer.
cleanupModel(m) cleanupModel(m)
m = newState(config.Configuration{ m = newState(t, config.Configuration{
Devices: []config.DeviceConfiguration{ Devices: []config.DeviceConfiguration{
{ {
DeviceID: device1, DeviceID: device1,
@ -784,7 +781,7 @@ func TestIntroducer(t *testing.T) {
// Test device not being removed as it's shared by a different introducer. // Test device not being removed as it's shared by a different introducer.
cleanupModel(m) cleanupModel(m)
m = newState(config.Configuration{ m = newState(t, config.Configuration{
Devices: []config.DeviceConfiguration{ Devices: []config.DeviceConfiguration{
{ {
DeviceID: device1, DeviceID: device1,
@ -831,7 +828,7 @@ func TestIntroducer(t *testing.T) {
} }
func TestIssue4897(t *testing.T) { func TestIssue4897(t *testing.T) {
m := newState(config.Configuration{ m := newState(t, config.Configuration{
Devices: []config.DeviceConfiguration{ Devices: []config.DeviceConfiguration{
{ {
DeviceID: device1, DeviceID: device1,
@ -863,7 +860,7 @@ func TestIssue4897(t *testing.T) {
// PR-comments: https://github.com/syncthing/syncthing/pull/5069/files#r203146546 // PR-comments: https://github.com/syncthing/syncthing/pull/5069/files#r203146546
// Issue: https://github.com/syncthing/syncthing/pull/5509 // Issue: https://github.com/syncthing/syncthing/pull/5509
func TestIssue5063(t *testing.T) { func TestIssue5063(t *testing.T) {
m := newState(defaultAutoAcceptCfg) m := newState(t, defaultAutoAcceptCfg)
defer cleanupModel(m) defer cleanupModel(m)
m.pmut.Lock() m.pmut.Lock()
@ -918,7 +915,7 @@ func TestAutoAcceptRejected(t *testing.T) {
for i := range tcfg.Devices { for i := range tcfg.Devices {
tcfg.Devices[i].AutoAcceptFolders = false tcfg.Devices[i].AutoAcceptFolders = false
} }
m := newState(tcfg) m := newState(t, tcfg)
defer cleanupModel(m) defer cleanupModel(m)
id := srand.String(8) id := srand.String(8)
defer os.RemoveAll(id) defer os.RemoveAll(id)
@ -931,7 +928,7 @@ func TestAutoAcceptRejected(t *testing.T) {
func TestAutoAcceptNewFolder(t *testing.T) { func TestAutoAcceptNewFolder(t *testing.T) {
// New folder // New folder
m := newState(defaultAutoAcceptCfg) m := newState(t, defaultAutoAcceptCfg)
defer cleanupModel(m) defer cleanupModel(m)
id := srand.String(8) id := srand.String(8)
defer os.RemoveAll(id) defer os.RemoveAll(id)
@ -942,7 +939,7 @@ func TestAutoAcceptNewFolder(t *testing.T) {
} }
func TestAutoAcceptNewFolderFromTwoDevices(t *testing.T) { func TestAutoAcceptNewFolderFromTwoDevices(t *testing.T) {
m := newState(defaultAutoAcceptCfg) m := newState(t, defaultAutoAcceptCfg)
defer cleanupModel(m) defer cleanupModel(m)
id := srand.String(8) id := srand.String(8)
defer os.RemoveAll(id) defer os.RemoveAll(id)
@ -962,7 +959,7 @@ func TestAutoAcceptNewFolderFromTwoDevices(t *testing.T) {
func TestAutoAcceptNewFolderFromOnlyOneDevice(t *testing.T) { func TestAutoAcceptNewFolderFromOnlyOneDevice(t *testing.T) {
modifiedCfg := defaultAutoAcceptCfg.Copy() modifiedCfg := defaultAutoAcceptCfg.Copy()
modifiedCfg.Devices[2].AutoAcceptFolders = false modifiedCfg.Devices[2].AutoAcceptFolders = false
m := newState(modifiedCfg) m := newState(t, modifiedCfg)
id := srand.String(8) id := srand.String(8)
defer os.RemoveAll(id) defer os.RemoveAll(id)
defer cleanupModel(m) defer cleanupModel(m)
@ -1005,7 +1002,7 @@ func TestAutoAcceptNewFolderPremutationsNoPanic(t *testing.T) {
fcfg.Paused = localFolderPaused fcfg.Paused = localFolderPaused
cfg.Folders = append(cfg.Folders, fcfg) cfg.Folders = append(cfg.Folders, fcfg)
} }
m := newState(cfg) m := newState(t, cfg)
m.ClusterConfig(device1, protocol.ClusterConfig{ m.ClusterConfig(device1, protocol.ClusterConfig{
Folders: []protocol.Folder{dev1folder}, Folders: []protocol.Folder{dev1folder},
}) })
@ -1027,7 +1024,7 @@ func TestAutoAcceptMultipleFolders(t *testing.T) {
defer os.RemoveAll(id1) defer os.RemoveAll(id1)
id2 := srand.String(8) id2 := srand.String(8)
defer os.RemoveAll(id2) defer os.RemoveAll(id2)
m := newState(defaultAutoAcceptCfg) m := newState(t, defaultAutoAcceptCfg)
defer cleanupModel(m) defer cleanupModel(m)
m.ClusterConfig(device1, createClusterConfig(device1, id1, id2)) m.ClusterConfig(device1, createClusterConfig(device1, id1, id2))
if fcfg, ok := m.cfg.Folder(id1); !ok || !fcfg.SharedWith(device1) { if fcfg, ok := m.cfg.Folder(id1); !ok || !fcfg.SharedWith(device1) {
@ -1052,7 +1049,7 @@ func TestAutoAcceptExistingFolder(t *testing.T) {
Path: idOther, // To check that path does not get changed. Path: idOther, // To check that path does not get changed.
}, },
} }
m := newState(tcfg) m := newState(t, tcfg)
defer cleanupModel(m) defer cleanupModel(m)
if fcfg, ok := m.cfg.Folder(id); !ok || fcfg.SharedWith(device1) { if fcfg, ok := m.cfg.Folder(id); !ok || fcfg.SharedWith(device1) {
t.Error("missing folder, or shared", id) t.Error("missing folder, or shared", id)
@ -1078,7 +1075,7 @@ func TestAutoAcceptNewAndExistingFolder(t *testing.T) {
Path: id1, // from previous test case, to verify that path doesn't get changed. Path: id1, // from previous test case, to verify that path doesn't get changed.
}, },
} }
m := newState(tcfg) m := newState(t, tcfg)
defer cleanupModel(m) defer cleanupModel(m)
if fcfg, ok := m.cfg.Folder(id1); !ok || fcfg.SharedWith(device1) { if fcfg, ok := m.cfg.Folder(id1); !ok || fcfg.SharedWith(device1) {
t.Error("missing folder, or shared", id1) t.Error("missing folder, or shared", id1)
@ -1108,7 +1105,7 @@ func TestAutoAcceptAlreadyShared(t *testing.T) {
}, },
}, },
} }
m := newState(tcfg) m := newState(t, tcfg)
defer cleanupModel(m) defer cleanupModel(m)
if fcfg, ok := m.cfg.Folder(id); !ok || !fcfg.SharedWith(device1) { if fcfg, ok := m.cfg.Folder(id); !ok || !fcfg.SharedWith(device1) {
t.Error("missing folder, or not shared", id) t.Error("missing folder, or not shared", id)
@ -1129,7 +1126,7 @@ func TestAutoAcceptNameConflict(t *testing.T) {
testOs.MkdirAll(label, 0777) testOs.MkdirAll(label, 0777)
defer os.RemoveAll(id) defer os.RemoveAll(id)
defer os.RemoveAll(label) defer os.RemoveAll(label)
m := newState(defaultAutoAcceptCfg) m := newState(t, defaultAutoAcceptCfg)
defer cleanupModel(m) defer cleanupModel(m)
m.ClusterConfig(device1, protocol.ClusterConfig{ m.ClusterConfig(device1, protocol.ClusterConfig{
Folders: []protocol.Folder{ Folders: []protocol.Folder{
@ -1146,7 +1143,7 @@ func TestAutoAcceptNameConflict(t *testing.T) {
func TestAutoAcceptPrefersLabel(t *testing.T) { func TestAutoAcceptPrefersLabel(t *testing.T) {
// Prefers label, falls back to ID. // Prefers label, falls back to ID.
m := newState(defaultAutoAcceptCfg) m := newState(t, defaultAutoAcceptCfg)
id := srand.String(8) id := srand.String(8)
label := srand.String(8) label := srand.String(8)
defer os.RemoveAll(id) defer os.RemoveAll(id)
@ -1169,7 +1166,7 @@ func TestAutoAcceptFallsBackToID(t *testing.T) {
testOs := &fatalOs{t} testOs := &fatalOs{t}
// Prefers label, falls back to ID. // Prefers label, falls back to ID.
m := newState(defaultAutoAcceptCfg) m := newState(t, defaultAutoAcceptCfg)
id := srand.String(8) id := srand.String(8)
label := srand.String(8) label := srand.String(8)
t.Log(id, label) t.Log(id, label)
@ -1207,7 +1204,7 @@ func TestAutoAcceptPausedWhenFolderConfigChanged(t *testing.T) {
DeviceID: device1, DeviceID: device1,
}) })
tcfg.Folders = []config.FolderConfiguration{fcfg} tcfg.Folders = []config.FolderConfiguration{fcfg}
m := newState(tcfg) m := newState(t, tcfg)
defer cleanupModel(m) defer cleanupModel(m)
if fcfg, ok := m.cfg.Folder(id); !ok || !fcfg.SharedWith(device1) { if fcfg, ok := m.cfg.Folder(id); !ok || !fcfg.SharedWith(device1) {
t.Error("missing folder, or not shared", id) t.Error("missing folder, or not shared", id)
@ -1257,7 +1254,7 @@ func TestAutoAcceptPausedWhenFolderConfigNotChanged(t *testing.T) {
}, },
}, fcfg.Devices...) // Need to ensure this device order to avoid folder restart. }, fcfg.Devices...) // Need to ensure this device order to avoid folder restart.
tcfg.Folders = []config.FolderConfiguration{fcfg} tcfg.Folders = []config.FolderConfiguration{fcfg}
m := newState(tcfg) m := newState(t, tcfg)
defer cleanupModel(m) defer cleanupModel(m)
if fcfg, ok := m.cfg.Folder(id); !ok || !fcfg.SharedWith(device1) { if fcfg, ok := m.cfg.Folder(id); !ok || !fcfg.SharedWith(device1) {
t.Error("missing folder, or not shared", id) t.Error("missing folder, or not shared", id)
@ -1289,7 +1286,7 @@ func TestAutoAcceptPausedWhenFolderConfigNotChanged(t *testing.T) {
func TestAutoAcceptEnc(t *testing.T) { func TestAutoAcceptEnc(t *testing.T) {
tcfg := defaultAutoAcceptCfg.Copy() tcfg := defaultAutoAcceptCfg.Copy()
m := newState(tcfg) m := newState(t, tcfg)
defer cleanupModel(m) defer cleanupModel(m)
id := srand.String(8) id := srand.String(8)
@ -1460,10 +1457,10 @@ func TestIgnores(t *testing.T) {
mustRemove(t, defaultFs.MkdirAll(config.DefaultMarkerName, 0644)) mustRemove(t, defaultFs.MkdirAll(config.DefaultMarkerName, 0644))
writeFile(defaultFs, ".stignore", []byte(".*\nquux\n"), 0644) writeFile(defaultFs, ".stignore", []byte(".*\nquux\n"), 0644)
m := setupModel(defaultCfgWrapper) m := setupModel(t, defaultCfgWrapper)
defer cleanupModel(m) defer cleanupModel(m)
folderIgnoresAlwaysReload(m, defaultFolderConfig) folderIgnoresAlwaysReload(t, m, defaultFolderConfig)
// Make sure the initial scan has finished (ScanFolders is blocking) // Make sure the initial scan has finished (ScanFolders is blocking)
m.ScanFolders() m.ScanFolders()
@ -1521,7 +1518,7 @@ func TestEmptyIgnores(t *testing.T) {
mustRemove(t, defaultFs.RemoveAll(config.DefaultMarkerName)) mustRemove(t, defaultFs.RemoveAll(config.DefaultMarkerName))
must(t, defaultFs.MkdirAll(config.DefaultMarkerName, 0644)) must(t, defaultFs.MkdirAll(config.DefaultMarkerName, 0644))
m := setupModel(defaultCfgWrapper) m := setupModel(t, defaultCfgWrapper)
defer cleanupModel(m) defer cleanupModel(m)
if err := m.SetIgnores("default", []string{}); err != nil { if err := m.SetIgnores("default", []string{}); err != nil {
@ -1573,12 +1570,6 @@ func waitForState(t *testing.T, sub events.Subscription, folder, expected string
func TestROScanRecovery(t *testing.T) { func TestROScanRecovery(t *testing.T) {
testOs := &fatalOs{t} testOs := &fatalOs{t}
ldb := db.NewLowlevel(backend.OpenMemory())
set := db.NewFileSet("default", defaultFs, ldb)
set.Update(protocol.LocalDeviceID, []protocol.FileInfo{
{Name: "dummyfile", Version: protocol.Vector{Counters: []protocol.Counter{{ID: 42, Value: 1}}}},
})
fcfg := config.FolderConfiguration{ fcfg := config.FolderConfiguration{
ID: "default", ID: "default",
Path: "rotestfolder", Path: "rotestfolder",
@ -1594,10 +1585,15 @@ func TestROScanRecovery(t *testing.T) {
}, },
}, },
}) })
m := newModel(t, cfg, myID, "syncthing", "dev", nil)
set := newFileSet(t, "default", defaultFs, m.db)
set.Update(protocol.LocalDeviceID, []protocol.FileInfo{
{Name: "dummyfile", Version: protocol.Vector{Counters: []protocol.Counter{{ID: 42, Value: 1}}}},
})
testOs.RemoveAll(fcfg.Path) testOs.RemoveAll(fcfg.Path)
m := newModel(cfg, myID, "syncthing", "dev", ldb, nil)
sub := m.evLogger.Subscribe(events.StateChanged) sub := m.evLogger.Subscribe(events.StateChanged)
defer sub.Unsubscribe() defer sub.Unsubscribe()
m.ServeBackground() m.ServeBackground()
@ -1626,12 +1622,6 @@ func TestROScanRecovery(t *testing.T) {
func TestRWScanRecovery(t *testing.T) { func TestRWScanRecovery(t *testing.T) {
testOs := &fatalOs{t} testOs := &fatalOs{t}
ldb := db.NewLowlevel(backend.OpenMemory())
set := db.NewFileSet("default", defaultFs, ldb)
set.Update(protocol.LocalDeviceID, []protocol.FileInfo{
{Name: "dummyfile", Version: protocol.Vector{Counters: []protocol.Counter{{ID: 42, Value: 1}}}},
})
fcfg := config.FolderConfiguration{ fcfg := config.FolderConfiguration{
ID: "default", ID: "default",
Path: "rwtestfolder", Path: "rwtestfolder",
@ -1647,10 +1637,15 @@ func TestRWScanRecovery(t *testing.T) {
}, },
}, },
}) })
m := newModel(t, cfg, myID, "syncthing", "dev", nil)
testOs.RemoveAll(fcfg.Path) testOs.RemoveAll(fcfg.Path)
m := newModel(cfg, myID, "syncthing", "dev", ldb, nil) set := newFileSet(t, "default", defaultFs, m.db)
set.Update(protocol.LocalDeviceID, []protocol.FileInfo{
{Name: "dummyfile", Version: protocol.Vector{Counters: []protocol.Counter{{ID: 42, Value: 1}}}},
})
sub := m.evLogger.Subscribe(events.StateChanged) sub := m.evLogger.Subscribe(events.StateChanged)
defer sub.Unsubscribe() defer sub.Unsubscribe()
m.ServeBackground() m.ServeBackground()
@ -1678,7 +1673,7 @@ func TestRWScanRecovery(t *testing.T) {
func TestGlobalDirectoryTree(t *testing.T) { func TestGlobalDirectoryTree(t *testing.T) {
w, fcfg := tmpDefaultWrapper() w, fcfg := tmpDefaultWrapper()
m := setupModel(w) m := setupModel(t, w)
defer cleanupModelAndRemoveDir(m, fcfg.Filesystem().URI()) defer cleanupModelAndRemoveDir(m, fcfg.Filesystem().URI())
b := func(isfile bool, path ...string) protocol.FileInfo { b := func(isfile bool, path ...string) protocol.FileInfo {
@ -1928,7 +1923,7 @@ func TestGlobalDirectoryTree(t *testing.T) {
func TestGlobalDirectorySelfFixing(t *testing.T) { func TestGlobalDirectorySelfFixing(t *testing.T) {
w, fcfg := tmpDefaultWrapper() w, fcfg := tmpDefaultWrapper()
m := setupModel(w) m := setupModel(t, w)
defer cleanupModelAndRemoveDir(m, fcfg.Filesystem().URI()) defer cleanupModelAndRemoveDir(m, fcfg.Filesystem().URI())
b := func(isfile bool, path ...string) protocol.FileInfo { b := func(isfile bool, path ...string) protocol.FileInfo {
@ -2101,8 +2096,7 @@ func BenchmarkTree_100_10(b *testing.B) {
} }
func benchmarkTree(b *testing.B, n1, n2 int) { func benchmarkTree(b *testing.B, n1, n2 int) {
db := db.NewLowlevel(backend.OpenMemory()) m := newModel(b, defaultCfgWrapper, myID, "syncthing", "dev", nil)
m := newModel(defaultCfgWrapper, myID, "syncthing", "dev", db, nil)
m.ServeBackground() m.ServeBackground()
defer cleanupModel(m) defer cleanupModel(m)
@ -2130,7 +2124,7 @@ func TestIssue3028(t *testing.T) {
// Create a model and default folder // Create a model and default folder
m := setupModel(defaultCfgWrapper) m := setupModel(t, defaultCfgWrapper)
defer cleanupModel(m) defer cleanupModel(m)
// Get a count of how many files are there now // Get a count of how many files are there now
@ -2164,11 +2158,10 @@ func TestIssue3028(t *testing.T) {
} }
func TestIssue4357(t *testing.T) { func TestIssue4357(t *testing.T) {
db := db.NewLowlevel(backend.OpenMemory())
cfg := defaultCfgWrapper.RawCopy() cfg := defaultCfgWrapper.RawCopy()
// Create a separate wrapper not to pollute other tests. // Create a separate wrapper not to pollute other tests.
wrapper := createTmpWrapper(config.Configuration{}) wrapper := createTmpWrapper(config.Configuration{})
m := newModel(wrapper, myID, "syncthing", "dev", db, nil) m := newModel(t, wrapper, myID, "syncthing", "dev", nil)
m.ServeBackground() m.ServeBackground()
defer cleanupModel(m) defer cleanupModel(m)
@ -2271,7 +2264,7 @@ func TestIssue2782(t *testing.T) {
} }
defer os.RemoveAll(testDir) defer os.RemoveAll(testDir)
m := setupModel(defaultCfgWrapper) m := setupModel(t, defaultCfgWrapper)
defer cleanupModel(m) defer cleanupModel(m)
if err := m.ScanFolder("default"); err != nil { if err := m.ScanFolder("default"); err != nil {
@ -2287,9 +2280,9 @@ func TestIssue2782(t *testing.T) {
} }
func TestIndexesForUnknownDevicesDropped(t *testing.T) { func TestIndexesForUnknownDevicesDropped(t *testing.T) {
dbi := db.NewLowlevel(backend.OpenMemory()) m := newModel(t, defaultCfgWrapper, myID, "syncthing", "dev", nil)
files := db.NewFileSet("default", defaultFs, dbi) files := newFileSet(t, "default", defaultFs, m.db)
files.Drop(device1) files.Drop(device1)
files.Update(device1, genFiles(1)) files.Update(device1, genFiles(1))
files.Drop(device2) files.Drop(device2)
@ -2299,12 +2292,11 @@ func TestIndexesForUnknownDevicesDropped(t *testing.T) {
t.Error("expected two devices") t.Error("expected two devices")
} }
m := newModel(defaultCfgWrapper, myID, "syncthing", "dev", dbi, nil)
m.newFolder(defaultFolderConfig, false) m.newFolder(defaultFolderConfig, false)
defer cleanupModel(m) defer cleanupModel(m)
// Remote sequence is cached, hence it needs to be recreated. // Remote sequence is cached, hence it needs to be recreated.
files = db.NewFileSet("default", defaultFs, dbi) files = newFileSet(t, "default", defaultFs, m.db)
if l := len(files.ListDevices()); l != 1 { if l := len(files.ListDevices()); l != 1 {
t.Errorf("Expected one device got %v", l) t.Errorf("Expected one device got %v", l)
@ -2319,7 +2311,7 @@ func TestSharedWithClearedOnDisconnect(t *testing.T) {
wcfg.SetFolder(fcfg) wcfg.SetFolder(fcfg)
defer os.Remove(wcfg.ConfigPath()) defer os.Remove(wcfg.ConfigPath())
m := setupModel(wcfg) m := setupModel(t, wcfg)
defer cleanupModel(m) defer cleanupModel(m)
conn1 := &fakeConnection{id: device1, model: m} conn1 := &fakeConnection{id: device1, model: m}
@ -2413,7 +2405,7 @@ func TestIssue3496(t *testing.T) {
// percentages. Let's make sure that doesn't happen. Also do some general // percentages. Let's make sure that doesn't happen. Also do some general
// checks on the completion calculation stuff. // checks on the completion calculation stuff.
m := setupModel(defaultCfgWrapper) m := setupModel(t, defaultCfgWrapper)
defer cleanupModel(m) defer cleanupModel(m)
m.ScanFolder("default") m.ScanFolder("default")
@ -2484,7 +2476,7 @@ func TestIssue3496(t *testing.T) {
} }
func TestIssue3804(t *testing.T) { func TestIssue3804(t *testing.T) {
m := setupModel(defaultCfgWrapper) m := setupModel(t, defaultCfgWrapper)
defer cleanupModel(m) defer cleanupModel(m)
// Subdirs ending in slash should be accepted // Subdirs ending in slash should be accepted
@ -2495,7 +2487,7 @@ func TestIssue3804(t *testing.T) {
} }
func TestIssue3829(t *testing.T) { func TestIssue3829(t *testing.T) {
m := setupModel(defaultCfgWrapper) m := setupModel(t, defaultCfgWrapper)
defer cleanupModel(m) defer cleanupModel(m)
// Empty subdirs should be accepted // Empty subdirs should be accepted
@ -2514,7 +2506,7 @@ func TestNoRequestsFromPausedDevices(t *testing.T) {
fcfg.Devices = append(fcfg.Devices, config.FolderDeviceConfiguration{DeviceID: device2}) fcfg.Devices = append(fcfg.Devices, config.FolderDeviceConfiguration{DeviceID: device2})
wcfg.SetFolder(fcfg) wcfg.SetFolder(fcfg)
m := setupModel(wcfg) m := setupModel(t, wcfg)
defer cleanupModel(m) defer cleanupModel(m)
file := testDataExpected["foo"] file := testDataExpected["foo"]
@ -2598,7 +2590,7 @@ func TestIssue2571(t *testing.T) {
fd.Close() fd.Close()
} }
m := setupModel(w) m := setupModel(t, w)
defer cleanupModel(m) defer cleanupModel(m)
must(t, testFs.RemoveAll("toLink")) must(t, testFs.RemoveAll("toLink"))
@ -2637,7 +2629,7 @@ func TestIssue4573(t *testing.T) {
must(t, err) must(t, err)
fd.Close() fd.Close()
m := setupModel(w) m := setupModel(t, w)
defer cleanupModel(m) defer cleanupModel(m)
must(t, testFs.Chmod("inaccessible", 0000)) must(t, testFs.Chmod("inaccessible", 0000))
@ -2689,7 +2681,7 @@ func TestInternalScan(t *testing.T) {
} }
} }
m := setupModel(w) m := setupModel(t, w)
defer cleanupModel(m) defer cleanupModel(m)
for _, dir := range baseDirs { for _, dir := range baseDirs {
@ -2714,12 +2706,6 @@ func TestInternalScan(t *testing.T) {
func TestCustomMarkerName(t *testing.T) { func TestCustomMarkerName(t *testing.T) {
testOs := &fatalOs{t} testOs := &fatalOs{t}
ldb := db.NewLowlevel(backend.OpenMemory())
set := db.NewFileSet("default", defaultFs, ldb)
set.Update(protocol.LocalDeviceID, []protocol.FileInfo{
{Name: "dummyfile"},
})
fcfg := testFolderConfigTmp() fcfg := testFolderConfigTmp()
fcfg.ID = "default" fcfg.ID = "default"
fcfg.RescanIntervalS = 1 fcfg.RescanIntervalS = 1
@ -2735,7 +2721,12 @@ func TestCustomMarkerName(t *testing.T) {
testOs.RemoveAll(fcfg.Path) testOs.RemoveAll(fcfg.Path)
m := newModel(cfg, myID, "syncthing", "dev", ldb, nil) m := newModel(t, cfg, myID, "syncthing", "dev", nil)
set := newFileSet(t, "default", defaultFs, m.db)
set.Update(protocol.LocalDeviceID, []protocol.FileInfo{
{Name: "dummyfile"},
})
sub := m.evLogger.Subscribe(events.StateChanged) sub := m.evLogger.Subscribe(events.StateChanged)
defer sub.Unsubscribe() defer sub.Unsubscribe()
m.ServeBackground() m.ServeBackground()
@ -2761,7 +2752,7 @@ func TestRemoveDirWithContent(t *testing.T) {
must(t, err) must(t, err)
fd.Close() fd.Close()
m := setupModel(defaultCfgWrapper) m := setupModel(t, defaultCfgWrapper)
defer cleanupModel(m) defer cleanupModel(m)
dir, ok := m.CurrentFolderFile("default", "dirwith") dir, ok := m.CurrentFolderFile("default", "dirwith")
@ -2812,7 +2803,7 @@ func TestRemoveDirWithContent(t *testing.T) {
} }
func TestIssue4475(t *testing.T) { func TestIssue4475(t *testing.T) {
m, conn, fcfg := setupModelWithConnection() m, conn, fcfg := setupModelWithConnection(t)
defer cleanupModel(m) defer cleanupModel(m)
testFs := fcfg.Filesystem() testFs := fcfg.Filesystem()
@ -2884,7 +2875,7 @@ func TestVersionRestore(t *testing.T) {
} }
cfg := createTmpWrapper(rawConfig) cfg := createTmpWrapper(rawConfig)
m := setupModel(cfg) m := setupModel(t, cfg)
defer cleanupModel(m) defer cleanupModel(m)
m.ScanFolder("default") m.ScanFolder("default")
@ -3062,7 +3053,7 @@ func TestVersionRestore(t *testing.T) {
func TestPausedFolders(t *testing.T) { func TestPausedFolders(t *testing.T) {
// Create a separate wrapper not to pollute other tests. // Create a separate wrapper not to pollute other tests.
wrapper := createTmpWrapper(defaultCfgWrapper.RawCopy()) wrapper := createTmpWrapper(defaultCfgWrapper.RawCopy())
m := setupModel(wrapper) m := setupModel(t, wrapper)
defer cleanupModel(m) defer cleanupModel(m)
if err := m.ScanFolder("default"); err != nil { if err := m.ScanFolder("default"); err != nil {
@ -3087,10 +3078,9 @@ func TestPausedFolders(t *testing.T) {
func TestIssue4094(t *testing.T) { func TestIssue4094(t *testing.T) {
testOs := &fatalOs{t} testOs := &fatalOs{t}
db := db.NewLowlevel(backend.OpenMemory())
// Create a separate wrapper not to pollute other tests. // Create a separate wrapper not to pollute other tests.
wrapper := createTmpWrapper(config.Configuration{}) wrapper := createTmpWrapper(config.Configuration{})
m := newModel(wrapper, myID, "syncthing", "dev", db, nil) m := newModel(t, wrapper, myID, "syncthing", "dev", nil)
m.ServeBackground() m.ServeBackground()
defer cleanupModel(m) defer cleanupModel(m)
@ -3123,11 +3113,8 @@ func TestIssue4094(t *testing.T) {
func TestIssue4903(t *testing.T) { func TestIssue4903(t *testing.T) {
testOs := &fatalOs{t} testOs := &fatalOs{t}
db := db.NewLowlevel(backend.OpenMemory())
// Create a separate wrapper not to pollute other tests.
wrapper := createTmpWrapper(config.Configuration{}) wrapper := createTmpWrapper(config.Configuration{})
m := newModel(wrapper, myID, "syncthing", "dev", db, nil) m := setupModel(t, wrapper)
m.ServeBackground()
defer cleanupModel(m) defer cleanupModel(m)
// Force the model to wire itself and add the folders // Force the model to wire itself and add the folders
@ -3159,7 +3146,7 @@ func TestIssue4903(t *testing.T) {
func TestIssue5002(t *testing.T) { func TestIssue5002(t *testing.T) {
// recheckFile should not panic when given an index equal to the number of blocks // recheckFile should not panic when given an index equal to the number of blocks
m := setupModel(defaultCfgWrapper) m := setupModel(t, defaultCfgWrapper)
defer cleanupModel(m) defer cleanupModel(m)
if err := m.ScanFolder("default"); err != nil { if err := m.ScanFolder("default"); err != nil {
@ -3178,7 +3165,7 @@ func TestIssue5002(t *testing.T) {
} }
func TestParentOfUnignored(t *testing.T) { func TestParentOfUnignored(t *testing.T) {
m := newState(defaultCfg) m := newState(t, defaultCfg)
defer cleanupModel(m) defer cleanupModel(m)
defer defaultFolderConfig.Filesystem().Remove(".stignore") defer defaultFolderConfig.Filesystem().Remove(".stignore")
@ -3202,7 +3189,7 @@ func TestFolderRestartZombies(t *testing.T) {
folderCfg.FilesystemType = fs.FilesystemTypeFake folderCfg.FilesystemType = fs.FilesystemTypeFake
wrapper.SetFolder(folderCfg) wrapper.SetFolder(folderCfg)
m := setupModel(wrapper) m := setupModel(t, wrapper)
defer cleanupModel(m) defer cleanupModel(m)
// Make sure the folder is up and running, because we want to count it. // Make sure the folder is up and running, because we want to count it.
@ -3248,7 +3235,7 @@ func TestRequestLimit(t *testing.T) {
dev, _ := wrapper.Device(device1) dev, _ := wrapper.Device(device1)
dev.MaxRequestKiB = 1 dev.MaxRequestKiB = 1
wrapper.SetDevice(dev) wrapper.SetDevice(dev)
m, _ := setupModelWithConnectionFromWrapper(wrapper) m, _ := setupModelWithConnectionFromWrapper(t, wrapper)
defer cleanupModel(m) defer cleanupModel(m)
file := "tmpfile" file := "tmpfile"
@ -3292,7 +3279,7 @@ func TestConnCloseOnRestart(t *testing.T) {
}() }()
w, fcfg := tmpDefaultWrapper() w, fcfg := tmpDefaultWrapper()
m := setupModel(w) m := setupModel(t, w)
defer cleanupModelAndRemoveDir(m, fcfg.Filesystem().URI()) defer cleanupModelAndRemoveDir(m, fcfg.Filesystem().URI())
br := &testutils.BlockingRW{} br := &testutils.BlockingRW{}
@ -3331,7 +3318,7 @@ func TestModTimeWindow(t *testing.T) {
tfs := fcfg.Filesystem() tfs := fcfg.Filesystem()
fcfg.RawModTimeWindowS = 2 fcfg.RawModTimeWindowS = 2
w.SetFolder(fcfg) w.SetFolder(fcfg)
m := setupModel(w) m := setupModel(t, w)
defer cleanupModelAndRemoveDir(m, tfs.URI()) defer cleanupModelAndRemoveDir(m, tfs.URI())
name := "foo" name := "foo"
@ -3383,7 +3370,7 @@ func TestModTimeWindow(t *testing.T) {
} }
func TestDevicePause(t *testing.T) { func TestDevicePause(t *testing.T) {
m, _, fcfg := setupModelWithConnection() m, _, fcfg := setupModelWithConnection(t)
defer cleanupModelAndRemoveDir(m, fcfg.Filesystem().URI()) defer cleanupModelAndRemoveDir(m, fcfg.Filesystem().URI())
sub := m.evLogger.Subscribe(events.DevicePaused) sub := m.evLogger.Subscribe(events.DevicePaused)
@ -3411,7 +3398,7 @@ func TestDevicePause(t *testing.T) {
} }
func TestDeviceWasSeen(t *testing.T) { func TestDeviceWasSeen(t *testing.T) {
m, _, fcfg := setupModelWithConnection() m, _, fcfg := setupModelWithConnection(t)
defer cleanupModelAndRemoveDir(m, fcfg.Filesystem().URI()) defer cleanupModelAndRemoveDir(m, fcfg.Filesystem().URI())
m.deviceWasSeen(device1) m.deviceWasSeen(device1)
@ -3458,7 +3445,7 @@ func TestSummaryPausedNoError(t *testing.T) {
wcfg, fcfg := tmpDefaultWrapper() wcfg, fcfg := tmpDefaultWrapper()
fcfg.Paused = true fcfg.Paused = true
wcfg.SetFolder(fcfg) wcfg.SetFolder(fcfg)
m := setupModel(wcfg) m := setupModel(t, wcfg)
defer cleanupModel(m) defer cleanupModel(m)
fss := NewFolderSummaryService(wcfg, m, myID, events.NoopLogger) fss := NewFolderSummaryService(wcfg, m, myID, events.NoopLogger)
@ -3471,7 +3458,7 @@ func TestFolderAPIErrors(t *testing.T) {
wcfg, fcfg := tmpDefaultWrapper() wcfg, fcfg := tmpDefaultWrapper()
fcfg.Paused = true fcfg.Paused = true
wcfg.SetFolder(fcfg) wcfg.SetFolder(fcfg)
m := setupModel(wcfg) m := setupModel(t, wcfg)
defer cleanupModel(m) defer cleanupModel(m)
methods := []func(folder string) error{ methods := []func(folder string) error{
@ -3501,7 +3488,7 @@ func TestFolderAPIErrors(t *testing.T) {
func TestRenameSequenceOrder(t *testing.T) { func TestRenameSequenceOrder(t *testing.T) {
wcfg, fcfg := tmpDefaultWrapper() wcfg, fcfg := tmpDefaultWrapper()
m := setupModel(wcfg) m := setupModel(t, wcfg)
defer cleanupModel(m) defer cleanupModel(m)
numFiles := 20 numFiles := 20
@ -3571,7 +3558,7 @@ func TestRenameSequenceOrder(t *testing.T) {
func TestRenameSameFile(t *testing.T) { func TestRenameSameFile(t *testing.T) {
wcfg, fcfg := tmpDefaultWrapper() wcfg, fcfg := tmpDefaultWrapper()
m := setupModel(wcfg) m := setupModel(t, wcfg)
defer cleanupModel(m) defer cleanupModel(m)
ffs := fcfg.Filesystem() ffs := fcfg.Filesystem()
@ -3621,7 +3608,7 @@ func TestRenameSameFile(t *testing.T) {
func TestRenameEmptyFile(t *testing.T) { func TestRenameEmptyFile(t *testing.T) {
wcfg, fcfg := tmpDefaultWrapper() wcfg, fcfg := tmpDefaultWrapper()
m := setupModel(wcfg) m := setupModel(t, wcfg)
defer cleanupModel(m) defer cleanupModel(m)
ffs := fcfg.Filesystem() ffs := fcfg.Filesystem()
@ -3697,7 +3684,7 @@ func TestRenameEmptyFile(t *testing.T) {
func TestBlockListMap(t *testing.T) { func TestBlockListMap(t *testing.T) {
wcfg, fcfg := tmpDefaultWrapper() wcfg, fcfg := tmpDefaultWrapper()
m := setupModel(wcfg) m := setupModel(t, wcfg)
defer cleanupModel(m) defer cleanupModel(m)
ffs := fcfg.Filesystem() ffs := fcfg.Filesystem()
@ -3764,7 +3751,7 @@ func TestBlockListMap(t *testing.T) {
func TestScanRenameCaseOnly(t *testing.T) { func TestScanRenameCaseOnly(t *testing.T) {
wcfg, fcfg := tmpDefaultWrapper() wcfg, fcfg := tmpDefaultWrapper()
m := setupModel(wcfg) m := setupModel(t, wcfg)
defer cleanupModel(m) defer cleanupModel(m)
ffs := fcfg.Filesystem() ffs := fcfg.Filesystem()
@ -3922,7 +3909,7 @@ func TestScanDeletedROChangedOnSR(t *testing.T) {
fcfg.Type = config.FolderTypeReceiveOnly fcfg.Type = config.FolderTypeReceiveOnly
waiter, _ := w.SetFolder(fcfg) waiter, _ := w.SetFolder(fcfg)
waiter.Wait() waiter.Wait()
m := setupModel(w) m := setupModel(t, w)
defer cleanupModel(m) defer cleanupModel(m)
name := "foo" name := "foo"
ffs := fcfg.Filesystem() ffs := fcfg.Filesystem()
@ -3961,7 +3948,7 @@ func TestScanDeletedROChangedOnSR(t *testing.T) {
func testConfigChangeTriggersClusterConfigs(t *testing.T, expectFirst, expectSecond bool, pre func(config.Wrapper), fn func(config.Wrapper)) { func testConfigChangeTriggersClusterConfigs(t *testing.T, expectFirst, expectSecond bool, pre func(config.Wrapper), fn func(config.Wrapper)) {
t.Helper() t.Helper()
wcfg, _ := tmpDefaultWrapper() wcfg, _ := tmpDefaultWrapper()
m := setupModel(wcfg) m := setupModel(t, wcfg)
defer cleanupModel(m) defer cleanupModel(m)
_, err := wcfg.SetDevice(config.NewDeviceConfiguration(device2, "device2")) _, err := wcfg.SetDevice(config.NewDeviceConfiguration(device2, "device2"))
@ -4037,9 +4024,13 @@ func TestIssue6961(t *testing.T) {
fcfg.Devices = append(fcfg.Devices, config.FolderDeviceConfiguration{DeviceID: device2}) fcfg.Devices = append(fcfg.Devices, config.FolderDeviceConfiguration{DeviceID: device2})
wcfg.SetFolder(fcfg) wcfg.SetFolder(fcfg)
// Always recalc/repair when opening a fileset. // Always recalc/repair when opening a fileset.
// db := db.NewLowlevel(backend.OpenMemory(), db.WithRecheckInterval(time.Millisecond)) m := newModel(t, wcfg, myID, "syncthing", "dev", nil)
db := db.NewLowlevel(backend.OpenMemory()) m.db.Close()
m := newModel(wcfg, myID, "syncthing", "dev", db, nil) var err error
m.db, err = db.NewLowlevel(backend.OpenMemory(), m.evLogger, db.WithRecheckInterval(time.Millisecond))
if err != nil {
t.Fatal(err)
}
m.ServeBackground() m.ServeBackground()
defer cleanupModelAndRemoveDir(m, tfs.URI()) defer cleanupModelAndRemoveDir(m, tfs.URI())
m.ScanFolders() m.ScanFolders()
@ -4101,7 +4092,7 @@ func TestIssue6961(t *testing.T) {
func TestCompletionEmptyGlobal(t *testing.T) { func TestCompletionEmptyGlobal(t *testing.T) {
wcfg, fcfg := tmpDefaultWrapper() wcfg, fcfg := tmpDefaultWrapper()
m := setupModel(wcfg) m := setupModel(t, wcfg)
defer cleanupModelAndRemoveDir(m, fcfg.Filesystem().URI()) defer cleanupModelAndRemoveDir(m, fcfg.Filesystem().URI())
files := []protocol.FileInfo{{Name: "foo", Version: protocol.Vector{}.Update(myID.Short()), Sequence: 1}} files := []protocol.FileInfo{{Name: "foo", Version: protocol.Vector{}.Update(myID.Short()), Sequence: 1}}
m.fmut.Lock() m.fmut.Lock()
@ -4123,7 +4114,7 @@ func TestNeedMetaAfterIndexReset(t *testing.T) {
fcfg.Devices = append(fcfg.Devices, config.FolderDeviceConfiguration{DeviceID: device2}) fcfg.Devices = append(fcfg.Devices, config.FolderDeviceConfiguration{DeviceID: device2})
waiter, _ = w.SetFolder(fcfg) waiter, _ = w.SetFolder(fcfg)
waiter.Wait() waiter.Wait()
m := setupModel(w) m := setupModel(t, w)
defer cleanupModelAndRemoveDir(m, fcfg.Path) defer cleanupModelAndRemoveDir(m, fcfg.Path)
var seq int64 = 1 var seq int64 = 1
@ -4158,7 +4149,7 @@ func TestNeedMetaAfterIndexReset(t *testing.T) {
func TestCcCheckEncryption(t *testing.T) { func TestCcCheckEncryption(t *testing.T) {
w, fcfg := tmpDefaultWrapper() w, fcfg := tmpDefaultWrapper()
m := setupModel(w) m := setupModel(t, w)
m.cancel() m.cancel()
defer cleanupModel(m) defer cleanupModel(m)
@ -4299,7 +4290,7 @@ func TestCCFolderNotRunning(t *testing.T) {
// Create the folder, but don't start it. // Create the folder, but don't start it.
w, fcfg := tmpDefaultWrapper() w, fcfg := tmpDefaultWrapper()
tfs := fcfg.Filesystem() tfs := fcfg.Filesystem()
m := newModel(w, myID, "syncthing", "dev", db.NewLowlevel(backend.OpenMemory()), nil) m := newModel(t, w, myID, "syncthing", "dev", nil)
defer cleanupModelAndRemoveDir(m, tfs.URI()) defer cleanupModelAndRemoveDir(m, tfs.URI())
// A connection can happen before all the folders are started. // A connection can happen before all the folders are started.
@ -4325,7 +4316,7 @@ func TestCCFolderNotRunning(t *testing.T) {
func TestPendingFolder(t *testing.T) { func TestPendingFolder(t *testing.T) {
w, _ := tmpDefaultWrapper() w, _ := tmpDefaultWrapper()
m := setupModel(w) m := setupModel(t, w)
defer cleanupModel(m) defer cleanupModel(m)
waiter, err := w.SetDevice(config.DeviceConfiguration{DeviceID: device2}) waiter, err := w.SetDevice(config.DeviceConfiguration{DeviceID: device2})

View File

@ -20,8 +20,6 @@ import (
"time" "time"
"github.com/syncthing/syncthing/lib/config" "github.com/syncthing/syncthing/lib/config"
"github.com/syncthing/syncthing/lib/db"
"github.com/syncthing/syncthing/lib/db/backend"
"github.com/syncthing/syncthing/lib/events" "github.com/syncthing/syncthing/lib/events"
"github.com/syncthing/syncthing/lib/fs" "github.com/syncthing/syncthing/lib/fs"
"github.com/syncthing/syncthing/lib/protocol" "github.com/syncthing/syncthing/lib/protocol"
@ -31,7 +29,7 @@ func TestRequestSimple(t *testing.T) {
// Verify that the model performs a request and creates a file based on // Verify that the model performs a request and creates a file based on
// an incoming index update. // an incoming index update.
m, fc, fcfg := setupModelWithConnection() m, fc, fcfg := setupModelWithConnection(t)
tfs := fcfg.Filesystem() tfs := fcfg.Filesystem()
defer cleanupModelAndRemoveDir(m, tfs.URI()) defer cleanupModelAndRemoveDir(m, tfs.URI())
@ -74,7 +72,7 @@ func TestSymlinkTraversalRead(t *testing.T) {
return return
} }
m, fc, fcfg := setupModelWithConnection() m, fc, fcfg := setupModelWithConnection(t)
defer cleanupModelAndRemoveDir(m, fcfg.Filesystem().URI()) defer cleanupModelAndRemoveDir(m, fcfg.Filesystem().URI())
// We listen for incoming index updates and trigger when we see one for // We listen for incoming index updates and trigger when we see one for
@ -117,7 +115,7 @@ func TestSymlinkTraversalWrite(t *testing.T) {
return return
} }
m, fc, fcfg := setupModelWithConnection() m, fc, fcfg := setupModelWithConnection(t)
defer cleanupModelAndRemoveDir(m, fcfg.Filesystem().URI()) defer cleanupModelAndRemoveDir(m, fcfg.Filesystem().URI())
// We listen for incoming index updates and trigger when we see one for // We listen for incoming index updates and trigger when we see one for
@ -176,7 +174,7 @@ func TestSymlinkTraversalWrite(t *testing.T) {
func TestRequestCreateTmpSymlink(t *testing.T) { func TestRequestCreateTmpSymlink(t *testing.T) {
// Test that an update for a temporary file is invalidated // Test that an update for a temporary file is invalidated
m, fc, fcfg := setupModelWithConnection() m, fc, fcfg := setupModelWithConnection(t)
defer cleanupModelAndRemoveDir(m, fcfg.Filesystem().URI()) defer cleanupModelAndRemoveDir(m, fcfg.Filesystem().URI())
// We listen for incoming index updates and trigger when we see one for // We listen for incoming index updates and trigger when we see one for
@ -226,7 +224,7 @@ func TestRequestVersioningSymlinkAttack(t *testing.T) {
fcfg.Versioning = config.VersioningConfiguration{Type: "trashcan"} fcfg.Versioning = config.VersioningConfiguration{Type: "trashcan"}
w.SetFolder(fcfg) w.SetFolder(fcfg)
m, fc := setupModelWithConnectionFromWrapper(w) m, fc := setupModelWithConnectionFromWrapper(t, w)
defer cleanupModel(m) defer cleanupModel(m)
// Create a temporary directory that we will use as target to see if // Create a temporary directory that we will use as target to see if
@ -300,10 +298,10 @@ func pullInvalidIgnored(t *testing.T, ft config.FolderType) {
fss := fcfg.Filesystem() fss := fcfg.Filesystem()
fcfg.Type = ft fcfg.Type = ft
w.SetFolder(fcfg) w.SetFolder(fcfg)
m := setupModel(w) m := setupModel(t, w)
defer cleanupModelAndRemoveDir(m, fss.URI()) defer cleanupModelAndRemoveDir(m, fss.URI())
folderIgnoresAlwaysReload(m, fcfg) folderIgnoresAlwaysReload(t, m, fcfg)
fc := addFakeConn(m, device1) fc := addFakeConn(m, device1)
fc.folder = "default" fc.folder = "default"
@ -422,7 +420,7 @@ func pullInvalidIgnored(t *testing.T, ft config.FolderType) {
} }
func TestIssue4841(t *testing.T) { func TestIssue4841(t *testing.T) {
m, fc, fcfg := setupModelWithConnection() m, fc, fcfg := setupModelWithConnection(t)
defer cleanupModelAndRemoveDir(m, fcfg.Filesystem().URI()) defer cleanupModelAndRemoveDir(m, fcfg.Filesystem().URI())
received := make(chan []protocol.FileInfo) received := make(chan []protocol.FileInfo)
@ -466,7 +464,7 @@ func TestIssue4841(t *testing.T) {
} }
func TestRescanIfHaveInvalidContent(t *testing.T) { func TestRescanIfHaveInvalidContent(t *testing.T) {
m, fc, fcfg := setupModelWithConnection() m, fc, fcfg := setupModelWithConnection(t)
tfs := fcfg.Filesystem() tfs := fcfg.Filesystem()
defer cleanupModelAndRemoveDir(m, tfs.URI()) defer cleanupModelAndRemoveDir(m, tfs.URI())
@ -532,7 +530,7 @@ func TestRescanIfHaveInvalidContent(t *testing.T) {
} }
func TestParentDeletion(t *testing.T) { func TestParentDeletion(t *testing.T) {
m, fc, fcfg := setupModelWithConnection() m, fc, fcfg := setupModelWithConnection(t)
testFs := fcfg.Filesystem() testFs := fcfg.Filesystem()
defer cleanupModelAndRemoveDir(m, testFs.URI()) defer cleanupModelAndRemoveDir(m, testFs.URI())
@ -611,7 +609,7 @@ func TestRequestSymlinkWindows(t *testing.T) {
t.Skip("windows specific test") t.Skip("windows specific test")
} }
m, fc, fcfg := setupModelWithConnection() m, fc, fcfg := setupModelWithConnection(t)
defer cleanupModelAndRemoveDir(m, fcfg.Filesystem().URI()) defer cleanupModelAndRemoveDir(m, fcfg.Filesystem().URI())
received := make(chan []protocol.FileInfo) received := make(chan []protocol.FileInfo)
@ -679,7 +677,7 @@ func equalContents(path string, contents []byte) error {
} }
func TestRequestRemoteRenameChanged(t *testing.T) { func TestRequestRemoteRenameChanged(t *testing.T) {
m, fc, fcfg := setupModelWithConnection() m, fc, fcfg := setupModelWithConnection(t)
tfs := fcfg.Filesystem() tfs := fcfg.Filesystem()
tmpDir := tfs.URI() tmpDir := tfs.URI()
defer cleanupModelAndRemoveDir(m, tfs.URI()) defer cleanupModelAndRemoveDir(m, tfs.URI())
@ -814,7 +812,7 @@ func TestRequestRemoteRenameChanged(t *testing.T) {
} }
func TestRequestRemoteRenameConflict(t *testing.T) { func TestRequestRemoteRenameConflict(t *testing.T) {
m, fc, fcfg := setupModelWithConnection() m, fc, fcfg := setupModelWithConnection(t)
tfs := fcfg.Filesystem() tfs := fcfg.Filesystem()
tmpDir := tfs.URI() tmpDir := tfs.URI()
defer cleanupModelAndRemoveDir(m, tmpDir) defer cleanupModelAndRemoveDir(m, tmpDir)
@ -905,7 +903,7 @@ func TestRequestRemoteRenameConflict(t *testing.T) {
} }
func TestRequestDeleteChanged(t *testing.T) { func TestRequestDeleteChanged(t *testing.T) {
m, fc, fcfg := setupModelWithConnection() m, fc, fcfg := setupModelWithConnection(t)
tfs := fcfg.Filesystem() tfs := fcfg.Filesystem()
defer cleanupModelAndRemoveDir(m, tfs.URI()) defer cleanupModelAndRemoveDir(m, tfs.URI())
@ -974,7 +972,7 @@ func TestRequestDeleteChanged(t *testing.T) {
} }
func TestNeedFolderFiles(t *testing.T) { func TestNeedFolderFiles(t *testing.T) {
m, fc, fcfg := setupModelWithConnection() m, fc, fcfg := setupModelWithConnection(t)
tfs := fcfg.Filesystem() tfs := fcfg.Filesystem()
tmpDir := tfs.URI() tmpDir := tfs.URI()
defer cleanupModelAndRemoveDir(m, tmpDir) defer cleanupModelAndRemoveDir(m, tmpDir)
@ -1023,12 +1021,12 @@ func TestNeedFolderFiles(t *testing.T) {
// https://github.com/syncthing/syncthing/issues/6038 // https://github.com/syncthing/syncthing/issues/6038
func TestIgnoreDeleteUnignore(t *testing.T) { func TestIgnoreDeleteUnignore(t *testing.T) {
w, fcfg := tmpDefaultWrapper() w, fcfg := tmpDefaultWrapper()
m := setupModel(w) m := setupModel(t, w)
fss := fcfg.Filesystem() fss := fcfg.Filesystem()
tmpDir := fss.URI() tmpDir := fss.URI()
defer cleanupModelAndRemoveDir(m, tmpDir) defer cleanupModelAndRemoveDir(m, tmpDir)
folderIgnoresAlwaysReload(m, fcfg) folderIgnoresAlwaysReload(t, m, fcfg)
m.ScanFolders() m.ScanFolders()
fc := addFakeConn(m, device1) fc := addFakeConn(m, device1)
@ -1122,7 +1120,7 @@ func TestIgnoreDeleteUnignore(t *testing.T) {
// TestRequestLastFileProgress checks that the last pulled file (here only) is registered // TestRequestLastFileProgress checks that the last pulled file (here only) is registered
// as in progress. // as in progress.
func TestRequestLastFileProgress(t *testing.T) { func TestRequestLastFileProgress(t *testing.T) {
m, fc, fcfg := setupModelWithConnection() m, fc, fcfg := setupModelWithConnection(t)
tfs := fcfg.Filesystem() tfs := fcfg.Filesystem()
defer cleanupModelAndRemoveDir(m, tfs.URI()) defer cleanupModelAndRemoveDir(m, tfs.URI())
@ -1158,7 +1156,7 @@ func TestRequestIndexSenderPause(t *testing.T) {
done := make(chan struct{}) done := make(chan struct{})
defer close(done) defer close(done)
m, fc, fcfg := setupModelWithConnection() m, fc, fcfg := setupModelWithConnection(t)
tfs := fcfg.Filesystem() tfs := fcfg.Filesystem()
defer cleanupModelAndRemoveDir(m, tfs.URI()) defer cleanupModelAndRemoveDir(m, tfs.URI())
@ -1279,7 +1277,6 @@ func TestRequestIndexSenderPause(t *testing.T) {
} }
func TestRequestIndexSenderClusterConfigBeforeStart(t *testing.T) { func TestRequestIndexSenderClusterConfigBeforeStart(t *testing.T) {
ldb := db.NewLowlevel(backend.OpenMemory())
w, fcfg := tmpDefaultWrapper() w, fcfg := tmpDefaultWrapper()
tfs := fcfg.Filesystem() tfs := fcfg.Filesystem()
dir1 := "foo" dir1 := "foo"
@ -1287,16 +1284,19 @@ func TestRequestIndexSenderClusterConfigBeforeStart(t *testing.T) {
// Initialise db with an entry and then stop everything again // Initialise db with an entry and then stop everything again
must(t, tfs.Mkdir(dir1, 0777)) must(t, tfs.Mkdir(dir1, 0777))
m := newModel(w, myID, "syncthing", "dev", ldb, nil) m := newModel(t, w, myID, "syncthing", "dev", nil)
defer cleanupModelAndRemoveDir(m, tfs.URI()) defer cleanupModelAndRemoveDir(m, tfs.URI())
m.ServeBackground() m.ServeBackground()
m.ScanFolders() m.ScanFolders()
m.cancel() m.cancel()
m.evCancel()
<-m.stopped <-m.stopped
// Add connection (sends incoming cluster config) before starting the new model // Add connection (sends incoming cluster config) before starting the new model
m = newModel(w, myID, "syncthing", "dev", ldb, nil) m = &testModel{
model: NewModel(m.cfg, m.id, m.clientName, m.clientVersion, m.db, m.protectedFiles, m.evLogger).(*model),
evCancel: m.evCancel,
stopped: make(chan struct{}),
}
defer cleanupModel(m) defer cleanupModel(m)
fc := addFakeConn(m, device1) fc := addFakeConn(m, device1)
done := make(chan struct{}) done := make(chan struct{})
@ -1351,7 +1351,7 @@ func TestRequestReceiveEncryptedLocalNoSend(t *testing.T) {
must(t, tfs.Mkdir(config.DefaultMarkerName, 0777)) must(t, tfs.Mkdir(config.DefaultMarkerName, 0777))
must(t, writeEncryptionToken(encToken, fcfg)) must(t, writeEncryptionToken(encToken, fcfg))
m := setupModel(w) m := setupModel(t, w)
defer cleanupModelAndRemoveDir(m, tfs.URI()) defer cleanupModelAndRemoveDir(m, tfs.URI())
files := genFiles(2) files := genFiles(2)

View File

@ -96,14 +96,16 @@ func testFolderConfigFake() config.FolderConfiguration {
return cfg return cfg
} }
func setupModelWithConnection() (*testModel, *fakeConnection, config.FolderConfiguration) { func setupModelWithConnection(t testing.TB) (*testModel, *fakeConnection, config.FolderConfiguration) {
t.Helper()
w, fcfg := tmpDefaultWrapper() w, fcfg := tmpDefaultWrapper()
m, fc := setupModelWithConnectionFromWrapper(w) m, fc := setupModelWithConnectionFromWrapper(t, w)
return m, fc, fcfg return m, fc, fcfg
} }
func setupModelWithConnectionFromWrapper(w config.Wrapper) (*testModel, *fakeConnection) { func setupModelWithConnectionFromWrapper(t testing.TB, w config.Wrapper) (*testModel, *fakeConnection) {
m := setupModel(w) t.Helper()
m := setupModel(t, w)
fc := addFakeConn(m, device1) fc := addFakeConn(m, device1)
fc.folder = "default" fc.folder = "default"
@ -113,9 +115,9 @@ func setupModelWithConnectionFromWrapper(w config.Wrapper) (*testModel, *fakeConnection) {
return m, fc return m, fc
} }
func setupModel(w config.Wrapper) *testModel { func setupModel(t testing.TB, w config.Wrapper) *testModel {
db := db.NewLowlevel(backend.OpenMemory()) t.Helper()
m := newModel(w, myID, "syncthing", "dev", db, nil) m := newModel(t, w, myID, "syncthing", "dev", nil)
m.ServeBackground() m.ServeBackground()
<-m.started <-m.started
@ -131,8 +133,13 @@ type testModel struct {
stopped chan struct{} stopped chan struct{}
} }
func newModel(cfg config.Wrapper, id protocol.DeviceID, clientName, clientVersion string, ldb *db.Lowlevel, protectedFiles []string) *testModel { func newModel(t testing.TB, cfg config.Wrapper, id protocol.DeviceID, clientName, clientVersion string, protectedFiles []string) *testModel {
t.Helper()
evLogger := events.NewLogger() evLogger := events.NewLogger()
ldb, err := db.NewLowlevel(backend.OpenMemory(), evLogger)
if err != nil {
t.Fatal(err)
}
m := NewModel(cfg, id, clientName, clientVersion, ldb, protectedFiles, evLogger).(*model) m := NewModel(cfg, id, clientName, clientVersion, ldb, protectedFiles, evLogger).(*model)
ctx, cancel := context.WithCancel(context.Background()) ctx, cancel := context.WithCancel(context.Background())
go evLogger.Serve(ctx) go evLogger.Serve(ctx)
@ -250,9 +257,10 @@ func dbSnapshot(t *testing.T, m Model, folder string) *db.Snapshot {
// reloads when asked to, instead of checking file mtimes. This is // reloads when asked to, instead of checking file mtimes. This is
// because we will be changing the files on disk often enough that the // because we will be changing the files on disk often enough that the
// mtimes will be unreliable to determine change status. // mtimes will be unreliable to determine change status.
func folderIgnoresAlwaysReload(m *testModel, fcfg config.FolderConfiguration) { func folderIgnoresAlwaysReload(t testing.TB, m *testModel, fcfg config.FolderConfiguration) {
t.Helper()
m.removeFolder(fcfg) m.removeFolder(fcfg)
fset := db.NewFileSet(fcfg.ID, fcfg.Filesystem(), m.db) fset := newFileSet(t, fcfg.ID, fcfg.Filesystem(), m.db)
ignores := ignore.New(fcfg.Filesystem(), ignore.WithCache(true), ignore.WithChangeDetector(newAlwaysChanged())) ignores := ignore.New(fcfg.Filesystem(), ignore.WithCache(true), ignore.WithChangeDetector(newAlwaysChanged()))
m.fmut.Lock() m.fmut.Lock()
m.addAndStartFolderLockedWithIgnores(fcfg, fset, ignores) m.addAndStartFolderLockedWithIgnores(fcfg, fset, ignores)
@ -296,3 +304,12 @@ func localIndexUpdate(m *testModel, folder string, fs []protocol.FileInfo) {
"version": seq, // legacy for sequence "version": seq, // legacy for sequence
}) })
} }
func newFileSet(t testing.TB, folder string, fs fs.Filesystem, ldb *db.Lowlevel) *db.FileSet {
t.Helper()
fset, err := db.NewFileSet(folder, fs, ldb)
if err != nil {
t.Fatal(err)
}
return fset
}
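
For reference, a minimal sketch (not part of this commit) of how non-test code might handle the error returns that db.NewLowlevel and db.NewFileSet now carry; the package name, the openFolderSet helper, the folder ID and the fake-filesystem root below are illustrative assumptions only.

package example // hypothetical illustration, not in the repository

import (
	"github.com/syncthing/syncthing/lib/db"
	"github.com/syncthing/syncthing/lib/db/backend"
	"github.com/syncthing/syncthing/lib/events"
	"github.com/syncthing/syncthing/lib/fs"
)

// openFolderSet opens an in-memory database and a file set for one folder,
// propagating errors to the caller instead of calling t.Fatal as the test
// helpers above do.
func openFolderSet() (*db.FileSet, error) {
	ll, err := db.NewLowlevel(backend.OpenMemory(), events.NoopLogger)
	if err != nil {
		return nil, err
	}
	fset, err := db.NewFileSet("default", fs.NewFilesystem(fs.FilesystemTypeFake, "example"), ll)
	if err != nil {
		ll.Close()
		return nil, err
	}
	return fset, nil
}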

View File

@ -80,17 +80,21 @@ type App struct {
stopped chan struct{} stopped chan struct{}
} }
func New(cfg config.Wrapper, dbBackend backend.Backend, evLogger events.Logger, cert tls.Certificate, opts Options) *App { func New(cfg config.Wrapper, dbBackend backend.Backend, evLogger events.Logger, cert tls.Certificate, opts Options) (*App, error) {
ll, err := db.NewLowlevel(dbBackend, evLogger, db.WithRecheckInterval(opts.DBRecheckInterval), db.WithIndirectGCInterval(opts.DBIndirectGCInterval))
if err != nil {
return nil, err
}
a := &App{ a := &App{
cfg: cfg, cfg: cfg,
ll: db.NewLowlevel(dbBackend, db.WithRecheckInterval(opts.DBRecheckInterval), db.WithIndirectGCInterval(opts.DBIndirectGCInterval)), ll: ll,
evLogger: evLogger, evLogger: evLogger,
opts: opts, opts: opts,
cert: cert, cert: cert,
stopped: make(chan struct{}), stopped: make(chan struct{}),
} }
close(a.stopped) // Hasn't been started, so shouldn't block on Wait. close(a.stopped) // Hasn't been started, so shouldn't block on Wait.
return a return a, nil
} }
// Start executes the app and returns once all the startup operations are done, // Start executes the app and returns once all the startup operations are done,

View File

@ -80,7 +80,10 @@ func TestStartupFail(t *testing.T) {
defer os.Remove(cfg.ConfigPath()) defer os.Remove(cfg.ConfigPath())
db := backend.OpenMemory() db := backend.OpenMemory()
app := New(cfg, db, events.NoopLogger, cert, Options{}) app, err := New(cfg, db, events.NoopLogger, cert, Options{})
if err != nil {
t.Fatal(err)
}
startErr := app.Start() startErr := app.Start()
if startErr == nil { if startErr == nil {
t.Fatal("Expected an error from Start, got nil") t.Fatal("Expected an error from Start, got nil")

View File

@ -8,6 +8,7 @@ package util
import ( import (
"context" "context"
"errors"
"fmt" "fmt"
"net" "net"
"net/url" "net/url"
@ -262,6 +263,19 @@ type FatalErr struct {
Status ExitStatus Status ExitStatus
} }
// AsFatalErr wraps the given error in a FatalErr with the given status. If the
// given error is already of type FatalErr, it is not wrapped again.
func AsFatalErr(err error, status ExitStatus) *FatalErr {
var ferr *FatalErr
if errors.As(err, &ferr) {
return ferr
}
return &FatalErr{
Err: err,
Status: status,
}
}
func (e *FatalErr) Error() string { func (e *FatalErr) Error() string {
return e.Err.Error() return e.Err.Error()
} }
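
As a usage note, a minimal sketch (not part of this commit) of the "not wrapped again" behaviour documented above; the error messages are made up, util.ExitError is assumed to be an existing ExitStatus value in this package, and the import path is assumed from the repository layout.

package main

import (
	"errors"
	"fmt"

	"github.com/syncthing/syncthing/lib/util"
)

func main() {
	// First call: a plain error is wrapped into a *FatalErr carrying the status.
	inner := util.AsFatalErr(errors.New("opening database: disk full"), util.ExitError)

	// Second call: errors.As finds the existing *FatalErr through the %w chain,
	// so that same value is returned instead of being nested in a new one.
	outer := util.AsFatalErr(fmt.Errorf("starting model: %w", inner), util.ExitError)

	fmt.Println(outer == inner)                 // true
	fmt.Println(outer.Status == util.ExitError) // true
}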