lib/model: Use a single lock (phase two: cleanup) (#9276)

Cleanup after #9275.

This renames `fmut` -> `mut`, removes the deadlock detector and
associated plumbing, renames some things from `...PRLocked` to
`...RLocked` and similar, and updates comments.

Apart from the removal of the deadlock detection machinery, there are no
functional code changes; i.e., it is almost 100% diff noise. Have fun
reviewing.
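
To illustrate the convention the renames settle on, here is a minimal, self-contained sketch: a single RWMutex named `mut` guards the model's maps, exported methods take the lock themselves, and internal helpers suffixed `...Locked`/`...RLocked` assume the caller already holds it. The sketch uses the standard library `sync` package and placeholder fields, not Syncthing's `lib/sync` wrapper or the real `model` struct.

```go
package main

import (
	"errors"
	"fmt"
	"sync"
)

// Illustrative stand-in for the real model struct: everything below the
// mutex is protected by the single lock "mut" (formerly "fmut").
type model struct {
	mut        sync.RWMutex
	folderCfgs map[string]string // folder ID -> label (placeholder field)
}

// FolderLabel is an exported method, so it takes the read lock itself.
func (m *model) FolderLabel(folder string) (string, error) {
	m.mut.RLock()
	defer m.mut.RUnlock()
	return m.folderLabelRLocked(folder)
}

// folderLabelRLocked follows the "...RLocked" naming from the commit:
// the caller must hold m.mut (at least read-locked) when calling this.
func (m *model) folderLabelRLocked(folder string) (string, error) {
	label, ok := m.folderCfgs[folder]
	if !ok {
		return "", errors.New("no such folder")
	}
	return label, nil
}

func main() {
	m := &model{folderCfgs: map[string]string{"default": "Default Folder"}}
	fmt.Println(m.FolderLabel("default"))
}
```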
Jakob Borg 2023-12-11 22:06:45 +01:00 committed by GitHub
parent d21a2de055
commit 935a28c961
13 changed files with 210 additions and 396 deletions


@ -88,9 +88,6 @@ above.
STTRACE A comma separated string of facilities to trace. The valid STTRACE A comma separated string of facilities to trace. The valid
facility strings are listed below. facility strings are listed below.
STDEADLOCKTIMEOUT Used for debugging internal deadlocks; sets debug
sensitivity. Use only under direction of a developer.
STLOCKTHRESHOLD Used for debugging internal deadlocks; sets debug STLOCKTHRESHOLD Used for debugging internal deadlocks; sets debug
sensitivity. Use only under direction of a developer. sensitivity. Use only under direction of a developer.
@ -173,7 +170,6 @@ type serveOptions struct {
// Debug options below // Debug options below
DebugDBIndirectGCInterval time.Duration `env:"STGCINDIRECTEVERY" help:"Database indirection GC interval"` DebugDBIndirectGCInterval time.Duration `env:"STGCINDIRECTEVERY" help:"Database indirection GC interval"`
DebugDBRecheckInterval time.Duration `env:"STRECHECKDBEVERY" help:"Database metadata recalculation interval"` DebugDBRecheckInterval time.Duration `env:"STRECHECKDBEVERY" help:"Database metadata recalculation interval"`
DebugDeadlockTimeout int `placeholder:"SECONDS" env:"STDEADLOCKTIMEOUT" help:"Used for debugging internal deadlocks"`
DebugGUIAssetsDir string `placeholder:"PATH" help:"Directory to load GUI assets from" env:"STGUIASSETS"` DebugGUIAssetsDir string `placeholder:"PATH" help:"Directory to load GUI assets from" env:"STGUIASSETS"`
DebugPerfStats bool `env:"STPERFSTATS" help:"Write running performance statistics to perf-$pid.csv (Unix only)"` DebugPerfStats bool `env:"STPERFSTATS" help:"Write running performance statistics to perf-$pid.csv (Unix only)"`
DebugProfileBlock bool `env:"STBLOCKPROFILE" help:"Write block profiles to block-$pid-$timestamp.pprof every 20 seconds"` DebugProfileBlock bool `env:"STBLOCKPROFILE" help:"Write block profiles to block-$pid-$timestamp.pprof every 20 seconds"`
@ -623,7 +619,6 @@ func syncthingMain(options serveOptions) {
} }
appOpts := syncthing.Options{ appOpts := syncthing.Options{
DeadlockTimeoutS: options.DebugDeadlockTimeout,
NoUpgrade: options.NoUpgrade, NoUpgrade: options.NoUpgrade,
ProfilerAddr: options.DebugProfilerListen, ProfilerAddr: options.DebugProfilerListen,
ResetDeltaIdxs: options.DebugResetDeltaIdxs, ResetDeltaIdxs: options.DebugResetDeltaIdxs,
@ -634,10 +629,6 @@ func syncthingMain(options serveOptions) {
if options.Audit { if options.Audit {
appOpts.AuditWriter = auditWriter(options.AuditFile) appOpts.AuditWriter = auditWriter(options.AuditFile)
} }
if t := os.Getenv("STDEADLOCKTIMEOUT"); t != "" {
secs, _ := strconv.Atoi(t)
appOpts.DeadlockTimeoutS = secs
}
if dur, err := time.ParseDuration(os.Getenv("STRECHECKDBEVERY")); err == nil { if dur, err := time.ParseDuration(os.Getenv("STRECHECKDBEVERY")); err == nil {
appOpts.DBRecheckInterval = dur appOpts.DBRecheckInterval = dur
} }

go.mod (2 changes)

@ -40,7 +40,6 @@ require (
github.com/prometheus/procfs v0.12.0 // indirect github.com/prometheus/procfs v0.12.0 // indirect
github.com/quic-go/quic-go v0.40.0 github.com/quic-go/quic-go v0.40.0
github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475
github.com/sasha-s/go-deadlock v0.3.1
github.com/shirou/gopsutil/v3 v3.23.11 github.com/shirou/gopsutil/v3 v3.23.11
github.com/syncthing/notify v0.0.0-20210616190510-c6b7342338d2 github.com/syncthing/notify v0.0.0-20210616190510-c6b7342338d2
github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d
@ -68,7 +67,6 @@ require (
github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 // indirect github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 // indirect
github.com/onsi/ginkgo/v2 v2.13.2 // indirect github.com/onsi/ginkgo/v2 v2.13.2 // indirect
github.com/oschwald/maxminddb-golang v1.12.0 // indirect github.com/oschwald/maxminddb-golang v1.12.0 // indirect
github.com/petermattis/goid v0.0.0-20231126143041-f558c26febf5 // indirect
github.com/power-devops/perfstat v0.0.0-20221212215047-62379fc7944b // indirect github.com/power-devops/perfstat v0.0.0-20221212215047-62379fc7944b // indirect
github.com/prometheus/client_model v0.5.0 // indirect github.com/prometheus/client_model v0.5.0 // indirect
github.com/quic-go/qtls-go1-20 v0.4.1 // indirect github.com/quic-go/qtls-go1-20 v0.4.1 // indirect

go.sum (5 changes)

@ -139,9 +139,6 @@ github.com/oschwald/geoip2-golang v1.9.0 h1:uvD3O6fXAXs+usU+UGExshpdP13GAqp4GBrz
github.com/oschwald/geoip2-golang v1.9.0/go.mod h1:BHK6TvDyATVQhKNbQBdrj9eAvuwOMi2zSFXizL3K81Y= github.com/oschwald/geoip2-golang v1.9.0/go.mod h1:BHK6TvDyATVQhKNbQBdrj9eAvuwOMi2zSFXizL3K81Y=
github.com/oschwald/maxminddb-golang v1.12.0 h1:9FnTOD0YOhP7DGxGsq4glzpGy5+w7pq50AS6wALUMYs= github.com/oschwald/maxminddb-golang v1.12.0 h1:9FnTOD0YOhP7DGxGsq4glzpGy5+w7pq50AS6wALUMYs=
github.com/oschwald/maxminddb-golang v1.12.0/go.mod h1:q0Nob5lTCqyQ8WT6FYgS1L7PXKVVbgiymefNwIjPzgY= github.com/oschwald/maxminddb-golang v1.12.0/go.mod h1:q0Nob5lTCqyQ8WT6FYgS1L7PXKVVbgiymefNwIjPzgY=
github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5/go.mod h1:jvVRKCrJTQWu0XVbaOlby/2lO20uSCHEMzzplHXte1o=
github.com/petermattis/goid v0.0.0-20231126143041-f558c26febf5 h1:+qIP3OMrT7SN5kLnTcVEISPOMB/97RyAKTg1UWA738E=
github.com/petermattis/goid v0.0.0-20231126143041-f558c26febf5/go.mod h1:pxMtw7cyUw6B2bRH0ZBANSPg+AoSud1I1iyJHI69jH4=
github.com/pierrec/lz4/v4 v4.1.18 h1:xaKrnTkyoqfh1YItXl56+6KJNVYWlEEPuAQW9xsplYQ= github.com/pierrec/lz4/v4 v4.1.18 h1:xaKrnTkyoqfh1YItXl56+6KJNVYWlEEPuAQW9xsplYQ=
github.com/pierrec/lz4/v4 v4.1.18/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= github.com/pierrec/lz4/v4 v4.1.18/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
@ -168,8 +165,6 @@ github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 h1:N/ElC8H3+5X
github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/sasha-s/go-deadlock v0.3.1 h1:sqv7fDNShgjcaxkO0JNcOAlr8B9+cV5Ey/OB71efZx0=
github.com/sasha-s/go-deadlock v0.3.1/go.mod h1:F73l+cr82YSh10GxyRI6qZiCgK64VaZjwesgfQ1/iLM=
github.com/sclevine/spec v1.4.0 h1:z/Q9idDcay5m5irkZ28M7PtQM4aOISzOpj4bUPkDee8= github.com/sclevine/spec v1.4.0 h1:z/Q9idDcay5m5irkZ28M7PtQM4aOISzOpj4bUPkDee8=
github.com/shirou/gopsutil/v3 v3.23.11 h1:i3jP9NjCPUz7FiZKxlMnODZkdSIp2gnzfrvsu9CuWEQ= github.com/shirou/gopsutil/v3 v3.23.11 h1:i3jP9NjCPUz7FiZKxlMnODZkdSIp2gnzfrvsu9CuWEQ=
github.com/shirou/gopsutil/v3 v3.23.11/go.mod h1:1FrWgea594Jp7qmjHUUPlJDTPgcsb9mGnXDxavtikzM= github.com/shirou/gopsutil/v3 v3.23.11/go.mod h1:1FrWgea594Jp7qmjHUUPlJDTPgcsb9mGnXDxavtikzM=


@ -535,8 +535,8 @@ func setupROFolder(t *testing.T) (*testModel, *receiveOnlyFolder, context.Cancel
<-m.started <-m.started
must(t, m.ScanFolder("ro")) must(t, m.ScanFolder("ro"))
m.fmut.RLock() m.mut.RLock()
defer m.fmut.RUnlock() defer m.mut.RUnlock()
r, _ := m.folderRunners.Get("ro") r, _ := m.folderRunners.Get("ro")
f := r.(*receiveOnlyFolder) f := r.(*receiveOnlyFolder)


@ -531,11 +531,6 @@ type Model struct {
setIgnoresReturnsOnCall map[int]struct { setIgnoresReturnsOnCall map[int]struct {
result1 error result1 error
} }
StartDeadlockDetectorStub func(time.Duration)
startDeadlockDetectorMutex sync.RWMutex
startDeadlockDetectorArgsForCall []struct {
arg1 time.Duration
}
StateStub func(string) (string, time.Time, error) StateStub func(string) (string, time.Time, error)
stateMutex sync.RWMutex stateMutex sync.RWMutex
stateArgsForCall []struct { stateArgsForCall []struct {
@ -3070,38 +3065,6 @@ func (fake *Model) SetIgnoresReturnsOnCall(i int, result1 error) {
}{result1} }{result1}
} }
func (fake *Model) StartDeadlockDetector(arg1 time.Duration) {
fake.startDeadlockDetectorMutex.Lock()
fake.startDeadlockDetectorArgsForCall = append(fake.startDeadlockDetectorArgsForCall, struct {
arg1 time.Duration
}{arg1})
stub := fake.StartDeadlockDetectorStub
fake.recordInvocation("StartDeadlockDetector", []interface{}{arg1})
fake.startDeadlockDetectorMutex.Unlock()
if stub != nil {
fake.StartDeadlockDetectorStub(arg1)
}
}
func (fake *Model) StartDeadlockDetectorCallCount() int {
fake.startDeadlockDetectorMutex.RLock()
defer fake.startDeadlockDetectorMutex.RUnlock()
return len(fake.startDeadlockDetectorArgsForCall)
}
func (fake *Model) StartDeadlockDetectorCalls(stub func(time.Duration)) {
fake.startDeadlockDetectorMutex.Lock()
defer fake.startDeadlockDetectorMutex.Unlock()
fake.StartDeadlockDetectorStub = stub
}
func (fake *Model) StartDeadlockDetectorArgsForCall(i int) time.Duration {
fake.startDeadlockDetectorMutex.RLock()
defer fake.startDeadlockDetectorMutex.RUnlock()
argsForCall := fake.startDeadlockDetectorArgsForCall[i]
return argsForCall.arg1
}
func (fake *Model) State(arg1 string) (string, time.Time, error) { func (fake *Model) State(arg1 string) (string, time.Time, error) {
fake.stateMutex.Lock() fake.stateMutex.Lock()
ret, specificReturn := fake.stateReturnsOnCall[len(fake.stateArgsForCall)] ret, specificReturn := fake.stateReturnsOnCall[len(fake.stateArgsForCall)]
@ -3351,8 +3314,6 @@ func (fake *Model) Invocations() map[string][][]interface{} {
defer fake.serveMutex.RUnlock() defer fake.serveMutex.RUnlock()
fake.setIgnoresMutex.RLock() fake.setIgnoresMutex.RLock()
defer fake.setIgnoresMutex.RUnlock() defer fake.setIgnoresMutex.RUnlock()
fake.startDeadlockDetectorMutex.RLock()
defer fake.startDeadlockDetectorMutex.RUnlock()
fake.stateMutex.RLock() fake.stateMutex.RLock()
defer fake.stateMutex.RUnlock() defer fake.stateMutex.RUnlock()
fake.usageReportingStatsMutex.RLock() fake.usageReportingStatsMutex.RLock()


@ -116,7 +116,6 @@ type Model interface {
DismissPendingDevice(device protocol.DeviceID) error DismissPendingDevice(device protocol.DeviceID) error
DismissPendingFolder(device protocol.DeviceID, folder string) error DismissPendingFolder(device protocol.DeviceID, folder string) error
StartDeadlockDetector(timeout time.Duration)
GlobalDirectoryTree(folder, prefix string, levels int, dirsOnly bool) ([]*TreeEntry, error) GlobalDirectoryTree(folder, prefix string, levels int, dirsOnly bool) ([]*TreeEntry, error)
} }
@ -145,8 +144,8 @@ type model struct {
keyGen *protocol.KeyGenerator keyGen *protocol.KeyGenerator
promotionTimer *time.Timer promotionTimer *time.Timer
// fields protected by fmut // fields protected by mut
fmut sync.RWMutex mut sync.RWMutex
folderCfgs map[string]config.FolderConfiguration // folder -> cfg folderCfgs map[string]config.FolderConfiguration // folder -> cfg
folderFiles map[string]*db.FileSet // folder -> files folderFiles map[string]*db.FileSet // folder -> files
deviceStatRefs map[protocol.DeviceID]*stats.DeviceStatisticsReference // deviceID -> statsRef deviceStatRefs map[protocol.DeviceID]*stats.DeviceStatisticsReference // deviceID -> statsRef
@ -156,8 +155,6 @@ type model struct {
folderVersioners map[string]versioner.Versioner // folder -> versioner (may be nil) folderVersioners map[string]versioner.Versioner // folder -> versioner (may be nil)
folderEncryptionPasswordTokens map[string][]byte // folder -> encryption token (may be missing, and only for encryption type folders) folderEncryptionPasswordTokens map[string][]byte // folder -> encryption token (may be missing, and only for encryption type folders)
folderEncryptionFailures map[string]map[protocol.DeviceID]error // folder -> device -> error regarding encryption consistency (may be missing) folderEncryptionFailures map[string]map[protocol.DeviceID]error // folder -> device -> error regarding encryption consistency (may be missing)
// fields also protected by fmut
connections map[string]protocol.Connection // connection ID -> connection connections map[string]protocol.Connection // connection ID -> connection
deviceConnIDs map[protocol.DeviceID][]string // device -> connection IDs (invariant: if the key exists, the value is len >= 1, with the primary connection at the start of the slice) deviceConnIDs map[protocol.DeviceID][]string // device -> connection IDs (invariant: if the key exists, the value is len >= 1, with the primary connection at the start of the slice)
promotedConnID map[protocol.DeviceID]string // device -> latest promoted connection ID promotedConnID map[protocol.DeviceID]string // device -> latest promoted connection ID
@ -226,8 +223,8 @@ func NewModel(cfg config.Wrapper, id protocol.DeviceID, ldb *db.Lowlevel, protec
keyGen: keyGen, keyGen: keyGen,
promotionTimer: time.NewTimer(0), promotionTimer: time.NewTimer(0),
// fields protected by fmut // fields protected by mut
fmut: sync.NewRWMutex(), mut: sync.NewRWMutex(),
folderCfgs: make(map[string]config.FolderConfiguration), folderCfgs: make(map[string]config.FolderConfiguration),
folderFiles: make(map[string]*db.FileSet), folderFiles: make(map[string]*db.FileSet),
deviceStatRefs: make(map[protocol.DeviceID]*stats.DeviceStatisticsReference), deviceStatRefs: make(map[protocol.DeviceID]*stats.DeviceStatisticsReference),
@ -236,8 +233,6 @@ func NewModel(cfg config.Wrapper, id protocol.DeviceID, ldb *db.Lowlevel, protec
folderVersioners: make(map[string]versioner.Versioner), folderVersioners: make(map[string]versioner.Versioner),
folderEncryptionPasswordTokens: make(map[string][]byte), folderEncryptionPasswordTokens: make(map[string][]byte),
folderEncryptionFailures: make(map[string]map[protocol.DeviceID]error), folderEncryptionFailures: make(map[string]map[protocol.DeviceID]error),
// ditto
connections: make(map[string]protocol.Connection), connections: make(map[string]protocol.Connection),
deviceConnIDs: make(map[protocol.DeviceID][]string), deviceConnIDs: make(map[protocol.DeviceID][]string),
promotedConnID: make(map[protocol.DeviceID]string), promotedConnID: make(map[protocol.DeviceID]string),
@ -250,7 +245,7 @@ func NewModel(cfg config.Wrapper, id protocol.DeviceID, ldb *db.Lowlevel, protec
} }
for devID, cfg := range cfg.Devices() { for devID, cfg := range cfg.Devices() {
m.deviceStatRefs[devID] = stats.NewDeviceStatisticsReference(m.db, devID) m.deviceStatRefs[devID] = stats.NewDeviceStatisticsReference(m.db, devID)
m.setConnRequestLimitersPLocked(cfg) m.setConnRequestLimitersLocked(cfg)
} }
m.Add(m.folderRunners) m.Add(m.folderRunners)
m.Add(m.progressEmitter) m.Add(m.progressEmitter)
@ -310,13 +305,13 @@ func (m *model) initFolders(cfg config.Configuration) error {
} }
func (m *model) closeAllConnectionsAndWait() { func (m *model) closeAllConnectionsAndWait() {
m.fmut.RLock() m.mut.RLock()
closed := make([]chan struct{}, 0, len(m.connections)) closed := make([]chan struct{}, 0, len(m.connections))
for connID, conn := range m.connections { for connID, conn := range m.connections {
closed = append(closed, m.closed[connID]) closed = append(closed, m.closed[connID])
go conn.Close(errStopped) go conn.Close(errStopped)
} }
m.fmut.RUnlock() m.mut.RUnlock()
for _, c := range closed { for _, c := range closed {
<-c <-c
} }
@ -329,16 +324,7 @@ func (m *model) fatal(err error) {
} }
} }
// StartDeadlockDetector starts a deadlock detector on the models locks which // Need to hold lock on m.mut when calling this.
// causes panics in case the locks cannot be acquired in the given timeout
// period.
func (m *model) StartDeadlockDetector(timeout time.Duration) {
l.Infof("Starting deadlock detector with %v timeout", timeout)
detector := newDeadlockDetector(timeout, m.evLogger, m.fatal)
detector.Watch("fmut", m.fmut)
}
// Need to hold lock on m.fmut when calling this.
func (m *model) addAndStartFolderLocked(cfg config.FolderConfiguration, fset *db.FileSet, cacheIgnoredFiles bool) { func (m *model) addAndStartFolderLocked(cfg config.FolderConfiguration, fset *db.FileSet, cacheIgnoredFiles bool) {
ignores := ignore.New(cfg.Filesystem(nil), ignore.WithCache(cacheIgnoredFiles)) ignores := ignore.New(cfg.Filesystem(nil), ignore.WithCache(cacheIgnoredFiles))
if cfg.Type != config.FolderTypeReceiveEncrypted { if cfg.Type != config.FolderTypeReceiveEncrypted {
@ -461,14 +447,12 @@ func (m *model) warnAboutOverwritingProtectedFiles(cfg config.FolderConfiguratio
} }
func (m *model) removeFolder(cfg config.FolderConfiguration) { func (m *model) removeFolder(cfg config.FolderConfiguration) {
m.fmut.RLock() m.mut.RLock()
wait := m.folderRunners.StopAndWaitChan(cfg.ID, 0) wait := m.folderRunners.StopAndWaitChan(cfg.ID, 0)
m.fmut.RUnlock() m.mut.RUnlock()
<-wait <-wait
// We need to hold both fmut and pmut and must acquire locks in the same m.mut.Lock()
// order always. (The locks can be *released* in any order.)
m.fmut.Lock()
isPathUnique := true isPathUnique := true
for folderID, folderCfg := range m.folderCfgs { for folderID, folderCfg := range m.folderCfgs {
@ -493,13 +477,13 @@ func (m *model) removeFolder(cfg config.FolderConfiguration) {
return nil return nil
}) })
m.fmut.Unlock() m.mut.Unlock()
// Remove it from the database // Remove it from the database
db.DropFolder(m.db, cfg.ID) db.DropFolder(m.db, cfg.ID)
} }
// Need to hold lock on m.fmut when calling this. // Need to hold lock on m.mut when calling this.
func (m *model) cleanupFolderLocked(cfg config.FolderConfiguration) { func (m *model) cleanupFolderLocked(cfg config.FolderConfiguration) {
// clear up our config maps // clear up our config maps
m.folderRunners.Remove(cfg.ID) m.folderRunners.Remove(cfg.ID)
@ -523,7 +507,7 @@ func (m *model) restartFolder(from, to config.FolderConfiguration, cacheIgnoredF
// This mutex protects the entirety of the restart operation, preventing // This mutex protects the entirety of the restart operation, preventing
// there from being more than one folder restart operation in progress // there from being more than one folder restart operation in progress
// at any given time. The usual fmut/pmut stuff doesn't cover this, // at any given time. The usual locking stuff doesn't cover this,
// because those locks are released while we are waiting for the folder // because those locks are released while we are waiting for the folder
// to shut down (and must be so because the folder might need them as // to shut down (and must be so because the folder might need them as
// part of its operations before shutting down). // part of its operations before shutting down).
@ -531,13 +515,13 @@ func (m *model) restartFolder(from, to config.FolderConfiguration, cacheIgnoredF
restartMut.Lock() restartMut.Lock()
defer restartMut.Unlock() defer restartMut.Unlock()
m.fmut.RLock() m.mut.RLock()
wait := m.folderRunners.StopAndWaitChan(from.ID, 0) wait := m.folderRunners.StopAndWaitChan(from.ID, 0)
m.fmut.RUnlock() m.mut.RUnlock()
<-wait <-wait
m.fmut.Lock() m.mut.Lock()
defer m.fmut.Unlock() defer m.mut.Unlock()
// Cache the (maybe) existing fset before it's removed by cleanupFolderLocked // Cache the (maybe) existing fset before it's removed by cleanupFolderLocked
fset := m.folderFiles[folder] fset := m.folderFiles[folder]
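
The comment in the hunk above explains why folder restarts need their own mutex: the model lock is deliberately released while waiting for the folder runner to stop (the runner may still need it during shutdown), so it cannot serialize the restart itself. A rough, self-contained sketch of that stop/wait/start sequence, with placeholder types and a simulated runner shutdown in place of the real `folderRunners.StopAndWaitChan`:

```go
package main

import (
	"fmt"
	"sync"
)

// model is a placeholder for the real struct; mut stands in for the single
// lock from this commit, restartMut for the restart mutex described above.
type model struct {
	mut        sync.RWMutex
	restartMut sync.Mutex
	stopped    map[string]chan struct{} // folder ID -> closed when the runner has stopped
}

// restartFolder holds restartMut for the whole operation, but releases mut
// while waiting for the runner to stop, mirroring the sequence in the hunk.
func (m *model) restartFolder(folderID string) {
	m.restartMut.Lock()
	defer m.restartMut.Unlock()

	// Grab the folder's "stopped" channel under the read lock, then drop
	// the lock before blocking on it.
	m.mut.RLock()
	wait := m.stopped[folderID]
	m.mut.RUnlock()

	go close(wait) // simulate the runner shutting down asynchronously
	<-wait

	// Reconfigure and restart under the write lock.
	m.mut.Lock()
	defer m.mut.Unlock()
	m.stopped[folderID] = make(chan struct{}) // "start" the new runner
}

func main() {
	m := &model{stopped: map[string]chan struct{}{"default": make(chan struct{})}}
	m.restartFolder("default")
	fmt.Println("folder restarted")
}
```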
@ -586,8 +570,8 @@ func (m *model) newFolder(cfg config.FolderConfiguration, cacheIgnoredFiles bool
return fmt.Errorf("adding %v: %w", cfg.Description(), err) return fmt.Errorf("adding %v: %w", cfg.Description(), err)
} }
m.fmut.Lock() m.mut.Lock()
defer m.fmut.Unlock() defer m.mut.Unlock()
m.addAndStartFolderLocked(cfg, fset, cacheIgnoredFiles) m.addAndStartFolderLocked(cfg, fset, cacheIgnoredFiles)
@ -632,11 +616,11 @@ func (m *model) UsageReportingStats(report *contract.Report, version int, previe
blockStatsMut.Unlock() blockStatsMut.Unlock()
// Transport stats // Transport stats
m.fmut.RLock() m.mut.RLock()
for _, conn := range m.connections { for _, conn := range m.connections {
report.TransportStats[conn.Transport()]++ report.TransportStats[conn.Transport()]++
} }
m.fmut.RUnlock() m.mut.RUnlock()
// Ignore stats // Ignore stats
var seenPrefix [3]bool var seenPrefix [3]bool
@ -723,8 +707,8 @@ type ConnectionInfo struct {
// ConnectionStats returns a map with connection statistics for each device. // ConnectionStats returns a map with connection statistics for each device.
func (m *model) ConnectionStats() map[string]interface{} { func (m *model) ConnectionStats() map[string]interface{} {
m.fmut.RLock() m.mut.RLock()
defer m.fmut.RUnlock() defer m.mut.RUnlock()
res := make(map[string]interface{}) res := make(map[string]interface{})
devs := m.cfg.Devices() devs := m.cfg.Devices()
@ -797,8 +781,8 @@ func (m *model) ConnectionStats() map[string]interface{} {
// DeviceStatistics returns statistics about each device // DeviceStatistics returns statistics about each device
func (m *model) DeviceStatistics() (map[protocol.DeviceID]stats.DeviceStatistics, error) { func (m *model) DeviceStatistics() (map[protocol.DeviceID]stats.DeviceStatistics, error) {
m.fmut.RLock() m.mut.RLock()
defer m.fmut.RUnlock() defer m.mut.RUnlock()
res := make(map[protocol.DeviceID]stats.DeviceStatistics, len(m.deviceStatRefs)) res := make(map[protocol.DeviceID]stats.DeviceStatistics, len(m.deviceStatRefs))
for id, sr := range m.deviceStatRefs { for id, sr := range m.deviceStatRefs {
stats, err := sr.GetStatistics() stats, err := sr.GetStatistics()
@ -818,8 +802,8 @@ func (m *model) DeviceStatistics() (map[protocol.DeviceID]stats.DeviceStatistics
// FolderStatistics returns statistics about each folder // FolderStatistics returns statistics about each folder
func (m *model) FolderStatistics() (map[string]stats.FolderStatistics, error) { func (m *model) FolderStatistics() (map[string]stats.FolderStatistics, error) {
res := make(map[string]stats.FolderStatistics) res := make(map[string]stats.FolderStatistics)
m.fmut.RLock() m.mut.RLock()
defer m.fmut.RUnlock() defer m.mut.RUnlock()
err := m.folderRunners.Each(func(id string, runner service) error { err := m.folderRunners.Each(func(id string, runner service) error {
stats, err := runner.GetStatistics() stats, err := runner.GetStatistics()
if err != nil { if err != nil {
@ -936,10 +920,10 @@ func (m *model) Completion(device protocol.DeviceID, folder string) (FolderCompl
} }
func (m *model) folderCompletion(device protocol.DeviceID, folder string) (FolderCompletion, error) { func (m *model) folderCompletion(device protocol.DeviceID, folder string) (FolderCompletion, error) {
m.fmut.RLock() m.mut.RLock()
err := m.checkFolderRunningLocked(folder) err := m.checkFolderRunningRLocked(folder)
rf := m.folderFiles[folder] rf := m.folderFiles[folder]
m.fmut.RUnlock() m.mut.RUnlock()
if err != nil { if err != nil {
return FolderCompletion{}, err return FolderCompletion{}, err
} }
@ -950,10 +934,10 @@ func (m *model) folderCompletion(device protocol.DeviceID, folder string) (Folde
} }
defer snap.Release() defer snap.Release()
m.fmut.RLock() m.mut.RLock()
state := m.remoteFolderStates[device][folder] state := m.remoteFolderStates[device][folder]
downloaded := m.deviceDownloads[device].BytesDownloaded(folder) downloaded := m.deviceDownloads[device].BytesDownloaded(folder)
m.fmut.RUnlock() m.mut.RUnlock()
need := snap.NeedSize(device) need := snap.NeedSize(device)
need.Bytes -= downloaded need.Bytes -= downloaded
@ -970,10 +954,10 @@ func (m *model) folderCompletion(device protocol.DeviceID, folder string) (Folde
// DBSnapshot returns a snapshot of the database content relevant to the given folder. // DBSnapshot returns a snapshot of the database content relevant to the given folder.
func (m *model) DBSnapshot(folder string) (*db.Snapshot, error) { func (m *model) DBSnapshot(folder string) (*db.Snapshot, error) {
m.fmut.RLock() m.mut.RLock()
err := m.checkFolderRunningLocked(folder) err := m.checkFolderRunningRLocked(folder)
rf := m.folderFiles[folder] rf := m.folderFiles[folder]
m.fmut.RUnlock() m.mut.RUnlock()
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -987,11 +971,11 @@ func (m *model) FolderProgressBytesCompleted(folder string) int64 {
// NeedFolderFiles returns paginated list of currently needed files in // NeedFolderFiles returns paginated list of currently needed files in
// progress, queued, and to be queued on next puller iteration. // progress, queued, and to be queued on next puller iteration.
func (m *model) NeedFolderFiles(folder string, page, perpage int) ([]db.FileInfoTruncated, []db.FileInfoTruncated, []db.FileInfoTruncated, error) { func (m *model) NeedFolderFiles(folder string, page, perpage int) ([]db.FileInfoTruncated, []db.FileInfoTruncated, []db.FileInfoTruncated, error) {
m.fmut.RLock() m.mut.RLock()
rf, rfOk := m.folderFiles[folder] rf, rfOk := m.folderFiles[folder]
runner, runnerOk := m.folderRunners.Get(folder) runner, runnerOk := m.folderRunners.Get(folder)
cfg := m.folderCfgs[folder] cfg := m.folderCfgs[folder]
m.fmut.RUnlock() m.mut.RUnlock()
if !rfOk { if !rfOk {
return nil, nil, nil, ErrFolderMissing return nil, nil, nil, ErrFolderMissing
@ -1058,9 +1042,9 @@ func (m *model) NeedFolderFiles(folder string, page, perpage int) ([]db.FileInfo
// RemoteNeedFolderFiles returns paginated list of currently needed files for a // RemoteNeedFolderFiles returns paginated list of currently needed files for a
// remote device to become synced with a folder. // remote device to become synced with a folder.
func (m *model) RemoteNeedFolderFiles(folder string, device protocol.DeviceID, page, perpage int) ([]db.FileInfoTruncated, error) { func (m *model) RemoteNeedFolderFiles(folder string, device protocol.DeviceID, page, perpage int) ([]db.FileInfoTruncated, error) {
m.fmut.RLock() m.mut.RLock()
rf, ok := m.folderFiles[folder] rf, ok := m.folderFiles[folder]
m.fmut.RUnlock() m.mut.RUnlock()
if !ok { if !ok {
return nil, ErrFolderMissing return nil, ErrFolderMissing
@ -1085,9 +1069,9 @@ func (m *model) RemoteNeedFolderFiles(folder string, device protocol.DeviceID, p
} }
func (m *model) LocalChangedFolderFiles(folder string, page, perpage int) ([]db.FileInfoTruncated, error) { func (m *model) LocalChangedFolderFiles(folder string, page, perpage int) ([]db.FileInfoTruncated, error) {
m.fmut.RLock() m.mut.RLock()
rf, ok := m.folderFiles[folder] rf, ok := m.folderFiles[folder]
m.fmut.RUnlock() m.mut.RUnlock()
if !ok { if !ok {
return nil, ErrFolderMissing return nil, ErrFolderMissing
@ -1176,9 +1160,9 @@ func (m *model) handleIndex(conn protocol.Connection, folder string, fs []protoc
return fmt.Errorf("%s: %w", folder, ErrFolderPaused) return fmt.Errorf("%s: %w", folder, ErrFolderPaused)
} }
m.fmut.RLock() m.mut.RLock()
indexHandler, ok := m.getIndexHandlerPRLocked(conn) indexHandler, ok := m.getIndexHandlerRLocked(conn)
m.fmut.RUnlock() m.mut.RUnlock()
if !ok { if !ok {
// This should be impossible, as an index handler is registered when // This should be impossible, as an index handler is registered when
// we send a cluster config, and that is what triggers index // we send a cluster config, and that is what triggers index
@ -1255,7 +1239,7 @@ func (m *model) ClusterConfig(conn protocol.Connection, cm protocol.ClusterConfi
break break
} }
// Needs to happen outside of the fmut, as can cause CommitConfiguration // Needs to happen outside of the mut, as can cause CommitConfiguration
if deviceCfg.AutoAcceptFolders { if deviceCfg.AutoAcceptFolders {
w, _ := m.cfg.Modify(func(cfg *config.Configuration) { w, _ := m.cfg.Modify(func(cfg *config.Configuration) {
changedFcfg := make(map[string]config.FolderConfiguration) changedFcfg := make(map[string]config.FolderConfiguration)
@ -1291,9 +1275,9 @@ func (m *model) ClusterConfig(conn protocol.Connection, cm protocol.ClusterConfi
return err return err
} }
m.fmut.Lock() m.mut.Lock()
m.remoteFolderStates[deviceID] = states m.remoteFolderStates[deviceID] = states
m.fmut.Unlock() m.mut.Unlock()
m.evLogger.Log(events.ClusterConfigReceived, ClusterConfigReceivedEventData{ m.evLogger.Log(events.ClusterConfigReceived, ClusterConfigReceivedEventData{
Device: deviceID, Device: deviceID,
@ -1302,11 +1286,11 @@ func (m *model) ClusterConfig(conn protocol.Connection, cm protocol.ClusterConfi
if len(tempIndexFolders) > 0 { if len(tempIndexFolders) > 0 {
var connOK bool var connOK bool
var conn protocol.Connection var conn protocol.Connection
m.fmut.RLock() m.mut.RLock()
if connIDs, connIDOK := m.deviceConnIDs[deviceID]; connIDOK { if connIDs, connIDOK := m.deviceConnIDs[deviceID]; connIDOK {
conn, connOK = m.connections[connIDs[0]] conn, connOK = m.connections[connIDs[0]]
} }
m.fmut.RUnlock() m.mut.RUnlock()
// In case we've got ClusterConfig, and the connection disappeared // In case we've got ClusterConfig, and the connection disappeared
// from infront of our nose. // from infront of our nose.
if connOK { if connOK {
@ -1339,8 +1323,8 @@ func (m *model) ensureIndexHandler(conn protocol.Connection) *indexHandlerRegist
deviceID := conn.DeviceID() deviceID := conn.DeviceID()
connID := conn.ConnectionID() connID := conn.ConnectionID()
m.fmut.Lock() m.mut.Lock()
defer m.fmut.Unlock() defer m.mut.Unlock()
indexHandlerRegistry, ok := m.indexHandlers.Get(deviceID) indexHandlerRegistry, ok := m.indexHandlers.Get(deviceID)
if ok && indexHandlerRegistry.conn.ConnectionID() == connID { if ok && indexHandlerRegistry.conn.ConnectionID() == connID {
@ -1370,8 +1354,8 @@ func (m *model) ensureIndexHandler(conn protocol.Connection) *indexHandlerRegist
return indexHandlerRegistry return indexHandlerRegistry
} }
func (m *model) getIndexHandlerPRLocked(conn protocol.Connection) (*indexHandlerRegistry, bool) { func (m *model) getIndexHandlerRLocked(conn protocol.Connection) (*indexHandlerRegistry, bool) {
// Reads from index handlers, which requires pmut to be read locked // Reads from index handlers, which requires the mutex to be read locked
deviceID := conn.DeviceID() deviceID := conn.DeviceID()
connID := conn.ConnectionID() connID := conn.ConnectionID()
@ -1450,14 +1434,14 @@ func (m *model) ccHandleFolders(folders []protocol.Folder, deviceCfg config.Devi
if err := m.ccCheckEncryption(cfg, folderDevice, ccDeviceInfos[folder.ID], deviceCfg.Untrusted); err != nil { if err := m.ccCheckEncryption(cfg, folderDevice, ccDeviceInfos[folder.ID], deviceCfg.Untrusted); err != nil {
sameError := false sameError := false
m.fmut.Lock() m.mut.Lock()
if devs, ok := m.folderEncryptionFailures[folder.ID]; ok { if devs, ok := m.folderEncryptionFailures[folder.ID]; ok {
sameError = devs[deviceID] == err sameError = devs[deviceID] == err
} else { } else {
m.folderEncryptionFailures[folder.ID] = make(map[protocol.DeviceID]error) m.folderEncryptionFailures[folder.ID] = make(map[protocol.DeviceID]error)
} }
m.folderEncryptionFailures[folder.ID][deviceID] = err m.folderEncryptionFailures[folder.ID][deviceID] = err
m.fmut.Unlock() m.mut.Unlock()
msg := fmt.Sprintf("Failure checking encryption consistency with device %v for folder %v: %v", deviceID, cfg.Description(), err) msg := fmt.Sprintf("Failure checking encryption consistency with device %v for folder %v: %v", deviceID, cfg.Description(), err)
if sameError { if sameError {
l.Debugln(msg) l.Debugln(msg)
@ -1470,7 +1454,7 @@ func (m *model) ccHandleFolders(folders []protocol.Folder, deviceCfg config.Devi
} }
return tempIndexFolders, seenFolders, err return tempIndexFolders, seenFolders, err
} }
m.fmut.Lock() m.mut.Lock()
if devErrs, ok := m.folderEncryptionFailures[folder.ID]; ok { if devErrs, ok := m.folderEncryptionFailures[folder.ID]; ok {
if len(devErrs) == 1 { if len(devErrs) == 1 {
delete(m.folderEncryptionFailures, folder.ID) delete(m.folderEncryptionFailures, folder.ID)
@ -1478,7 +1462,7 @@ func (m *model) ccHandleFolders(folders []protocol.Folder, deviceCfg config.Devi
delete(m.folderEncryptionFailures[folder.ID], deviceID) delete(m.folderEncryptionFailures[folder.ID], deviceID)
} }
} }
m.fmut.Unlock() m.mut.Unlock()
// Handle indexes // Handle indexes
@ -1582,9 +1566,9 @@ func (m *model) ccCheckEncryption(fcfg config.FolderConfiguration, folderDevice
// hasTokenRemote == true // hasTokenRemote == true
ccToken = ccDeviceInfos.remote.EncryptionPasswordToken ccToken = ccDeviceInfos.remote.EncryptionPasswordToken
} }
m.fmut.RLock() m.mut.RLock()
token, ok := m.folderEncryptionPasswordTokens[fcfg.ID] token, ok := m.folderEncryptionPasswordTokens[fcfg.ID]
m.fmut.RUnlock() m.mut.RUnlock()
if !ok { if !ok {
var err error var err error
token, err = readEncryptionToken(fcfg) token, err = readEncryptionToken(fcfg)
@ -1598,9 +1582,9 @@ func (m *model) ccCheckEncryption(fcfg config.FolderConfiguration, folderDevice
} }
} }
if err == nil { if err == nil {
m.fmut.Lock() m.mut.Lock()
m.folderEncryptionPasswordTokens[fcfg.ID] = token m.folderEncryptionPasswordTokens[fcfg.ID] = token
m.fmut.Unlock() m.mut.Unlock()
} else { } else {
if err := writeEncryptionToken(ccToken, fcfg); err != nil { if err := writeEncryptionToken(ccToken, fcfg); err != nil {
if rerr, ok := redactPathError(err); ok { if rerr, ok := redactPathError(err); ok {
@ -1612,9 +1596,9 @@ func (m *model) ccCheckEncryption(fcfg config.FolderConfiguration, folderDevice
} }
} }
} }
m.fmut.Lock() m.mut.Lock()
m.folderEncryptionPasswordTokens[fcfg.ID] = ccToken m.folderEncryptionPasswordTokens[fcfg.ID] = ccToken
m.fmut.Unlock() m.mut.Unlock()
// We can only announce ourselves once we have the token, // We can only announce ourselves once we have the token,
// thus we need to resend CCs now that we have it. // thus we need to resend CCs now that we have it.
m.sendClusterConfig(fcfg.DeviceIDs()) m.sendClusterConfig(fcfg.DeviceIDs())
@ -1632,14 +1616,14 @@ func (m *model) sendClusterConfig(ids []protocol.DeviceID) {
return return
} }
ccConns := make([]protocol.Connection, 0, len(ids)) ccConns := make([]protocol.Connection, 0, len(ids))
m.fmut.RLock() m.mut.RLock()
for _, id := range ids { for _, id := range ids {
if connIDs, ok := m.deviceConnIDs[id]; ok { if connIDs, ok := m.deviceConnIDs[id]; ok {
ccConns = append(ccConns, m.connections[connIDs[0]]) ccConns = append(ccConns, m.connections[connIDs[0]])
} }
} }
m.fmut.RUnlock() m.mut.RUnlock()
// Generating cluster-configs acquires fmut -> must happen outside of pmut. // Generating cluster-configs acquires the mutex.
for _, conn := range ccConns { for _, conn := range ccConns {
cm, passwords := m.generateClusterConfig(conn.DeviceID()) cm, passwords := m.generateClusterConfig(conn.DeviceID())
conn.SetFolderPasswords(passwords) conn.SetFolderPasswords(passwords)
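
The hunk above shows a recurring pattern with the single lock: snapshot what you need under the read lock, release it, and only then do the work that itself takes the lock (per the comment, generating cluster configs acquires the mutex). A hedged, standalone sketch of that copy-then-work pattern with placeholder types:

```go
package main

import (
	"fmt"
	"sync"
)

type conn struct{ id string }

// model is again a placeholder; connections is one of the maps guarded by mut.
type model struct {
	mut         sync.RWMutex
	connections map[string]*conn
}

// generateClusterConfig acquires the mutex internally, like the real method.
func (m *model) generateClusterConfig(c *conn) string {
	m.mut.RLock()
	defer m.mut.RUnlock()
	return "cluster-config for " + c.id
}

func (m *model) sendClusterConfig(ids []string) {
	// Copy the relevant connections under the lock...
	ccConns := make([]*conn, 0, len(ids))
	m.mut.RLock()
	for _, id := range ids {
		if c, ok := m.connections[id]; ok {
			ccConns = append(ccConns, c)
		}
	}
	m.mut.RUnlock()

	// ...then generate and "send" outside it, since generateClusterConfig
	// takes the mutex itself.
	for _, c := range ccConns {
		fmt.Println(m.generateClusterConfig(c))
	}
}

func main() {
	m := &model{connections: map[string]*conn{"conn-1": {id: "conn-1"}}}
	m.sendClusterConfig([]string{"conn-1", "conn-2"})
}
```

Releasing before the second acquisition matters because holding an RWMutex read lock across a call that re-acquires it can deadlock if a writer is queued in between; the copy-then-release step avoids that.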
@ -1875,10 +1859,10 @@ func (m *model) Closed(conn protocol.Connection, err error) {
connID := conn.ConnectionID() connID := conn.ConnectionID()
deviceID := conn.DeviceID() deviceID := conn.DeviceID()
m.fmut.Lock() m.mut.Lock()
conn, ok := m.connections[connID] conn, ok := m.connections[connID]
if !ok { if !ok {
m.fmut.Unlock() m.mut.Unlock()
return return
} }
@ -1909,14 +1893,14 @@ func (m *model) Closed(conn protocol.Connection, err error) {
m.deviceConnIDs[deviceID] = remainingConns m.deviceConnIDs[deviceID] = remainingConns
} }
m.fmut.Unlock() m.mut.Unlock()
if wait != nil { if wait != nil {
<-wait <-wait
} }
m.fmut.RLock() m.mut.RLock()
m.deviceDidCloseFRLocked(deviceID, time.Since(conn.EstablishedAt())) m.deviceDidCloseRLocked(deviceID, time.Since(conn.EstablishedAt()))
m.fmut.RUnlock() m.mut.RUnlock()
k := map[bool]string{false: "secondary", true: "primary"}[removedIsPrimary] k := map[bool]string{false: "secondary", true: "primary"}[removedIsPrimary]
l.Infof("Lost %s connection to %s at %s: %v (%d remain)", k, deviceID.Short(), conn, err, len(remainingConns)) l.Infof("Lost %s connection to %s at %s: %v (%d remain)", k, deviceID.Short(), conn, err, len(remainingConns))
@ -1969,10 +1953,10 @@ func (m *model) Request(conn protocol.Connection, folder, name string, _, size i
deviceID := conn.DeviceID() deviceID := conn.DeviceID()
m.fmut.RLock() m.mut.RLock()
folderCfg, ok := m.folderCfgs[folder] folderCfg, ok := m.folderCfgs[folder]
folderIgnores := m.folderIgnores[folder] folderIgnores := m.folderIgnores[folder]
m.fmut.RUnlock() m.mut.RUnlock()
if !ok { if !ok {
// The folder might be already unpaused in the config, but not yet // The folder might be already unpaused in the config, but not yet
// in the model. // in the model.
@ -2011,9 +1995,9 @@ func (m *model) Request(conn protocol.Connection, folder, name string, _, size i
// Restrict parallel requests by connection/device // Restrict parallel requests by connection/device
m.fmut.RLock() m.mut.RLock()
limiter := m.connRequestLimiters[deviceID] limiter := m.connRequestLimiters[deviceID]
m.fmut.RUnlock() m.mut.RUnlock()
// The requestResponse releases the bytes to the buffer pool and the // The requestResponse releases the bytes to the buffer pool and the
// limiters when its Close method is called. // limiters when its Close method is called.
@ -2142,9 +2126,9 @@ func (m *model) recheckFile(deviceID protocol.DeviceID, folder, name string, off
// to what we have in the database, yet the content we've read off the filesystem doesn't // to what we have in the database, yet the content we've read off the filesystem doesn't
// Something is fishy, invalidate the file and rescan it. // Something is fishy, invalidate the file and rescan it.
// The file will temporarily become invalid, which is ok as the content is messed up. // The file will temporarily become invalid, which is ok as the content is messed up.
m.fmut.RLock() m.mut.RLock()
runner, ok := m.folderRunners.Get(folder) runner, ok := m.folderRunners.Get(folder)
m.fmut.RUnlock() m.mut.RUnlock()
if !ok { if !ok {
l.Debugf("%v recheckFile: %s: %q / %q: Folder stopped before rescan could be scheduled", m, deviceID, folder, name) l.Debugf("%v recheckFile: %s: %q / %q: Folder stopped before rescan could be scheduled", m, deviceID, folder, name)
return return
@ -2156,9 +2140,9 @@ func (m *model) recheckFile(deviceID protocol.DeviceID, folder, name string, off
} }
func (m *model) CurrentFolderFile(folder string, file string) (protocol.FileInfo, bool, error) { func (m *model) CurrentFolderFile(folder string, file string) (protocol.FileInfo, bool, error) {
m.fmut.RLock() m.mut.RLock()
fs, ok := m.folderFiles[folder] fs, ok := m.folderFiles[folder]
m.fmut.RUnlock() m.mut.RUnlock()
if !ok { if !ok {
return protocol.FileInfo{}, false, ErrFolderMissing return protocol.FileInfo{}, false, ErrFolderMissing
} }
@ -2172,9 +2156,9 @@ func (m *model) CurrentFolderFile(folder string, file string) (protocol.FileInfo
} }
func (m *model) CurrentGlobalFile(folder string, file string) (protocol.FileInfo, bool, error) { func (m *model) CurrentGlobalFile(folder string, file string) (protocol.FileInfo, bool, error) {
m.fmut.RLock() m.mut.RLock()
ffs, ok := m.folderFiles[folder] ffs, ok := m.folderFiles[folder]
m.fmut.RUnlock() m.mut.RUnlock()
if !ok { if !ok {
return protocol.FileInfo{}, false, ErrFolderMissing return protocol.FileInfo{}, false, ErrFolderMissing
} }
@ -2188,10 +2172,10 @@ func (m *model) CurrentGlobalFile(folder string, file string) (protocol.FileInfo
} }
func (m *model) GetMtimeMapping(folder string, file string) (fs.MtimeMapping, error) { func (m *model) GetMtimeMapping(folder string, file string) (fs.MtimeMapping, error) {
m.fmut.RLock() m.mut.RLock()
ffs, ok := m.folderFiles[folder] ffs, ok := m.folderFiles[folder]
fcfg := m.folderCfgs[folder] fcfg := m.folderCfgs[folder]
m.fmut.RUnlock() m.mut.RUnlock()
if !ok { if !ok {
return fs.MtimeMapping{}, ErrFolderMissing return fs.MtimeMapping{}, ErrFolderMissing
} }
@ -2200,19 +2184,19 @@ func (m *model) GetMtimeMapping(folder string, file string) (fs.MtimeMapping, er
// Connection returns if we are connected to the given device. // Connection returns if we are connected to the given device.
func (m *model) ConnectedTo(deviceID protocol.DeviceID) bool { func (m *model) ConnectedTo(deviceID protocol.DeviceID) bool {
m.fmut.RLock() m.mut.RLock()
_, ok := m.deviceConnIDs[deviceID] _, ok := m.deviceConnIDs[deviceID]
m.fmut.RUnlock() m.mut.RUnlock()
return ok return ok
} }
// LoadIgnores loads or refreshes the ignore patterns from disk, if the // LoadIgnores loads or refreshes the ignore patterns from disk, if the
// folder is healthy, and returns the refreshed lines and patterns. // folder is healthy, and returns the refreshed lines and patterns.
func (m *model) LoadIgnores(folder string) ([]string, []string, error) { func (m *model) LoadIgnores(folder string) ([]string, []string, error) {
m.fmut.RLock() m.mut.RLock()
cfg, cfgOk := m.folderCfgs[folder] cfg, cfgOk := m.folderCfgs[folder]
ignores, ignoresOk := m.folderIgnores[folder] ignores, ignoresOk := m.folderIgnores[folder]
m.fmut.RUnlock() m.mut.RUnlock()
if !cfgOk { if !cfgOk {
cfg, cfgOk = m.cfg.Folder(folder) cfg, cfgOk = m.cfg.Folder(folder)
@ -2244,10 +2228,10 @@ func (m *model) LoadIgnores(folder string) ([]string, []string, error) {
// whichever it may be. No attempt is made to load or refresh ignore // whichever it may be. No attempt is made to load or refresh ignore
// patterns from disk. // patterns from disk.
func (m *model) CurrentIgnores(folder string) ([]string, []string, error) { func (m *model) CurrentIgnores(folder string) ([]string, []string, error) {
m.fmut.RLock() m.mut.RLock()
_, cfgOk := m.folderCfgs[folder] _, cfgOk := m.folderCfgs[folder]
ignores, ignoresOk := m.folderIgnores[folder] ignores, ignoresOk := m.folderIgnores[folder]
m.fmut.RUnlock() m.mut.RUnlock()
if !cfgOk { if !cfgOk {
return nil, nil, fmt.Errorf("folder %s does not exist", folder) return nil, nil, fmt.Errorf("folder %s does not exist", folder)
@ -2286,9 +2270,9 @@ func (m *model) setIgnores(cfg config.FolderConfiguration, content []string) err
return err return err
} }
m.fmut.RLock() m.mut.RLock()
runner, ok := m.folderRunners.Get(cfg.ID) runner, ok := m.folderRunners.Get(cfg.ID)
m.fmut.RUnlock() m.mut.RUnlock()
if ok { if ok {
runner.ScheduleScan() runner.ScheduleScan()
} }
@ -2335,7 +2319,7 @@ func (m *model) AddConnection(conn protocol.Connection, hello protocol.Hello) {
connID := conn.ConnectionID() connID := conn.ConnectionID()
closed := make(chan struct{}) closed := make(chan struct{})
m.fmut.Lock() m.mut.Lock()
m.connections[connID] = conn m.connections[connID] = conn
m.closed[connID] = closed m.closed[connID] = closed
@ -2366,7 +2350,7 @@ func (m *model) AddConnection(conn protocol.Connection, hello protocol.Hello) {
l.Infof(`Additional connection (+%d) for device %s at %s`, len(m.deviceConnIDs[deviceID])-1, deviceID.Short(), conn) l.Infof(`Additional connection (+%d) for device %s at %s`, len(m.deviceConnIDs[deviceID])-1, deviceID.Short(), conn)
} }
m.fmut.Unlock() m.mut.Unlock()
if (deviceCfg.Name == "" || m.cfg.Options().OverwriteRemoteDevNames) && hello.DeviceName != "" { if (deviceCfg.Name == "" || m.cfg.Options().OverwriteRemoteDevNames) && hello.DeviceName != "" {
m.cfg.Modify(func(cfg *config.Configuration) { m.cfg.Modify(func(cfg *config.Configuration) {
@ -2397,11 +2381,11 @@ func (m *model) scheduleConnectionPromotion() {
// be called after adding new connections, and after closing a primary // be called after adding new connections, and after closing a primary
// device connection. // device connection.
func (m *model) promoteConnections() { func (m *model) promoteConnections() {
m.fmut.Lock() m.mut.Lock()
defer m.fmut.Unlock() defer m.mut.Unlock()
for deviceID, connIDs := range m.deviceConnIDs { for deviceID, connIDs := range m.deviceConnIDs {
cm, passwords := m.generateClusterConfigFRLocked(deviceID) cm, passwords := m.generateClusterConfigRLocked(deviceID)
if m.promotedConnID[deviceID] != connIDs[0] { if m.promotedConnID[deviceID] != connIDs[0] {
// The previously promoted connection is not the current // The previously promoted connection is not the current
// primary; we should promote the primary connection to be the // primary; we should promote the primary connection to be the
@ -2435,17 +2419,17 @@ func (m *model) promoteConnections() {
func (m *model) DownloadProgress(conn protocol.Connection, folder string, updates []protocol.FileDownloadProgressUpdate) error { func (m *model) DownloadProgress(conn protocol.Connection, folder string, updates []protocol.FileDownloadProgressUpdate) error {
deviceID := conn.DeviceID() deviceID := conn.DeviceID()
m.fmut.RLock() m.mut.RLock()
cfg, ok := m.folderCfgs[folder] cfg, ok := m.folderCfgs[folder]
m.fmut.RUnlock() m.mut.RUnlock()
if !ok || cfg.DisableTempIndexes || !cfg.SharedWith(deviceID) { if !ok || cfg.DisableTempIndexes || !cfg.SharedWith(deviceID) {
return nil return nil
} }
m.fmut.RLock() m.mut.RLock()
downloads := m.deviceDownloads[deviceID] downloads := m.deviceDownloads[deviceID]
m.fmut.RUnlock() m.mut.RUnlock()
downloads.Update(folder, updates) downloads.Update(folder, updates)
state := downloads.GetBlockCounts(folder) state := downloads.GetBlockCounts(folder)
@ -2459,15 +2443,15 @@ func (m *model) DownloadProgress(conn protocol.Connection, folder string, update
} }
func (m *model) deviceWasSeen(deviceID protocol.DeviceID) { func (m *model) deviceWasSeen(deviceID protocol.DeviceID) {
m.fmut.RLock() m.mut.RLock()
sr, ok := m.deviceStatRefs[deviceID] sr, ok := m.deviceStatRefs[deviceID]
m.fmut.RUnlock() m.mut.RUnlock()
if ok { if ok {
_ = sr.WasSeen() _ = sr.WasSeen()
} }
} }
func (m *model) deviceDidCloseFRLocked(deviceID protocol.DeviceID, duration time.Duration) { func (m *model) deviceDidCloseRLocked(deviceID protocol.DeviceID, duration time.Duration) {
if sr, ok := m.deviceStatRefs[deviceID]; ok { if sr, ok := m.deviceStatRefs[deviceID]; ok {
_ = sr.LastConnectionDuration(duration) _ = sr.LastConnectionDuration(duration)
_ = sr.WasSeen() _ = sr.WasSeen()
@ -2490,8 +2474,8 @@ func (m *model) requestGlobal(ctx context.Context, deviceID protocol.DeviceID, f
// ("primary") connection, which is dedicated to index data, and pick a // ("primary") connection, which is dedicated to index data, and pick a
// random one of the others. // random one of the others.
func (m *model) requestConnectionForDevice(deviceID protocol.DeviceID) (protocol.Connection, bool) { func (m *model) requestConnectionForDevice(deviceID protocol.DeviceID) (protocol.Connection, bool) {
m.fmut.RLock() m.mut.RLock()
defer m.fmut.RUnlock() defer m.mut.RUnlock()
connIDs, ok := m.deviceConnIDs[deviceID] connIDs, ok := m.deviceConnIDs[deviceID]
if !ok { if !ok {
@ -2512,12 +2496,12 @@ func (m *model) requestConnectionForDevice(deviceID protocol.DeviceID) (protocol
} }
func (m *model) ScanFolders() map[string]error { func (m *model) ScanFolders() map[string]error {
m.fmut.RLock() m.mut.RLock()
folders := make([]string, 0, len(m.folderCfgs)) folders := make([]string, 0, len(m.folderCfgs))
for folder := range m.folderCfgs { for folder := range m.folderCfgs {
folders = append(folders, folder) folders = append(folders, folder)
} }
m.fmut.RUnlock() m.mut.RUnlock()
errors := make(map[string]error, len(m.folderCfgs)) errors := make(map[string]error, len(m.folderCfgs))
errorsMut := sync.NewMutex() errorsMut := sync.NewMutex()
@ -2545,10 +2529,10 @@ func (m *model) ScanFolder(folder string) error {
} }
func (m *model) ScanFolderSubdirs(folder string, subs []string) error { func (m *model) ScanFolderSubdirs(folder string, subs []string) error {
m.fmut.RLock() m.mut.RLock()
err := m.checkFolderRunningLocked(folder) err := m.checkFolderRunningRLocked(folder)
runner, _ := m.folderRunners.Get(folder) runner, _ := m.folderRunners.Get(folder)
m.fmut.RUnlock() m.mut.RUnlock()
if err != nil { if err != nil {
return err return err
@ -2558,9 +2542,9 @@ func (m *model) ScanFolderSubdirs(folder string, subs []string) error {
} }
func (m *model) DelayScan(folder string, next time.Duration) { func (m *model) DelayScan(folder string, next time.Duration) {
m.fmut.RLock() m.mut.RLock()
runner, ok := m.folderRunners.Get(folder) runner, ok := m.folderRunners.Get(folder)
m.fmut.RUnlock() m.mut.RUnlock()
if !ok { if !ok {
return return
} }
@ -2570,10 +2554,10 @@ func (m *model) DelayScan(folder string, next time.Duration) {
// numHashers returns the number of hasher routines to use for a given folder, // numHashers returns the number of hasher routines to use for a given folder,
// taking into account configuration and available CPU cores. // taking into account configuration and available CPU cores.
func (m *model) numHashers(folder string) int { func (m *model) numHashers(folder string) int {
m.fmut.RLock() m.mut.RLock()
folderCfg := m.folderCfgs[folder] folderCfg := m.folderCfgs[folder]
numFolders := len(m.folderCfgs) numFolders := len(m.folderCfgs)
m.fmut.RUnlock() m.mut.RUnlock()
if folderCfg.Hashers > 0 { if folderCfg.Hashers > 0 {
// Specific value set in the config, use that. // Specific value set in the config, use that.
@ -2599,12 +2583,12 @@ func (m *model) numHashers(folder string) int {
// generateClusterConfig returns a ClusterConfigMessage that is correct and the // generateClusterConfig returns a ClusterConfigMessage that is correct and the
// set of folder passwords for the given peer device // set of folder passwords for the given peer device
func (m *model) generateClusterConfig(device protocol.DeviceID) (protocol.ClusterConfig, map[string]string) { func (m *model) generateClusterConfig(device protocol.DeviceID) (protocol.ClusterConfig, map[string]string) {
m.fmut.RLock() m.mut.RLock()
defer m.fmut.RUnlock() defer m.mut.RUnlock()
return m.generateClusterConfigFRLocked(device) return m.generateClusterConfigRLocked(device)
} }
func (m *model) generateClusterConfigFRLocked(device protocol.DeviceID) (protocol.ClusterConfig, map[string]string) { func (m *model) generateClusterConfigRLocked(device protocol.DeviceID) (protocol.ClusterConfig, map[string]string) {
var message protocol.ClusterConfig var message protocol.ClusterConfig
folders := m.cfg.FolderList() folders := m.cfg.FolderList()
passwords := make(map[string]string, len(folders)) passwords := make(map[string]string, len(folders))
@ -2678,9 +2662,9 @@ func (m *model) generateClusterConfigFRLocked(device protocol.DeviceID) (protoco
} }
func (m *model) State(folder string) (string, time.Time, error) { func (m *model) State(folder string) (string, time.Time, error) {
m.fmut.RLock() m.mut.RLock()
runner, ok := m.folderRunners.Get(folder) runner, ok := m.folderRunners.Get(folder)
m.fmut.RUnlock() m.mut.RUnlock()
if !ok { if !ok {
// The returned error should be an actual folder error, so returning // The returned error should be an actual folder error, so returning
// errors.New("does not exist") or similar here would be // errors.New("does not exist") or similar here would be
@ -2692,10 +2676,10 @@ func (m *model) State(folder string) (string, time.Time, error) {
} }
func (m *model) FolderErrors(folder string) ([]FileError, error) { func (m *model) FolderErrors(folder string) ([]FileError, error) {
m.fmut.RLock() m.mut.RLock()
err := m.checkFolderRunningLocked(folder) err := m.checkFolderRunningRLocked(folder)
runner, _ := m.folderRunners.Get(folder) runner, _ := m.folderRunners.Get(folder)
m.fmut.RUnlock() m.mut.RUnlock()
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -2703,10 +2687,10 @@ func (m *model) FolderErrors(folder string) ([]FileError, error) {
} }
func (m *model) WatchError(folder string) error { func (m *model) WatchError(folder string) error {
m.fmut.RLock() m.mut.RLock()
err := m.checkFolderRunningLocked(folder) err := m.checkFolderRunningRLocked(folder)
runner, _ := m.folderRunners.Get(folder) runner, _ := m.folderRunners.Get(folder)
m.fmut.RUnlock() m.mut.RUnlock()
if err != nil { if err != nil {
return nil // If the folder isn't running, there's no error to report. return nil // If the folder isn't running, there's no error to report.
} }
@ -2716,9 +2700,9 @@ func (m *model) WatchError(folder string) error {
func (m *model) Override(folder string) { func (m *model) Override(folder string) {
// Grab the runner and the file set. // Grab the runner and the file set.
m.fmut.RLock() m.mut.RLock()
runner, ok := m.folderRunners.Get(folder) runner, ok := m.folderRunners.Get(folder)
m.fmut.RUnlock() m.mut.RUnlock()
if !ok { if !ok {
return return
} }
@ -2731,9 +2715,9 @@ func (m *model) Override(folder string) {
func (m *model) Revert(folder string) { func (m *model) Revert(folder string) {
// Grab the runner and the file set. // Grab the runner and the file set.
m.fmut.RLock() m.mut.RLock()
runner, ok := m.folderRunners.Get(folder) runner, ok := m.folderRunners.Get(folder)
m.fmut.RUnlock() m.mut.RUnlock()
if !ok { if !ok {
return return
} }
@ -2761,9 +2745,9 @@ func findByName(slice []*TreeEntry, name string) *TreeEntry {
} }
 func (m *model) GlobalDirectoryTree(folder, prefix string, levels int, dirsOnly bool) ([]*TreeEntry, error) {
-m.fmut.RLock()
+m.mut.RLock()
 files, ok := m.folderFiles[folder]
-m.fmut.RUnlock()
+m.mut.RUnlock()
 if !ok {
 return nil, ErrFolderMissing
 }
@@ -2833,10 +2817,10 @@ func (m *model) GlobalDirectoryTree(folder, prefix string, levels int, dirsOnly
 }
 func (m *model) GetFolderVersions(folder string) (map[string][]versioner.FileVersion, error) {
-m.fmut.RLock()
-err := m.checkFolderRunningLocked(folder)
+m.mut.RLock()
+err := m.checkFolderRunningRLocked(folder)
 ver := m.folderVersioners[folder]
-m.fmut.RUnlock()
+m.mut.RUnlock()
 if err != nil {
 return nil, err
 }
@@ -2848,11 +2832,11 @@ func (m *model) GetFolderVersions(folder string) (map[string][]versioner.FileVer
 }
 func (m *model) RestoreFolderVersions(folder string, versions map[string]time.Time) (map[string]error, error) {
-m.fmut.RLock()
-err := m.checkFolderRunningLocked(folder)
+m.mut.RLock()
+err := m.checkFolderRunningRLocked(folder)
 fcfg := m.folderCfgs[folder]
 ver := m.folderVersioners[folder]
-m.fmut.RUnlock()
+m.mut.RUnlock()
 if err != nil {
 return nil, err
 }
@@ -2877,12 +2861,8 @@ func (m *model) RestoreFolderVersions(folder string, versions map[string]time.Ti
 }
 func (m *model) Availability(folder string, file protocol.FileInfo, block protocol.BlockInfo) ([]Availability, error) {
-// The slightly unusual locking sequence here is because we need to hold
-// pmut for the duration (as the value returned from foldersFiles can
-// get heavily modified on Close()), but also must acquire fmut before
-// pmut. (The locks can be *released* in any order.)
-m.fmut.RLock()
-defer m.fmut.RUnlock()
+m.mut.RLock()
+defer m.mut.RUnlock()
 fs, ok := m.folderFiles[folder]
 cfg := m.folderCfgs[folder]
@@ -2897,16 +2877,16 @@ func (m *model) Availability(folder string, file protocol.FileInfo, block protoc
 }
 defer snap.Release()
-return m.availabilityInSnapshotPRlocked(cfg, snap, file, block), nil
+return m.availabilityInSnapshotRLocked(cfg, snap, file, block), nil
 }
 func (m *model) availabilityInSnapshot(cfg config.FolderConfiguration, snap *db.Snapshot, file protocol.FileInfo, block protocol.BlockInfo) []Availability {
-m.fmut.RLock()
-defer m.fmut.RUnlock()
-return m.availabilityInSnapshotPRlocked(cfg, snap, file, block)
+m.mut.RLock()
+defer m.mut.RUnlock()
+return m.availabilityInSnapshotRLocked(cfg, snap, file, block)
 }
-func (m *model) availabilityInSnapshotPRlocked(cfg config.FolderConfiguration, snap *db.Snapshot, file protocol.FileInfo, block protocol.BlockInfo) []Availability {
+func (m *model) availabilityInSnapshotRLocked(cfg config.FolderConfiguration, snap *db.Snapshot, file protocol.FileInfo, block protocol.BlockInfo) []Availability {
 var availabilities []Availability
 for _, device := range snap.Availability(file.Name) {
 if _, ok := m.remoteFolderStates[device]; !ok {
@@ -2932,9 +2912,9 @@ func (m *model) availabilityInSnapshotPRlocked(cfg config.FolderConfiguration, s
 // BringToFront bumps the given files priority in the job queue.
 func (m *model) BringToFront(folder, file string) {
-m.fmut.RLock()
+m.mut.RLock()
 runner, ok := m.folderRunners.Get(folder)
-m.fmut.RUnlock()
+m.mut.RUnlock()
 if ok {
 runner.BringToFront(file)
@@ -2942,8 +2922,8 @@ func (m *model) BringToFront(folder, file string) {
 }
 func (m *model) ResetFolder(folder string) error {
-m.fmut.RLock()
-defer m.fmut.RUnlock()
+m.mut.RLock()
+defer m.mut.RUnlock()
 _, ok := m.folderRunners.Get(folder)
 if ok {
 return errors.New("folder must be paused when resetting")
@@ -3037,9 +3017,9 @@ func (m *model) CommitConfiguration(from, to config.Configuration) bool {
 // If we don't have the encryption token yet, we need to drop
 // the connection to make the remote re-send the cluster-config
 // and with it the token.
-m.fmut.RLock()
+m.mut.RLock()
 _, ok := m.folderEncryptionPasswordTokens[toCfg.ID]
-m.fmut.RUnlock()
+m.mut.RUnlock()
 if !ok {
 closeDevices = append(closeDevices, toCfg.DeviceIDs()...)
 } else {
@@ -3065,9 +3045,9 @@ func (m *model) CommitConfiguration(from, to config.Configuration) bool {
 fromCfg, ok := fromDevices[deviceID]
 if !ok {
 sr := stats.NewDeviceStatisticsReference(m.db, deviceID)
-m.fmut.Lock()
+m.mut.Lock()
 m.deviceStatRefs[deviceID] = sr
-m.fmut.Unlock()
+m.mut.Unlock()
 continue
 }
 delete(fromDevices, deviceID)
@@ -3090,23 +3070,23 @@ func (m *model) CommitConfiguration(from, to config.Configuration) bool {
 }
 if toCfg.MaxRequestKiB != fromCfg.MaxRequestKiB {
-m.fmut.Lock()
-m.setConnRequestLimitersPLocked(toCfg)
-m.fmut.Unlock()
+m.mut.Lock()
+m.setConnRequestLimitersLocked(toCfg)
+m.mut.Unlock()
 }
 }
 // Clean up after removed devices
 removedDevices := make([]protocol.DeviceID, 0, len(fromDevices))
-m.fmut.Lock()
+m.mut.Lock()
 for deviceID := range fromDevices {
 delete(m.deviceStatRefs, deviceID)
 removedDevices = append(removedDevices, deviceID)
 delete(clusterConfigDevices, deviceID)
 }
-m.fmut.Unlock()
-m.fmut.RLock()
+m.mut.Unlock()
+m.mut.RLock()
 for _, id := range closeDevices {
 delete(clusterConfigDevices, id)
 if conns, ok := m.deviceConnIDs[id]; ok {
@@ -3123,8 +3103,8 @@ func (m *model) CommitConfiguration(from, to config.Configuration) bool {
 }
 }
 }
-m.fmut.RUnlock()
-// Generating cluster-configs acquires fmut -> must happen outside of pmut.
+m.mut.RUnlock()
+// Generating cluster-configs acquires the mutex.
 m.sendClusterConfig(clusterConfigDevices.AsSlice())
 ignoredDevices := observedDeviceSet(to.IgnoredDevices)
@@ -3144,8 +3124,8 @@ func (m *model) CommitConfiguration(from, to config.Configuration) bool {
 return true
 }
-func (m *model) setConnRequestLimitersPLocked(cfg config.DeviceConfiguration) {
-// Touches connRequestLimiters which is protected by pmut.
+func (m *model) setConnRequestLimitersLocked(cfg config.DeviceConfiguration) {
+// Touches connRequestLimiters which is protected by the mutex.
 // 0: default, <0: no limiting
 switch {
 case cfg.MaxRequestKiB > 0:
@@ -3251,10 +3231,10 @@ func (m *model) cleanPending(existingDevices map[protocol.DeviceID]config.Device
 }
 }
-// checkFolderRunningLocked returns nil if the folder is up and running and a
+// checkFolderRunningRLocked returns nil if the folder is up and running and a
 // descriptive error if not.
-// Need to hold (read) lock on m.fmut when calling this.
-func (m *model) checkFolderRunningLocked(folder string) error {
+// Need to hold (read) lock on m.mut when calling this.
+func (m *model) checkFolderRunningRLocked(folder string) error {
 _, ok := m.folderRunners.Get(folder)
 if ok {
 return nil

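For readers following the rename: the convention behind the `...RLocked` suffix above is that public methods take `m.mut` themselves, while helpers ending in `RLocked` (or `Locked`) assume the caller already holds the read (or write) lock. Below is a minimal, hypothetical sketch of that pattern; the `registry` type and its fields are invented for illustration and are not Syncthing's model.

package main

import (
	"errors"
	"fmt"
	"sync"
)

// registry stands in for the model: a single RWMutex guards all the maps.
type registry struct {
	mut     sync.RWMutex
	runners map[string]bool
}

// CheckFolder is the public entry point: it takes the read lock and
// delegates to the RLocked helper.
func (r *registry) CheckFolder(folder string) error {
	r.mut.RLock()
	defer r.mut.RUnlock()
	return r.checkFolderRLocked(folder)
}

// checkFolderRLocked must only be called with r.mut held for reading.
func (r *registry) checkFolderRLocked(folder string) error {
	if !r.runners[folder] {
		return errors.New("folder not running")
	}
	return nil
}

func main() {
	r := &registry{runners: map[string]bool{"default": true}}
	fmt.Println(r.CheckFolder("default")) // <nil>
	fmt.Println(r.CheckFolder("missing")) // folder not running
}

With only one lock left, the suffix just needs to say which mode the caller must hold, which is why the earlier `...PRLocked`/`...PLocked` spellings (which referred to the old `pmut`) collapse into `...RLocked`/`...Locked`.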
View File

@@ -902,13 +902,13 @@ func TestIssue5063(t *testing.T) {
 defer cleanupModel(m)
 defer cancel()
-m.fmut.Lock()
+m.mut.Lock()
 for _, c := range m.connections {
 conn := c.(*fakeConnection)
 conn.CloseCalls(func(_ error) {})
 defer m.Closed(c, errStopped) // to unblock deferred m.Stop()
 }
-m.fmut.Unlock()
+m.mut.Unlock()
 wg := sync.WaitGroup{}
@@ -1524,10 +1524,10 @@ func TestIgnores(t *testing.T) {
 FilesystemType: fs.FilesystemTypeFake,
 }
 ignores := ignore.New(fcfg.Filesystem(nil), ignore.WithCache(m.cfg.Options().CacheIgnoredFiles))
-m.fmut.Lock()
+m.mut.Lock()
 m.folderCfgs[fcfg.ID] = fcfg
 m.folderIgnores[fcfg.ID] = ignores
-m.fmut.Unlock()
+m.mut.Unlock()
 _, _, err = m.LoadIgnores("fresh")
 if err != nil {
@@ -2973,7 +2973,7 @@ func TestConnCloseOnRestart(t *testing.T) {
 ci := &protocolmocks.ConnectionInfo{}
 ci.ConnectionIDReturns(srand.String(16))
 m.AddConnection(protocol.NewConnection(device1, br, nw, testutil.NoopCloser{}, m, ci, protocol.CompressionNever, nil, m.keyGen), protocol.Hello{})
-m.fmut.RLock()
+m.mut.RLock()
 if len(m.closed) != 1 {
 t.Fatalf("Expected just one conn (len(m.closed) == %v)", len(m.closed))
 }
@@ -2981,7 +2981,7 @@ func TestConnCloseOnRestart(t *testing.T) {
 for _, c := range m.closed {
 closed = c
 }
-m.fmut.RUnlock()
+m.mut.RUnlock()
 waiter, err := w.RemoveDevice(device1)
 if err != nil {
@@ -3074,12 +3074,12 @@ func TestDevicePause(t *testing.T) {
 sub := m.evLogger.Subscribe(events.DevicePaused)
 defer sub.Unsubscribe()
-m.fmut.RLock()
+m.mut.RLock()
 var closed chan struct{}
 for _, c := range m.closed {
 closed = c
 }
-m.fmut.RUnlock()
+m.mut.RUnlock()
 pauseDevice(t, m.cfg, device1, true)
@@ -3754,9 +3754,9 @@ func TestCompletionEmptyGlobal(t *testing.T) {
 defer wcfgCancel()
 defer cleanupModelAndRemoveDir(m, fcfg.Filesystem(nil).URI())
 files := []protocol.FileInfo{{Name: "foo", Version: protocol.Vector{}.Update(myID.Short()), Sequence: 1}}
-m.fmut.Lock()
+m.mut.Lock()
 m.folderFiles[fcfg.ID].Update(protocol.LocalDeviceID, files)
-m.fmut.Unlock()
+m.mut.Unlock()
 files[0].Deleted = true
 files[0].Version = files[0].Version.Update(device1.Short())
 must(t, m.IndexUpdate(conn, fcfg.ID, files))

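The test changes are the same mechanical rename, but they show the pattern the tests rely on: seed or inspect the model's internal maps while briefly holding `m.mut`, release it, and only then drive the public API. A self-contained, hypothetical sketch of that test pattern follows; the `catalog` type is invented for the example and is not part of Syncthing.

package catalog

import (
	"sync"
	"testing"
)

// catalog stands in for the model: one mutex, internal maps behind it.
type catalog struct {
	mut   sync.RWMutex
	files map[string][]string
}

// Count is the public API under test.
func (c *catalog) Count(folder string) int {
	c.mut.RLock()
	defer c.mut.RUnlock()
	return len(c.files[folder])
}

func TestCountSeededUnderLock(t *testing.T) {
	c := &catalog{files: make(map[string][]string)}

	// Seed internal state under the struct's single mutex, as the model
	// tests above do, then unlock before exercising the public API.
	c.mut.Lock()
	c.files["default"] = []string{"foo", "bar"}
	c.mut.Unlock()

	if got := c.Count("default"); got != 2 {
		t.Fatalf("expected 2 files, got %d", got)
	}
}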
View File

@@ -1287,9 +1287,9 @@ func TestRequestReceiveEncrypted(t *testing.T) {
 files := genFiles(2)
 files[1].LocalFlags = protocol.FlagLocalReceiveOnly
-m.fmut.RLock()
+m.mut.RLock()
 fset := m.folderFiles[fcfg.ID]
-m.fmut.RUnlock()
+m.mut.RUnlock()
 fset.Update(protocol.LocalDeviceID, files)
 indexChan := make(chan []protocol.FileInfo, 10)

View File

@@ -295,9 +295,9 @@ func folderIgnoresAlwaysReload(t testing.TB, m *testModel, fcfg config.FolderCon
 m.removeFolder(fcfg)
 fset := newFileSet(t, fcfg.ID, m.db)
 ignores := ignore.New(fcfg.Filesystem(nil), ignore.WithCache(true), ignore.WithChangeDetector(newAlwaysChanged()))
-m.fmut.Lock()
+m.mut.Lock()
 m.addAndStartFolderLockedWithIgnores(fcfg, fset, ignores)
-m.fmut.Unlock()
+m.mut.Unlock()
 }
 func basicClusterConfig(local, remote protocol.DeviceID, folders ...string) protocol.ClusterConfig {
@@ -319,9 +319,9 @@ func basicClusterConfig(local, remote protocol.DeviceID, folders ...string) prot
 }
 func localIndexUpdate(m *testModel, folder string, fs []protocol.FileInfo) {
-m.fmut.RLock()
+m.mut.RLock()
 fset := m.folderFiles[folder]
-m.fmut.RUnlock()
+m.mut.RUnlock()
 fset.Update(protocol.LocalDeviceID, fs)
 seq := fset.Sequence(protocol.LocalDeviceID)

View File

@@ -11,100 +11,12 @@ import (
 "errors"
 "fmt"
 "path/filepath"
-"strings"
-"sync"
 "time"
 "github.com/prometheus/client_golang/prometheus"
-"github.com/syncthing/syncthing/lib/events"
 "github.com/syncthing/syncthing/lib/fs"
-"github.com/syncthing/syncthing/lib/ur"
 )
-type Holdable interface {
-Holders() string
-}
-func newDeadlockDetector(timeout time.Duration, evLogger events.Logger, fatal func(error)) *deadlockDetector {
-return &deadlockDetector{
-warnTimeout: timeout,
-fatalTimeout: 10 * timeout,
-lockers: make(map[string]sync.Locker),
-evLogger: evLogger,
-fatal: fatal,
-}
-}
-type deadlockDetector struct {
-warnTimeout, fatalTimeout time.Duration
-lockers map[string]sync.Locker
-evLogger events.Logger
-fatal func(error)
-}
-func (d *deadlockDetector) Watch(name string, mut sync.Locker) {
-d.lockers[name] = mut
-go func() {
-for {
-time.Sleep(d.warnTimeout / 4)
-done := make(chan struct{}, 1)
-go func() {
-mut.Lock()
-_ = 1 // empty critical section
-mut.Unlock()
-done <- struct{}{}
-}()
-d.watchInner(name, done)
-}
-}()
-}
-func (d *deadlockDetector) watchInner(name string, done chan struct{}) {
-warn := time.NewTimer(d.warnTimeout)
-fatal := time.NewTimer(d.fatalTimeout)
-defer func() {
-warn.Stop()
-fatal.Stop()
-}()
-select {
-case <-warn.C:
-failure := ur.FailureDataWithGoroutines(fmt.Sprintf("potential deadlock detected at %s (short timeout)", name))
-failure.Extra["timeout"] = d.warnTimeout.String()
-d.evLogger.Log(events.Failure, failure)
-case <-done:
-return
-}
-select {
-case <-fatal.C:
-err := fmt.Errorf("potential deadlock detected at %s (long timeout)", name)
-failure := ur.FailureDataWithGoroutines(err.Error())
-failure.Extra["timeout"] = d.fatalTimeout.String()
-others := d.otherHolders()
-failure.Extra["other-holders"] = others
-d.evLogger.Log(events.Failure, failure)
-d.fatal(err)
-// Give it a minute to shut down gracefully, maybe shutting down
-// can get out of the deadlock (or it's not really a deadlock).
-time.Sleep(time.Minute)
-panic(fmt.Sprintf("%v:\n%v", err, others))
-case <-done:
-}
-}
-func (d *deadlockDetector) otherHolders() string {
-var b strings.Builder
-for otherName, otherMut := range d.lockers {
-if otherHolder, ok := otherMut.(Holdable); ok {
-b.WriteString("===" + otherName + "===\n" + otherHolder.Holders() + "\n")
-}
-}
-return b.String()
-}
 // inWritableDir calls fn(path), while making sure that the directory
 // containing `path` is writable for the duration of the call.
 func inWritableDir(fn func(string) error, targetFs fs.Filesystem, path string, ignorePerms bool) error {

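For context on what was deleted: the detector registered each lock, periodically probed it from a goroutine, and escalated from a warning to a fatal report (with a goroutine dump and eventual panic) if the probe never completed. A condensed, hypothetical sketch of that watchdog idea follows, with a single timeout and plain logging; the event and failure reporting of the removed code is left out, and the names are illustrative.

package main

import (
	"fmt"
	"sync"
	"time"
)

// watch probes mut from a background goroutine and complains if taking it
// ever takes longer than timeout. This is a sketch of the removed detector's
// core loop, not a reimplementation of it.
func watch(name string, mut sync.Locker, timeout time.Duration) {
	go func() {
		for {
			time.Sleep(timeout / 4)

			done := make(chan struct{}, 1)
			go func() {
				mut.Lock()
				_ = 1 // empty critical section; we only probe acquirability
				mut.Unlock()
				done <- struct{}{}
			}()

			select {
			case <-done:
				// Lock was acquirable in time; nothing to report.
			case <-time.After(timeout):
				fmt.Printf("potential deadlock: %s not acquired within %v\n", name, timeout)
			}
		}
	}()
}

func main() {
	var mut sync.Mutex
	watch("example", &mut, 500*time.Millisecond)

	// Hold the lock longer than the timeout to trigger the warning.
	mut.Lock()
	time.Sleep(time.Second)
	mut.Unlock()

	time.Sleep(250 * time.Millisecond)
}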
View File

@@ -11,7 +11,6 @@ import (
 "strconv"
 "time"
-deadlock "github.com/sasha-s/go-deadlock"
 "github.com/syncthing/syncthing/lib/logger"
 )
@@ -23,7 +22,6 @@ var (
 // }" variable, as it may be rather performance critical and does
 // nonstandard things (from a debug logging PoV).
 debug = logger.DefaultLogger.ShouldDebug("sync")
-useDeadlock = false
 )
 func init() {
@@ -31,10 +29,4 @@ func init() {
 threshold = time.Duration(n) * time.Millisecond
 }
 l.Debugf("Enabling lock logging at %v threshold", threshold)
-if n, _ := strconv.Atoi(os.Getenv("STDEADLOCKTIMEOUT")); n > 0 {
-deadlock.Opts.DeadlockTimeout = time.Duration(n) * time.Second
-l.Debugf("Enabling lock deadlocking at %v", deadlock.Opts.DeadlockTimeout)
-useDeadlock = true
-}
 }

View File

@@ -15,8 +15,6 @@ import (
 "sync"
 "sync/atomic"
 "time"
-"github.com/sasha-s/go-deadlock"
 )
 var timeNow = time.Now
@@ -39,9 +37,6 @@ type WaitGroup interface {
 }
 func NewMutex() Mutex {
-if useDeadlock {
-return &deadlock.Mutex{}
-}
 if debug {
 mutex := &loggedMutex{}
 mutex.holder.Store(holder{})
@@ -51,9 +46,6 @@ func NewMutex() Mutex {
 }
 func NewRWMutex() RWMutex {
-if useDeadlock {
-return &deadlock.RWMutex{}
-}
 if debug {
 mutex := &loggedRWMutex{
 readHolders: make(map[int][]holder),

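What remains in lib/sync after this removal is the two-way choice shown above: a plain mutex normally, a logging wrapper when lock debugging is enabled (the STLOCKTHRESHOLD path). Below is a rough, hypothetical sketch of that constructor pattern; the LOCKDEBUG and LOCKTHRESHOLDMS variables and the toy wrapper are invented for the example (the real package derives its switches from STTRACE and STLOCKTHRESHOLD, and loggedMutex/loggedRWMutex also track holders).

package main

import (
	"fmt"
	"os"
	"strconv"
	"sync"
	"time"
)

// Hypothetical debug switches standing in for the real env handling.
var (
	debug     = os.Getenv("LOCKDEBUG") != ""
	threshold = 100 * time.Millisecond
)

func init() {
	if n, _ := strconv.Atoi(os.Getenv("LOCKTHRESHOLDMS")); n > 0 {
		threshold = time.Duration(n) * time.Millisecond
	}
}

type Mutex interface {
	Lock()
	Unlock()
}

// loggedMutex is a toy wrapper that only reports slow holds.
type loggedMutex struct {
	sync.Mutex
	acquired time.Time
}

func (m *loggedMutex) Lock() {
	m.Mutex.Lock()
	m.acquired = time.Now()
}

func (m *loggedMutex) Unlock() {
	if held := time.Since(m.acquired); held > threshold {
		fmt.Printf("mutex held for %v (threshold %v)\n", held, threshold)
	}
	m.Mutex.Unlock()
}

// NewMutex mirrors the shape of the constructors above: a debug wrapper when
// enabled, a plain sync.Mutex otherwise.
func NewMutex() Mutex {
	if debug {
		return &loggedMutex{}
	}
	return &sync.Mutex{}
}

func main() {
	mut := NewMutex()
	mut.Lock()
	time.Sleep(150 * time.Millisecond)
	mut.Unlock()
}

Running this sketch with LOCKDEBUG=1 prints the slow-hold message for the 150 ms hold; without it, the plain sync.Mutex is used and nothing is logged.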
View File

@@ -55,7 +55,6 @@ const (
 type Options struct {
 AuditWriter io.Writer
-DeadlockTimeoutS int
 NoUpgrade bool
 ProfilerAddr string
 ResetDeltaIdxs bool
@@ -251,12 +250,6 @@ func (a *App) startup() error {
 keyGen := protocol.NewKeyGenerator()
 m := model.NewModel(a.cfg, a.myID, a.ll, protectedFiles, a.evLogger, keyGen)
-if a.opts.DeadlockTimeoutS > 0 {
-m.StartDeadlockDetector(time.Duration(a.opts.DeadlockTimeoutS) * time.Second)
-} else if !build.IsRelease || build.IsBeta {
-m.StartDeadlockDetector(20 * time.Minute)
-}
 a.mainService.Add(m)
 // The TLS configuration is used for both the listening socket and outgoing