lib/protocol: Refactor interface (#9375)

This is a refactor of the protocol/model interface to take the actual
message as the parameter, instead of the broken-out fields:

```diff
type Model interface {
        // An index was received from the peer device
-       Index(conn Connection, folder string, files []FileInfo) error
+       Index(conn Connection, idx *Index) error
        // An index update was received from the peer device
-       IndexUpdate(conn Connection, folder string, files []FileInfo) error
+       IndexUpdate(conn Connection, idxUp *IndexUpdate) error
        // A request was made by the peer device
-       Request(conn Connection, folder, name string, blockNo, size int32, offset int64, hash []byte, weakHash uint32, fromTemporary bool) (RequestResponse, error)
+       Request(conn Connection, req *Request) (RequestResponse, error)
        // A cluster configuration message was received
-       ClusterConfig(conn Connection, config ClusterConfig) error
+       ClusterConfig(conn Connection, config *ClusterConfig) error
        // The peer device closed the connection or an error occurred
        Closed(conn Connection, err error)
        // The peer device sent progress updates for the files it is currently downloading
-       DownloadProgress(conn Connection, folder string, updates []FileDownloadProgressUpdate) error
+       DownloadProgress(conn Connection, p *DownloadProgress) error
 }
```

(and changing the `ClusterConfig` to `*ClusterConfig` for symmetry;
we'll be forced to use all pointers everywhere at some point anyway...)

The reason for this is that I have another thing cooking: a small
troubleshooting change to check index consistency during transfer. That
change adds a field or two to the Index/IndexUpdate messages, and
plumbing the extra parameters through umpteen call sites is almost as
big a diff as this one. I figured let's do it once and avoid having to
do it again in the future...
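
As a rough sketch of why that helps (the consistency-check field itself is not part of this diff, so the comment below is purely hypothetical): an implementation keeps the exact same signature when the message grows, because it reads everything off the message it already receives.

```go
package example

import "github.com/syncthing/syncthing/lib/protocol"

// exampleModel is a stand-in for illustration only, not the real model type.
type exampleModel struct{}

// Index has the refactored shape. If protocol.Index later gains an extra
// field (say, for an index consistency check), it would be read here from
// idx directly; neither this signature nor the Model interface changes.
func (exampleModel) Index(_ protocol.Connection, idx *protocol.Index) error {
	_ = idx.Folder // fields come straight off the message
	_ = idx.Files
	return nil
}
```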

The rest of the diff falls out of the change above, much of it being in
test code where we run these methods manually...
Jakob Borg 2024-01-31 08:18:27 +01:00 committed by GitHub
parent 8f5d07bd09
commit bda4016109
15 changed files with 259 additions and 293 deletions


@ -160,7 +160,7 @@ func (f *fakeConnection) sendIndexUpdate() {
for i := range f.files {
toSend[i] = prepareFileInfoForIndex(f.files[i])
}
f.model.IndexUpdate(f, f.folder, toSend)
f.model.IndexUpdate(f, &protocol.IndexUpdate{Folder: f.folder, Files: toSend})
}
func addFakeConn(m *testModel, dev protocol.DeviceID, folderID string) *fakeConnection {
@ -168,7 +168,7 @@ func addFakeConn(m *testModel, dev protocol.DeviceID, folderID string) *fakeConn
fc.folder = folderID
m.AddConnection(fc, protocol.Hello{})
m.ClusterConfig(fc, protocol.ClusterConfig{
m.ClusterConfig(fc, &protocol.ClusterConfig{
Folders: []protocol.Folder{
{
ID: folderID,


@ -45,7 +45,7 @@ func TestRecvOnlyRevertDeletes(t *testing.T) {
// Send and index update for the known stuff
must(t, m.Index(conn, "ro", knownFiles))
must(t, m.Index(conn, &protocol.Index{Folder: "ro", Files: knownFiles}))
f.updateLocalsFromScanning(knownFiles)
size := globalSize(t, m, "ro")
@ -122,7 +122,7 @@ func TestRecvOnlyRevertNeeds(t *testing.T) {
// Send and index update for the known stuff
must(t, m.Index(conn, "ro", knownFiles))
must(t, m.Index(conn, &protocol.Index{Folder: "ro", Files: knownFiles}))
f.updateLocalsFromScanning(knownFiles)
// Scan the folder.
@ -212,7 +212,7 @@ func TestRecvOnlyUndoChanges(t *testing.T) {
// Send an index update for the known stuff
must(t, m.Index(conn, "ro", knownFiles))
must(t, m.Index(conn, &protocol.Index{Folder: "ro", Files: knownFiles}))
f.updateLocalsFromScanning(knownFiles)
// Scan the folder.
@ -282,7 +282,7 @@ func TestRecvOnlyDeletedRemoteDrop(t *testing.T) {
// Send an index update for the known stuff
must(t, m.Index(conn, "ro", knownFiles))
must(t, m.Index(conn, &protocol.Index{Folder: "ro", Files: knownFiles}))
f.updateLocalsFromScanning(knownFiles)
// Scan the folder.
@ -347,7 +347,7 @@ func TestRecvOnlyRemoteUndoChanges(t *testing.T) {
// Send an index update for the known stuff
must(t, m.Index(conn, "ro", knownFiles))
must(t, m.Index(conn, &protocol.Index{Folder: "ro", Files: knownFiles}))
f.updateLocalsFromScanning(knownFiles)
// Scan the folder.
@ -402,7 +402,7 @@ func TestRecvOnlyRemoteUndoChanges(t *testing.T) {
return true
})
snap.Release()
must(t, m.IndexUpdate(conn, "ro", files))
must(t, m.IndexUpdate(conn, &protocol.IndexUpdate{Folder: "ro", Files: files}))
// Ensure the pull to resolve conflicts (content identical) happened
must(t, f.doInSync(func() error {
@ -470,7 +470,7 @@ func TestRecvOnlyRevertOwnID(t *testing.T) {
}()
// Receive an index update with an older version, but valid and then revert
must(t, m.Index(conn, f.ID, []protocol.FileInfo{fi}))
must(t, m.Index(conn, &protocol.Index{Folder: f.ID, Files: []protocol.FileInfo{fi}}))
f.Revert()
select {
@ -497,7 +497,7 @@ func TestRecvOnlyLocalChangeDoesNotCauseConflict(t *testing.T) {
// Send an index update for the known stuff
must(t, m.Index(conn, "ro", knownFiles))
must(t, m.Index(conn, &protocol.Index{Folder: "ro", Files: knownFiles}))
f.updateLocalsFromScanning(knownFiles)
// Scan the folder.


@ -1297,7 +1297,7 @@ func TestPullSymlinkOverExistingWindows(t *testing.T) {
if !ok {
t.Fatal("file missing")
}
must(t, m.Index(conn, f.ID, []protocol.FileInfo{{Name: name, Type: protocol.FileInfoTypeSymlink, Version: file.Version.Update(device1.Short())}}))
must(t, m.Index(conn, &protocol.Index{Folder: f.ID, Files: []protocol.FileInfo{{Name: name, Type: protocol.FileInfoTypeSymlink, Version: file.Version.Update(device1.Short())}}}))
scanChan := make(chan string)


@ -50,11 +50,11 @@ type Model struct {
arg1 protocol.Connection
arg2 error
}
ClusterConfigStub func(protocol.Connection, protocol.ClusterConfig) error
ClusterConfigStub func(protocol.Connection, *protocol.ClusterConfig) error
clusterConfigMutex sync.RWMutex
clusterConfigArgsForCall []struct {
arg1 protocol.Connection
arg2 protocol.ClusterConfig
arg2 *protocol.ClusterConfig
}
clusterConfigReturns struct {
result1 error
@ -198,12 +198,11 @@ type Model struct {
dismissPendingFolderReturnsOnCall map[int]struct {
result1 error
}
DownloadProgressStub func(protocol.Connection, string, []protocol.FileDownloadProgressUpdate) error
DownloadProgressStub func(protocol.Connection, *protocol.DownloadProgress) error
downloadProgressMutex sync.RWMutex
downloadProgressArgsForCall []struct {
arg1 protocol.Connection
arg2 string
arg3 []protocol.FileDownloadProgressUpdate
arg2 *protocol.DownloadProgress
}
downloadProgressReturns struct {
result1 error
@ -290,12 +289,11 @@ type Model struct {
result1 []*model.TreeEntry
result2 error
}
IndexStub func(protocol.Connection, string, []protocol.FileInfo) error
IndexStub func(protocol.Connection, *protocol.Index) error
indexMutex sync.RWMutex
indexArgsForCall []struct {
arg1 protocol.Connection
arg2 string
arg3 []protocol.FileInfo
arg2 *protocol.Index
}
indexReturns struct {
result1 error
@ -303,12 +301,11 @@ type Model struct {
indexReturnsOnCall map[int]struct {
result1 error
}
IndexUpdateStub func(protocol.Connection, string, []protocol.FileInfo) error
IndexUpdateStub func(protocol.Connection, *protocol.IndexUpdate) error
indexUpdateMutex sync.RWMutex
indexUpdateArgsForCall []struct {
arg1 protocol.Connection
arg2 string
arg3 []protocol.FileInfo
arg2 *protocol.IndexUpdate
}
indexUpdateReturns struct {
result1 error
@ -424,18 +421,11 @@ type Model struct {
result1 []db.FileInfoTruncated
result2 error
}
RequestStub func(protocol.Connection, string, string, int32, int32, int64, []byte, uint32, bool) (protocol.RequestResponse, error)
RequestStub func(protocol.Connection, *protocol.Request) (protocol.RequestResponse, error)
requestMutex sync.RWMutex
requestArgsForCall []struct {
arg1 protocol.Connection
arg2 string
arg3 string
arg4 int32
arg5 int32
arg6 int64
arg7 []byte
arg8 uint32
arg9 bool
arg2 *protocol.Request
}
requestReturns struct {
result1 protocol.RequestResponse
@ -733,12 +723,12 @@ func (fake *Model) ClosedArgsForCall(i int) (protocol.Connection, error) {
return argsForCall.arg1, argsForCall.arg2
}
func (fake *Model) ClusterConfig(arg1 protocol.Connection, arg2 protocol.ClusterConfig) error {
func (fake *Model) ClusterConfig(arg1 protocol.Connection, arg2 *protocol.ClusterConfig) error {
fake.clusterConfigMutex.Lock()
ret, specificReturn := fake.clusterConfigReturnsOnCall[len(fake.clusterConfigArgsForCall)]
fake.clusterConfigArgsForCall = append(fake.clusterConfigArgsForCall, struct {
arg1 protocol.Connection
arg2 protocol.ClusterConfig
arg2 *protocol.ClusterConfig
}{arg1, arg2})
stub := fake.ClusterConfigStub
fakeReturns := fake.clusterConfigReturns
@ -759,13 +749,13 @@ func (fake *Model) ClusterConfigCallCount() int {
return len(fake.clusterConfigArgsForCall)
}
func (fake *Model) ClusterConfigCalls(stub func(protocol.Connection, protocol.ClusterConfig) error) {
func (fake *Model) ClusterConfigCalls(stub func(protocol.Connection, *protocol.ClusterConfig) error) {
fake.clusterConfigMutex.Lock()
defer fake.clusterConfigMutex.Unlock()
fake.ClusterConfigStub = stub
}
func (fake *Model) ClusterConfigArgsForCall(i int) (protocol.Connection, protocol.ClusterConfig) {
func (fake *Model) ClusterConfigArgsForCall(i int) (protocol.Connection, *protocol.ClusterConfig) {
fake.clusterConfigMutex.RLock()
defer fake.clusterConfigMutex.RUnlock()
argsForCall := fake.clusterConfigArgsForCall[i]
@ -1453,25 +1443,19 @@ func (fake *Model) DismissPendingFolderReturnsOnCall(i int, result1 error) {
}{result1}
}
func (fake *Model) DownloadProgress(arg1 protocol.Connection, arg2 string, arg3 []protocol.FileDownloadProgressUpdate) error {
var arg3Copy []protocol.FileDownloadProgressUpdate
if arg3 != nil {
arg3Copy = make([]protocol.FileDownloadProgressUpdate, len(arg3))
copy(arg3Copy, arg3)
}
func (fake *Model) DownloadProgress(arg1 protocol.Connection, arg2 *protocol.DownloadProgress) error {
fake.downloadProgressMutex.Lock()
ret, specificReturn := fake.downloadProgressReturnsOnCall[len(fake.downloadProgressArgsForCall)]
fake.downloadProgressArgsForCall = append(fake.downloadProgressArgsForCall, struct {
arg1 protocol.Connection
arg2 string
arg3 []protocol.FileDownloadProgressUpdate
}{arg1, arg2, arg3Copy})
arg2 *protocol.DownloadProgress
}{arg1, arg2})
stub := fake.DownloadProgressStub
fakeReturns := fake.downloadProgressReturns
fake.recordInvocation("DownloadProgress", []interface{}{arg1, arg2, arg3Copy})
fake.recordInvocation("DownloadProgress", []interface{}{arg1, arg2})
fake.downloadProgressMutex.Unlock()
if stub != nil {
return stub(arg1, arg2, arg3)
return stub(arg1, arg2)
}
if specificReturn {
return ret.result1
@ -1485,17 +1469,17 @@ func (fake *Model) DownloadProgressCallCount() int {
return len(fake.downloadProgressArgsForCall)
}
func (fake *Model) DownloadProgressCalls(stub func(protocol.Connection, string, []protocol.FileDownloadProgressUpdate) error) {
func (fake *Model) DownloadProgressCalls(stub func(protocol.Connection, *protocol.DownloadProgress) error) {
fake.downloadProgressMutex.Lock()
defer fake.downloadProgressMutex.Unlock()
fake.DownloadProgressStub = stub
}
func (fake *Model) DownloadProgressArgsForCall(i int) (protocol.Connection, string, []protocol.FileDownloadProgressUpdate) {
func (fake *Model) DownloadProgressArgsForCall(i int) (protocol.Connection, *protocol.DownloadProgress) {
fake.downloadProgressMutex.RLock()
defer fake.downloadProgressMutex.RUnlock()
argsForCall := fake.downloadProgressArgsForCall[i]
return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3
return argsForCall.arg1, argsForCall.arg2
}
func (fake *Model) DownloadProgressReturns(result1 error) {
@ -1898,25 +1882,19 @@ func (fake *Model) GlobalDirectoryTreeReturnsOnCall(i int, result1 []*model.Tree
}{result1, result2}
}
func (fake *Model) Index(arg1 protocol.Connection, arg2 string, arg3 []protocol.FileInfo) error {
var arg3Copy []protocol.FileInfo
if arg3 != nil {
arg3Copy = make([]protocol.FileInfo, len(arg3))
copy(arg3Copy, arg3)
}
func (fake *Model) Index(arg1 protocol.Connection, arg2 *protocol.Index) error {
fake.indexMutex.Lock()
ret, specificReturn := fake.indexReturnsOnCall[len(fake.indexArgsForCall)]
fake.indexArgsForCall = append(fake.indexArgsForCall, struct {
arg1 protocol.Connection
arg2 string
arg3 []protocol.FileInfo
}{arg1, arg2, arg3Copy})
arg2 *protocol.Index
}{arg1, arg2})
stub := fake.IndexStub
fakeReturns := fake.indexReturns
fake.recordInvocation("Index", []interface{}{arg1, arg2, arg3Copy})
fake.recordInvocation("Index", []interface{}{arg1, arg2})
fake.indexMutex.Unlock()
if stub != nil {
return stub(arg1, arg2, arg3)
return stub(arg1, arg2)
}
if specificReturn {
return ret.result1
@ -1930,17 +1908,17 @@ func (fake *Model) IndexCallCount() int {
return len(fake.indexArgsForCall)
}
func (fake *Model) IndexCalls(stub func(protocol.Connection, string, []protocol.FileInfo) error) {
func (fake *Model) IndexCalls(stub func(protocol.Connection, *protocol.Index) error) {
fake.indexMutex.Lock()
defer fake.indexMutex.Unlock()
fake.IndexStub = stub
}
func (fake *Model) IndexArgsForCall(i int) (protocol.Connection, string, []protocol.FileInfo) {
func (fake *Model) IndexArgsForCall(i int) (protocol.Connection, *protocol.Index) {
fake.indexMutex.RLock()
defer fake.indexMutex.RUnlock()
argsForCall := fake.indexArgsForCall[i]
return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3
return argsForCall.arg1, argsForCall.arg2
}
func (fake *Model) IndexReturns(result1 error) {
@ -1966,25 +1944,19 @@ func (fake *Model) IndexReturnsOnCall(i int, result1 error) {
}{result1}
}
func (fake *Model) IndexUpdate(arg1 protocol.Connection, arg2 string, arg3 []protocol.FileInfo) error {
var arg3Copy []protocol.FileInfo
if arg3 != nil {
arg3Copy = make([]protocol.FileInfo, len(arg3))
copy(arg3Copy, arg3)
}
func (fake *Model) IndexUpdate(arg1 protocol.Connection, arg2 *protocol.IndexUpdate) error {
fake.indexUpdateMutex.Lock()
ret, specificReturn := fake.indexUpdateReturnsOnCall[len(fake.indexUpdateArgsForCall)]
fake.indexUpdateArgsForCall = append(fake.indexUpdateArgsForCall, struct {
arg1 protocol.Connection
arg2 string
arg3 []protocol.FileInfo
}{arg1, arg2, arg3Copy})
arg2 *protocol.IndexUpdate
}{arg1, arg2})
stub := fake.IndexUpdateStub
fakeReturns := fake.indexUpdateReturns
fake.recordInvocation("IndexUpdate", []interface{}{arg1, arg2, arg3Copy})
fake.recordInvocation("IndexUpdate", []interface{}{arg1, arg2})
fake.indexUpdateMutex.Unlock()
if stub != nil {
return stub(arg1, arg2, arg3)
return stub(arg1, arg2)
}
if specificReturn {
return ret.result1
@ -1998,17 +1970,17 @@ func (fake *Model) IndexUpdateCallCount() int {
return len(fake.indexUpdateArgsForCall)
}
func (fake *Model) IndexUpdateCalls(stub func(protocol.Connection, string, []protocol.FileInfo) error) {
func (fake *Model) IndexUpdateCalls(stub func(protocol.Connection, *protocol.IndexUpdate) error) {
fake.indexUpdateMutex.Lock()
defer fake.indexUpdateMutex.Unlock()
fake.IndexUpdateStub = stub
}
func (fake *Model) IndexUpdateArgsForCall(i int) (protocol.Connection, string, []protocol.FileInfo) {
func (fake *Model) IndexUpdateArgsForCall(i int) (protocol.Connection, *protocol.IndexUpdate) {
fake.indexUpdateMutex.RLock()
defer fake.indexUpdateMutex.RUnlock()
argsForCall := fake.indexUpdateArgsForCall[i]
return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3
return argsForCall.arg1, argsForCall.arg2
}
func (fake *Model) IndexUpdateReturns(result1 error) {
@ -2521,31 +2493,19 @@ func (fake *Model) RemoteNeedFolderFilesReturnsOnCall(i int, result1 []db.FileIn
}{result1, result2}
}
func (fake *Model) Request(arg1 protocol.Connection, arg2 string, arg3 string, arg4 int32, arg5 int32, arg6 int64, arg7 []byte, arg8 uint32, arg9 bool) (protocol.RequestResponse, error) {
var arg7Copy []byte
if arg7 != nil {
arg7Copy = make([]byte, len(arg7))
copy(arg7Copy, arg7)
}
func (fake *Model) Request(arg1 protocol.Connection, arg2 *protocol.Request) (protocol.RequestResponse, error) {
fake.requestMutex.Lock()
ret, specificReturn := fake.requestReturnsOnCall[len(fake.requestArgsForCall)]
fake.requestArgsForCall = append(fake.requestArgsForCall, struct {
arg1 protocol.Connection
arg2 string
arg3 string
arg4 int32
arg5 int32
arg6 int64
arg7 []byte
arg8 uint32
arg9 bool
}{arg1, arg2, arg3, arg4, arg5, arg6, arg7Copy, arg8, arg9})
arg2 *protocol.Request
}{arg1, arg2})
stub := fake.RequestStub
fakeReturns := fake.requestReturns
fake.recordInvocation("Request", []interface{}{arg1, arg2, arg3, arg4, arg5, arg6, arg7Copy, arg8, arg9})
fake.recordInvocation("Request", []interface{}{arg1, arg2})
fake.requestMutex.Unlock()
if stub != nil {
return stub(arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9)
return stub(arg1, arg2)
}
if specificReturn {
return ret.result1, ret.result2
@ -2559,17 +2519,17 @@ func (fake *Model) RequestCallCount() int {
return len(fake.requestArgsForCall)
}
func (fake *Model) RequestCalls(stub func(protocol.Connection, string, string, int32, int32, int64, []byte, uint32, bool) (protocol.RequestResponse, error)) {
func (fake *Model) RequestCalls(stub func(protocol.Connection, *protocol.Request) (protocol.RequestResponse, error)) {
fake.requestMutex.Lock()
defer fake.requestMutex.Unlock()
fake.RequestStub = stub
}
func (fake *Model) RequestArgsForCall(i int) (protocol.Connection, string, string, int32, int32, int64, []byte, uint32, bool) {
func (fake *Model) RequestArgsForCall(i int) (protocol.Connection, *protocol.Request) {
fake.requestMutex.RLock()
defer fake.requestMutex.RUnlock()
argsForCall := fake.requestArgsForCall[i]
return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3, argsForCall.arg4, argsForCall.arg5, argsForCall.arg6, argsForCall.arg7, argsForCall.arg8, argsForCall.arg9
return argsForCall.arg1, argsForCall.arg2
}
func (fake *Model) RequestReturns(result1 protocol.RequestResponse, result2 error) {


@ -1133,14 +1133,14 @@ func (p *pager) done() bool {
// Index is called when a new device is connected and we receive their full index.
// Implements the protocol.Model interface.
func (m *model) Index(conn protocol.Connection, folder string, fs []protocol.FileInfo) error {
return m.handleIndex(conn, folder, fs, false)
func (m *model) Index(conn protocol.Connection, idx *protocol.Index) error {
return m.handleIndex(conn, idx.Folder, idx.Files, false)
}
// IndexUpdate is called for incremental updates to connected devices' indexes.
// Implements the protocol.Model interface.
func (m *model) IndexUpdate(conn protocol.Connection, folder string, fs []protocol.FileInfo) error {
return m.handleIndex(conn, folder, fs, true)
func (m *model) IndexUpdate(conn protocol.Connection, idxUp *protocol.IndexUpdate) error {
return m.handleIndex(conn, idxUp.Folder, idxUp.Files, true)
}
func (m *model) handleIndex(conn protocol.Connection, folder string, fs []protocol.FileInfo, update bool) error {
@ -1182,7 +1182,7 @@ type ClusterConfigReceivedEventData struct {
Device protocol.DeviceID `json:"device"`
}
func (m *model) ClusterConfig(conn protocol.Connection, cm protocol.ClusterConfig) error {
func (m *model) ClusterConfig(conn protocol.Connection, cm *protocol.ClusterConfig) error {
deviceID := conn.DeviceID()
if cm.Secondary {
@ -1632,7 +1632,7 @@ func (m *model) sendClusterConfig(ids []protocol.DeviceID) {
}
// handleIntroductions handles adding devices/folders that are shared by an introducer device
func (m *model) handleIntroductions(introducerCfg config.DeviceConfiguration, cm protocol.ClusterConfig, folders map[string]config.FolderConfiguration, devices map[protocol.DeviceID]config.DeviceConfiguration) (map[string]config.FolderConfiguration, map[protocol.DeviceID]config.DeviceConfiguration, folderDeviceSet, bool) {
func (m *model) handleIntroductions(introducerCfg config.DeviceConfiguration, cm *protocol.ClusterConfig, folders map[string]config.FolderConfiguration, devices map[protocol.DeviceID]config.DeviceConfiguration) (map[string]config.FolderConfiguration, map[protocol.DeviceID]config.DeviceConfiguration, folderDeviceSet, bool) {
changed := false
foldersDevices := make(folderDeviceSet)
@ -1946,50 +1946,52 @@ func (r *requestResponse) Wait() {
// Request returns the specified data segment by reading it from local disk.
// Implements the protocol.Model interface.
func (m *model) Request(conn protocol.Connection, folder, name string, _, size int32, offset int64, hash []byte, weakHash uint32, fromTemporary bool) (out protocol.RequestResponse, err error) {
if size < 0 || offset < 0 {
func (m *model) Request(conn protocol.Connection, req *protocol.Request) (out protocol.RequestResponse, err error) {
if req.Size < 0 || req.Offset < 0 {
return nil, protocol.ErrInvalid
}
deviceID := conn.DeviceID()
m.mut.RLock()
folderCfg, ok := m.folderCfgs[folder]
folderIgnores := m.folderIgnores[folder]
folderCfg, ok := m.folderCfgs[req.Folder]
folderIgnores := m.folderIgnores[req.Folder]
m.mut.RUnlock()
if !ok {
// The folder might be already unpaused in the config, but not yet
// in the model.
l.Debugf("Request from %s for file %s in unstarted folder %q", deviceID.Short(), name, folder)
l.Debugf("Request from %s for file %s in unstarted folder %q", deviceID.Short(), req.Name, req.Folder)
return nil, protocol.ErrGeneric
}
if !folderCfg.SharedWith(deviceID) {
l.Warnf("Request from %s for file %s in unshared folder %q", deviceID.Short(), name, folder)
l.Warnf("Request from %s for file %s in unshared folder %q", deviceID.Short(), req.Name, req.Folder)
return nil, protocol.ErrGeneric
}
if folderCfg.Paused {
l.Debugf("Request from %s for file %s in paused folder %q", deviceID.Short(), name, folder)
l.Debugf("Request from %s for file %s in paused folder %q", deviceID.Short(), req.Name, req.Folder)
return nil, protocol.ErrGeneric
}
// Make sure the path is valid and in canonical form
if name, err = fs.Canonicalize(name); err != nil {
l.Debugf("Request from %s in folder %q for invalid filename %s", deviceID.Short(), folder, name)
if name, err := fs.Canonicalize(req.Name); err != nil {
l.Debugf("Request from %s in folder %q for invalid filename %s", deviceID.Short(), req.Folder, req.Name)
return nil, protocol.ErrGeneric
} else {
req.Name = name
}
if deviceID != protocol.LocalDeviceID {
l.Debugf("%v REQ(in): %s: %q / %q o=%d s=%d t=%v", m, deviceID.Short(), folder, name, offset, size, fromTemporary)
l.Debugf("%v REQ(in): %s: %q / %q o=%d s=%d t=%v", m, deviceID.Short(), req.Folder, req.Name, req.Offset, req.Size, req.FromTemporary)
}
if fs.IsInternal(name) {
l.Debugf("%v REQ(in) for internal file: %s: %q / %q o=%d s=%d", m, deviceID.Short(), folder, name, offset, size)
if fs.IsInternal(req.Name) {
l.Debugf("%v REQ(in) for internal file: %s: %q / %q o=%d s=%d", m, deviceID.Short(), req.Folder, req.Name, req.Offset, req.Size)
return nil, protocol.ErrInvalid
}
if folderIgnores.Match(name).IsIgnored() {
l.Debugf("%v REQ(in) for ignored file: %s: %q / %q o=%d s=%d", m, deviceID.Short(), folder, name, offset, size)
if folderIgnores.Match(req.Name).IsIgnored() {
l.Debugf("%v REQ(in) for ignored file: %s: %q / %q o=%d s=%d", m, deviceID.Short(), req.Folder, req.Name, req.Offset, req.Size)
return nil, protocol.ErrInvalid
}
@ -2001,7 +2003,7 @@ func (m *model) Request(conn protocol.Connection, folder, name string, _, size i
// The requestResponse releases the bytes to the buffer pool and the
// limiters when its Close method is called.
res := newLimitedRequestResponse(int(size), limiter, m.globalRequestLimiter)
res := newLimitedRequestResponse(int(req.Size), limiter, m.globalRequestLimiter)
defer func() {
// Close it ourselves if it isn't returned due to an error
@ -2015,40 +2017,40 @@ func (m *model) Request(conn protocol.Connection, folder, name string, _, size i
folderFs := folderCfg.Filesystem(nil)
if err := osutil.TraversesSymlink(folderFs, filepath.Dir(name)); err != nil {
l.Debugf("%v REQ(in) traversal check: %s - %s: %q / %q o=%d s=%d", m, err, deviceID.Short(), folder, name, offset, size)
if err := osutil.TraversesSymlink(folderFs, filepath.Dir(req.Name)); err != nil {
l.Debugf("%v REQ(in) traversal check: %s - %s: %q / %q o=%d s=%d", m, err, deviceID.Short(), req.Folder, req.Name, req.Offset, req.Size)
return nil, protocol.ErrNoSuchFile
}
// Only check temp files if the flag is set, and if we are set to advertise
// the temp indexes.
if fromTemporary && !folderCfg.DisableTempIndexes {
tempFn := fs.TempName(name)
if req.FromTemporary && !folderCfg.DisableTempIndexes {
tempFn := fs.TempName(req.Name)
if info, err := folderFs.Lstat(tempFn); err != nil || !info.IsRegular() {
// Reject reads for anything that doesn't exist or is something
// other than a regular file.
l.Debugf("%v REQ(in) failed stating temp file (%v): %s: %q / %q o=%d s=%d", m, err, deviceID.Short(), folder, name, offset, size)
l.Debugf("%v REQ(in) failed stating temp file (%v): %s: %q / %q o=%d s=%d", m, err, deviceID.Short(), req.Folder, req.Name, req.Offset, req.Size)
return nil, protocol.ErrNoSuchFile
}
_, err := readOffsetIntoBuf(folderFs, tempFn, offset, res.data)
if err == nil && scanner.Validate(res.data, hash, weakHash) {
_, err := readOffsetIntoBuf(folderFs, tempFn, req.Offset, res.data)
if err == nil && scanner.Validate(res.data, req.Hash, req.WeakHash) {
return res, nil
}
// Fall through to reading from a non-temp file, just in case the temp
// file has finished downloading.
}
if info, err := folderFs.Lstat(name); err != nil || !info.IsRegular() {
if info, err := folderFs.Lstat(req.Name); err != nil || !info.IsRegular() {
// Reject reads for anything that doesn't exist or is something
// other than a regular file.
l.Debugf("%v REQ(in) failed stating file (%v): %s: %q / %q o=%d s=%d", m, err, deviceID.Short(), folder, name, offset, size)
l.Debugf("%v REQ(in) failed stating file (%v): %s: %q / %q o=%d s=%d", m, err, deviceID.Short(), req.Folder, req.Name, req.Offset, req.Size)
return nil, protocol.ErrNoSuchFile
}
n, err := readOffsetIntoBuf(folderFs, name, offset, res.data)
n, err := readOffsetIntoBuf(folderFs, req.Name, req.Offset, res.data)
if fs.IsNotExist(err) {
l.Debugf("%v REQ(in) file doesn't exist: %s: %q / %q o=%d s=%d", m, deviceID.Short(), folder, name, offset, size)
l.Debugf("%v REQ(in) file doesn't exist: %s: %q / %q o=%d s=%d", m, deviceID.Short(), req.Folder, req.Name, req.Offset, req.Size)
return nil, protocol.ErrNoSuchFile
} else if err == io.EOF {
// Read beyond end of file. This might indicate a problem, or it
@ -2057,13 +2059,13 @@ func (m *model) Request(conn protocol.Connection, folder, name string, _, size i
// next step take care of it, by only hashing the part we actually
// managed to read.
} else if err != nil {
l.Debugf("%v REQ(in) failed reading file (%v): %s: %q / %q o=%d s=%d", m, err, deviceID.Short(), folder, name, offset, size)
l.Debugf("%v REQ(in) failed reading file (%v): %s: %q / %q o=%d s=%d", m, err, deviceID.Short(), req.Folder, req.Name, req.Offset, req.Size)
return nil, protocol.ErrGeneric
}
if folderCfg.Type != config.FolderTypeReceiveEncrypted && len(hash) > 0 && !scanner.Validate(res.data[:n], hash, weakHash) {
m.recheckFile(deviceID, folder, name, offset, hash, weakHash)
l.Debugf("%v REQ(in) failed validating data: %s: %q / %q o=%d s=%d", m, deviceID.Short(), folder, name, offset, size)
if folderCfg.Type != config.FolderTypeReceiveEncrypted && len(req.Hash) > 0 && !scanner.Validate(res.data[:n], req.Hash, req.WeakHash) {
m.recheckFile(deviceID, req.Folder, req.Name, req.Offset, req.Hash, req.WeakHash)
l.Debugf("%v REQ(in) failed validating data: %s: %q / %q o=%d s=%d", m, deviceID.Short(), req.Folder, req.Name, req.Offset, req.Size)
return nil, protocol.ErrNoSuchFile
}
@ -2416,11 +2418,11 @@ func (m *model) promoteConnections() {
}
}
func (m *model) DownloadProgress(conn protocol.Connection, folder string, updates []protocol.FileDownloadProgressUpdate) error {
func (m *model) DownloadProgress(conn protocol.Connection, p *protocol.DownloadProgress) error {
deviceID := conn.DeviceID()
m.mut.RLock()
cfg, ok := m.folderCfgs[folder]
cfg, ok := m.folderCfgs[p.Folder]
m.mut.RUnlock()
if !ok || cfg.DisableTempIndexes || !cfg.SharedWith(deviceID) {
@ -2430,12 +2432,12 @@ func (m *model) DownloadProgress(conn protocol.Connection, folder string, update
m.mut.RLock()
downloads := m.deviceDownloads[deviceID]
m.mut.RUnlock()
downloads.Update(folder, updates)
state := downloads.GetBlockCounts(folder)
downloads.Update(p.Folder, p.Updates)
state := downloads.GetBlockCounts(p.Folder)
m.evLogger.Log(events.RemoteDownloadProgress, map[string]interface{}{
"device": deviceID.String(),
"folder": folder,
"folder": p.Folder,
"state": state,
})


@ -52,8 +52,8 @@ func newState(t testing.TB, cfg config.Configuration) (*testModel, context.Cance
return m, cancel
}
func createClusterConfig(remote protocol.DeviceID, ids ...string) protocol.ClusterConfig {
cc := protocol.ClusterConfig{
func createClusterConfig(remote protocol.DeviceID, ids ...string) *protocol.ClusterConfig {
cc := &protocol.ClusterConfig{
Folders: make([]protocol.Folder, len(ids)),
}
for i, id := range ids {
@ -65,7 +65,7 @@ func createClusterConfig(remote protocol.DeviceID, ids ...string) protocol.Clust
return addFolderDevicesToClusterConfig(cc, remote)
}
func addFolderDevicesToClusterConfig(cc protocol.ClusterConfig, remote protocol.DeviceID) protocol.ClusterConfig {
func addFolderDevicesToClusterConfig(cc *protocol.ClusterConfig, remote protocol.DeviceID) *protocol.ClusterConfig {
for i := range cc.Folders {
cc.Folders[i].Devices = []protocol.Device{
{ID: myID},
@ -94,7 +94,7 @@ func TestRequest(t *testing.T) {
m.ScanFolder("default")
// Existing, shared file
res, err := m.Request(device1Conn, "default", "foo", 0, 6, 0, nil, 0, false)
res, err := m.Request(device1Conn, &protocol.Request{Folder: "default", Name: "foo", Size: 6})
if err != nil {
t.Fatal(err)
}
@ -104,35 +104,35 @@ func TestRequest(t *testing.T) {
}
// Existing, nonshared file
_, err = m.Request(device2Conn, "default", "foo", 0, 6, 0, nil, 0, false)
_, err = m.Request(device2Conn, &protocol.Request{Folder: "default", Name: "foo", Size: 6})
if err == nil {
t.Error("Unexpected nil error on insecure file read")
}
// Nonexistent file
_, err = m.Request(device1Conn, "default", "nonexistent", 0, 6, 0, nil, 0, false)
_, err = m.Request(device1Conn, &protocol.Request{Folder: "default", Name: "nonexistent", Size: 6})
if err == nil {
t.Error("Unexpected nil error on insecure file read")
}
// Shared folder, but disallowed file name
_, err = m.Request(device1Conn, "default", "../walk.go", 0, 6, 0, nil, 0, false)
_, err = m.Request(device1Conn, &protocol.Request{Folder: "default", Name: "../walk.go", Size: 6})
if err == nil {
t.Error("Unexpected nil error on insecure file read")
}
// Negative offset
_, err = m.Request(device1Conn, "default", "foo", 0, -4, 0, nil, 0, false)
// Negative size
_, err = m.Request(device1Conn, &protocol.Request{Folder: "default", Name: "foo", Size: -4})
if err == nil {
t.Error("Unexpected nil error on insecure file read")
}
// Larger block than available
_, err = m.Request(device1Conn, "default", "foo", 0, 42, 0, []byte("hash necessary but not checked"), 0, false)
_, err = m.Request(device1Conn, &protocol.Request{Folder: "default", Name: "foo", Size: 42, Hash: []byte("hash necessary but not checked")})
if err == nil {
t.Error("Unexpected nil error on read past end of file")
}
_, err = m.Request(device1Conn, "default", "foo", 0, 42, 0, nil, 0, false)
_, err = m.Request(device1Conn, &protocol.Request{Folder: "default", Name: "foo", Size: 42})
if err != nil {
t.Error("Unexpected error when large read should be permitted")
}
@ -168,11 +168,11 @@ func benchmarkIndex(b *testing.B, nfiles int) {
defer cleanupModelAndRemoveDir(m, fcfg.Filesystem(nil).URI())
files := genFiles(nfiles)
must(b, m.Index(device1Conn, fcfg.ID, files))
must(b, m.Index(device1Conn, &protocol.Index{Folder: fcfg.ID, Files: files}))
b.ResetTimer()
for i := 0; i < b.N; i++ {
must(b, m.Index(device1Conn, fcfg.ID, files))
must(b, m.Index(device1Conn, &protocol.Index{Folder: fcfg.ID, Files: files}))
}
b.ReportAllocs()
}
@ -197,11 +197,11 @@ func benchmarkIndexUpdate(b *testing.B, nfiles, nufiles int) {
files := genFiles(nfiles)
ufiles := genFiles(nufiles)
must(b, m.Index(device1Conn, fcfg.ID, files))
must(b, m.Index(device1Conn, &protocol.Index{Folder: fcfg.ID, Files: files}))
b.ResetTimer()
for i := 0; i < b.N; i++ {
must(b, m.IndexUpdate(device1Conn, fcfg.ID, ufiles))
must(b, m.IndexUpdate(device1Conn, &protocol.IndexUpdate{Folder: fcfg.ID, Files: ufiles}))
}
b.ReportAllocs()
}
@ -218,7 +218,7 @@ func BenchmarkRequestOut(b *testing.B) {
fc.addFile(f.Name, 0o644, protocol.FileInfoTypeFile, []byte("some data to return"))
}
m.AddConnection(fc, protocol.Hello{})
must(b, m.Index(device1Conn, "default", files))
must(b, m.Index(device1Conn, &protocol.Index{Folder: "default", Files: files}))
b.ResetTimer()
for i := 0; i < b.N; i++ {
@ -247,7 +247,7 @@ func BenchmarkRequestInSingleFile(b *testing.B) {
b.ResetTimer()
for i := 0; i < b.N; i++ {
if _, err := m.Request(device1Conn, "default", "request/for/a/file/in/a/couple/of/dirs/128k", 0, 128<<10, 0, nil, 0, false); err != nil {
if _, err := m.Request(device1Conn, &protocol.Request{Folder: "default", Name: "request/for/a/file/in/a/couple/of/dirs/128k", Size: 128 << 10}); err != nil {
b.Error(err)
}
}
@ -634,7 +634,7 @@ func TestIntroducer(t *testing.T) {
},
},
})
m.ClusterConfig(device1Conn, protocol.ClusterConfig{})
m.ClusterConfig(device1Conn, &protocol.ClusterConfig{})
if _, ok := m.cfg.Device(device2); ok {
t.Error("device 2 should have been removed")
@ -686,7 +686,7 @@ func TestIntroducer(t *testing.T) {
},
},
})
m.ClusterConfig(device1Conn, protocol.ClusterConfig{})
m.ClusterConfig(device1Conn, &protocol.ClusterConfig{})
if _, ok := m.cfg.Device(device2); !ok {
t.Error("device 2 should not have been removed")
@ -794,7 +794,7 @@ func TestIntroducer(t *testing.T) {
},
},
})
m.ClusterConfig(device1Conn, protocol.ClusterConfig{})
m.ClusterConfig(device1Conn, &protocol.ClusterConfig{})
if _, ok := m.cfg.Device(device2); !ok {
t.Error("device 2 should not have been removed")
@ -847,7 +847,7 @@ func TestIntroducer(t *testing.T) {
})
defer cleanupModel(m)
defer cancel()
m.ClusterConfig(device1Conn, protocol.ClusterConfig{})
m.ClusterConfig(device1Conn, &protocol.ClusterConfig{})
if _, ok := m.cfg.Device(device2); !ok {
t.Error("device 2 should not have been removed")
@ -1035,10 +1035,10 @@ func TestAutoAcceptNewFolderPremutationsNoPanic(t *testing.T) {
cfg.Folders = append(cfg.Folders, fcfg)
}
m, cancel := newState(t, cfg)
m.ClusterConfig(device1Conn, protocol.ClusterConfig{
m.ClusterConfig(device1Conn, &protocol.ClusterConfig{
Folders: []protocol.Folder{dev1folder},
})
m.ClusterConfig(device2Conn, protocol.ClusterConfig{
m.ClusterConfig(device2Conn, &protocol.ClusterConfig{
Folders: []protocol.Folder{dev2folder},
})
cleanupModel(m)
@ -1159,7 +1159,7 @@ func TestAutoAcceptNameConflict(t *testing.T) {
m, cancel := newState(t, defaultAutoAcceptCfg)
defer cleanupModel(m)
defer cancel()
m.ClusterConfig(device1Conn, protocol.ClusterConfig{
m.ClusterConfig(device1Conn, &protocol.ClusterConfig{
Folders: []protocol.Folder{
{
ID: id,
@ -1179,7 +1179,7 @@ func TestAutoAcceptPrefersLabel(t *testing.T) {
label := srand.String(8)
defer cleanupModel(m)
defer cancel()
m.ClusterConfig(device1Conn, addFolderDevicesToClusterConfig(protocol.ClusterConfig{
m.ClusterConfig(device1Conn, addFolderDevicesToClusterConfig(&protocol.ClusterConfig{
Folders: []protocol.Folder{
{
ID: id,
@ -1203,7 +1203,7 @@ func TestAutoAcceptFallsBackToID(t *testing.T) {
}
defer cleanupModel(m)
defer cancel()
m.ClusterConfig(device1Conn, addFolderDevicesToClusterConfig(protocol.ClusterConfig{
m.ClusterConfig(device1Conn, addFolderDevicesToClusterConfig(&protocol.ClusterConfig{
Folders: []protocol.Folder{
{
ID: id,
@ -1325,8 +1325,8 @@ func TestAutoAcceptEnc(t *testing.T) {
defer os.RemoveAll(id)
token := []byte("token")
basicCC := func() protocol.ClusterConfig {
return protocol.ClusterConfig{
basicCC := func() *protocol.ClusterConfig {
return &protocol.ClusterConfig{
Folders: []protocol.Folder{{
ID: id,
Label: id,
@ -1336,7 +1336,7 @@ func TestAutoAcceptEnc(t *testing.T) {
// Earlier tests might cause the connection to get closed, thus ClusterConfig
// would panic.
clusterConfig := func(deviceID protocol.DeviceID, cm protocol.ClusterConfig) {
clusterConfig := func(deviceID protocol.DeviceID, cm *protocol.ClusterConfig) {
conn := newFakeConnection(deviceID, m)
m.AddConnection(conn, protocol.Hello{})
m.ClusterConfig(conn, cm)
@ -1808,7 +1808,7 @@ func TestGlobalDirectoryTree(t *testing.T) {
return string(bytes)
}
must(t, m.Index(conn, "default", testdata))
must(t, m.Index(conn, &protocol.Index{Folder: "default", Files: testdata}))
result, _ := m.GlobalDirectoryTree("default", "", -1, false)
@ -2015,7 +2015,7 @@ func benchmarkTree(b *testing.B, n1, n2 int) {
m.ScanFolder(fcfg.ID)
files := genDeepFiles(n1, n2)
must(b, m.Index(device1Conn, fcfg.ID, files))
must(b, m.Index(device1Conn, &protocol.Index{Folder: fcfg.ID, Files: files}))
b.ResetTimer()
for i := 0; i < b.N; i++ {
@ -2161,7 +2161,7 @@ func TestSharedWithClearedOnDisconnect(t *testing.T) {
conn2 := newFakeConnection(device2, m)
m.AddConnection(conn2, protocol.Hello{})
m.ClusterConfig(conn1, protocol.ClusterConfig{
m.ClusterConfig(conn1, &protocol.ClusterConfig{
Folders: []protocol.Folder{
{
ID: "default",
@ -2173,7 +2173,7 @@ func TestSharedWithClearedOnDisconnect(t *testing.T) {
},
},
})
m.ClusterConfig(conn2, protocol.ClusterConfig{
m.ClusterConfig(conn2, &protocol.ClusterConfig{
Folders: []protocol.Folder{
{
ID: "default",
@ -2426,7 +2426,7 @@ func TestRemoveDirWithContent(t *testing.T) {
file.Deleted = true
file.Version = file.Version.Update(device1.Short()).Update(device1.Short())
must(t, m.IndexUpdate(conn, fcfg.ID, []protocol.FileInfo{dir, file}))
must(t, m.IndexUpdate(conn, &protocol.IndexUpdate{Folder: fcfg.ID, Files: []protocol.FileInfo{dir, file}}))
// Is there something we could trigger on instead of just waiting?
timeout := time.NewTimer(5 * time.Second)
@ -2925,14 +2925,14 @@ func TestRequestLimit(t *testing.T) {
m.ScanFolder("default")
befReq := time.Now()
first, err := m.Request(conn, "default", file, 0, 2000, 0, nil, 0, false)
first, err := m.Request(conn, &protocol.Request{Folder: "default", Name: file, Size: 2000})
if err != nil {
t.Fatalf("First request failed: %v", err)
}
reqDur := time.Since(befReq)
returned := make(chan struct{})
go func() {
second, err := m.Request(conn, "default", file, 0, 2000, 0, nil, 0, false)
second, err := m.Request(conn, &protocol.Request{Folder: "default", Name: file, Size: 2000})
if err != nil {
t.Errorf("Second request failed: %v", err)
}
@ -3594,7 +3594,7 @@ func TestScanDeletedROChangedOnSR(t *testing.T) {
}
// A remote must have the file, otherwise the deletion below is
// automatically resolved as not a ro-changed item.
must(t, m.IndexUpdate(conn, fcfg.ID, []protocol.FileInfo{file}))
must(t, m.IndexUpdate(conn, &protocol.IndexUpdate{Folder: fcfg.ID, Files: []protocol.FileInfo{file}}))
must(t, ffs.Remove(name))
m.ScanFolders()
@ -3708,9 +3708,9 @@ func TestIssue6961(t *testing.T) {
version := protocol.Vector{}.Update(device1.Short())
// Remote, valid and existing file
must(t, m.Index(conn1, fcfg.ID, []protocol.FileInfo{{Name: name, Version: version, Sequence: 1}}))
must(t, m.Index(conn1, &protocol.Index{Folder: fcfg.ID, Files: []protocol.FileInfo{{Name: name, Version: version, Sequence: 1}}}))
// Remote, invalid (receive-only) and existing file
must(t, m.Index(conn2, fcfg.ID, []protocol.FileInfo{{Name: name, RawInvalid: true, Sequence: 1}}))
must(t, m.Index(conn2, &protocol.Index{Folder: fcfg.ID, Files: []protocol.FileInfo{{Name: name, RawInvalid: true, Sequence: 1}}}))
// Create a local file
if fd, err := tfs.OpenFile(name, fs.OptCreate, 0o666); err != nil {
t.Fatal(err)
@ -3736,7 +3736,7 @@ func TestIssue6961(t *testing.T) {
m.ScanFolders()
// Drop the remote index, add some other file.
must(t, m.Index(conn2, fcfg.ID, []protocol.FileInfo{{Name: "bar", RawInvalid: true, Sequence: 1}}))
must(t, m.Index(conn2, &protocol.Index{Folder: fcfg.ID, Files: []protocol.FileInfo{{Name: "bar", RawInvalid: true, Sequence: 1}}}))
// Pause and unpause folder to create new db.FileSet and thus recalculate everything
pauseFolder(t, wcfg, fcfg.ID, true)
@ -3759,7 +3759,7 @@ func TestCompletionEmptyGlobal(t *testing.T) {
m.mut.Unlock()
files[0].Deleted = true
files[0].Version = files[0].Version.Update(device1.Short())
must(t, m.IndexUpdate(conn, fcfg.ID, files))
must(t, m.IndexUpdate(conn, &protocol.IndexUpdate{Folder: fcfg.ID, Files: files}))
comp := m.testCompletion(protocol.LocalDeviceID, fcfg.ID)
if comp.CompletionPct != 95 {
t.Error("Expected completion of 95%, got", comp.CompletionPct)
@ -3780,26 +3780,26 @@ func TestNeedMetaAfterIndexReset(t *testing.T) {
// Start with two remotes having one file, then both deleting it, then
// only one adding it again.
must(t, m.Index(conn1, fcfg.ID, files))
must(t, m.Index(conn2, fcfg.ID, files))
must(t, m.Index(conn1, &protocol.Index{Folder: fcfg.ID, Files: files}))
must(t, m.Index(conn2, &protocol.Index{Folder: fcfg.ID, Files: files}))
seq++
files[0].SetDeleted(device2.Short())
files[0].Sequence = seq
must(t, m.IndexUpdate(conn1, fcfg.ID, files))
must(t, m.IndexUpdate(conn2, fcfg.ID, files))
must(t, m.IndexUpdate(conn1, &protocol.IndexUpdate{Folder: fcfg.ID, Files: files}))
must(t, m.IndexUpdate(conn2, &protocol.IndexUpdate{Folder: fcfg.ID, Files: files}))
seq++
files[0].Deleted = false
files[0].Size = 20
files[0].Version = files[0].Version.Update(device1.Short())
files[0].Sequence = seq
must(t, m.IndexUpdate(conn1, fcfg.ID, files))
must(t, m.IndexUpdate(conn1, &protocol.IndexUpdate{Folder: fcfg.ID, Files: files}))
if comp := m.testCompletion(device2, fcfg.ID); comp.NeedItems != 1 {
t.Error("Expected one needed item for device2, got", comp.NeedItems)
}
// Pretend we had an index reset on device 1
must(t, m.Index(conn1, fcfg.ID, files))
must(t, m.Index(conn1, &protocol.Index{Folder: fcfg.ID, Files: files}))
if comp := m.testCompletion(device2, fcfg.ID); comp.NeedItems != 1 {
t.Error("Expected one needed item for device2, got", comp.NeedItems)
}


@ -107,7 +107,7 @@ func TestSymlinkTraversalRead(t *testing.T) {
<-done
// Request a file by traversing the symlink
res, err := m.Request(device1Conn, "default", "symlink/requests_test.go", 0, 10, 0, nil, 0, false)
res, err := m.Request(device1Conn, &protocol.Request{Folder: "default", Name: "symlink/requests_test.go", Size: 10})
if err == nil || res != nil {
t.Error("Managed to traverse symlink")
}
@ -440,7 +440,7 @@ func TestRescanIfHaveInvalidContent(t *testing.T) {
t.Fatalf("unexpected weak hash: %d != 103547413", f.Blocks[0].WeakHash)
}
res, err := m.Request(device1Conn, "default", "foo", 0, int32(len(payload)), 0, f.Blocks[0].Hash, f.Blocks[0].WeakHash, false)
res, err := m.Request(device1Conn, &protocol.Request{Folder: "default", Name: "foo", Size: len(payload), Hash: f.Blocks[0].Hash, WeakHash: f.Blocks[0].WeakHash})
if err != nil {
t.Fatal(err)
}
@ -454,7 +454,7 @@ func TestRescanIfHaveInvalidContent(t *testing.T) {
writeFile(t, tfs, "foo", payload)
_, err = m.Request(device1Conn, "default", "foo", 0, int32(len(payload)), 0, f.Blocks[0].Hash, f.Blocks[0].WeakHash, false)
_, err = m.Request(device1Conn, &protocol.Request{Folder: "default", Name: "foo", Size: len(payload), Hash: f.Blocks[0].Hash, WeakHash: f.Blocks[0].WeakHash})
if err == nil {
t.Fatalf("expected failure")
}
@ -1190,7 +1190,7 @@ func TestRequestIndexSenderPause(t *testing.T) {
// Folder removed on remote
cc = protocol.ClusterConfig{}
cc = &protocol.ClusterConfig{}
m.ClusterConfig(fc, cc)
seq++
@ -1305,7 +1305,7 @@ func TestRequestReceiveEncrypted(t *testing.T) {
return nil
})
m.AddConnection(fc, protocol.Hello{})
m.ClusterConfig(fc, protocol.ClusterConfig{
m.ClusterConfig(fc, &protocol.ClusterConfig{
Folders: []protocol.Folder{
{
ID: "default",
@ -1355,7 +1355,7 @@ func TestRequestReceiveEncrypted(t *testing.T) {
}
// Simulate request from device that is untrusted too, i.e. with non-empty, but garbage hash
_, err := m.Request(fc, fcfg.ID, name, 0, 1064, 0, []byte("garbage"), 0, false)
_, err := m.Request(fc, &protocol.Request{Folder: fcfg.ID, Name: name, Size: 1064, Hash: []byte("garbage")})
must(t, err)
changed, err := m.LocalChangedFolderFiles(fcfg.ID, 1, 10)
@ -1406,7 +1406,7 @@ func TestRequestGlobalInvalidToValid(t *testing.T) {
file := fc.files[0]
fc.mut.Unlock()
file.SetIgnored()
m.IndexUpdate(conn, fcfg.ID, []protocol.FileInfo{prepareFileInfoForIndex(file)})
m.IndexUpdate(conn, &protocol.IndexUpdate{Folder: fcfg.ID, Files: []protocol.FileInfo{prepareFileInfoForIndex(file)}})
// Wait for the ignored file to be received and possible pulled
timeout := time.After(10 * time.Second)


@ -300,7 +300,7 @@ func folderIgnoresAlwaysReload(t testing.TB, m *testModel, fcfg config.FolderCon
m.mut.Unlock()
}
func basicClusterConfig(local, remote protocol.DeviceID, folders ...string) protocol.ClusterConfig {
func basicClusterConfig(local, remote protocol.DeviceID, folders ...string) *protocol.ClusterConfig {
var cc protocol.ClusterConfig
for _, folder := range folders {
cc.Folders = append(cc.Folders, protocol.Folder{
@ -315,7 +315,7 @@ func basicClusterConfig(local, remote protocol.DeviceID, folders ...string) prot
},
})
}
return cc
return &cc
}
func localIndexUpdate(m *testModel, folder string, fs []protocol.FileInfo) {


@ -167,30 +167,30 @@ func negotiateTLS(cert tls.Certificate, conn0, conn1 net.Conn) (net.Conn, net.Co
type fakeModel struct{}
func (*fakeModel) Index(Connection, string, []FileInfo) error {
func (*fakeModel) Index(Connection, *Index) error {
return nil
}
func (*fakeModel) IndexUpdate(Connection, string, []FileInfo) error {
func (*fakeModel) IndexUpdate(Connection, *IndexUpdate) error {
return nil
}
func (*fakeModel) Request(_ Connection, _, _ string, _, size int32, offset int64, _ []byte, _ uint32, _ bool) (RequestResponse, error) {
func (*fakeModel) Request(_ Connection, req *Request) (RequestResponse, error) {
// We write the offset to the end of the buffer, so the receiver
// can verify that it did in fact get some data back over the
// connection.
buf := make([]byte, size)
binary.BigEndian.PutUint64(buf[len(buf)-8:], uint64(offset))
buf := make([]byte, req.Size)
binary.BigEndian.PutUint64(buf[len(buf)-8:], uint64(req.Offset))
return &fakeRequestResponse{buf}, nil
}
func (*fakeModel) ClusterConfig(Connection, ClusterConfig) error {
func (*fakeModel) ClusterConfig(Connection, *ClusterConfig) error {
return nil
}
func (*fakeModel) Closed(Connection, error) {
}
func (*fakeModel) DownloadProgress(Connection, string, []FileDownloadProgressUpdate) error {
func (*fakeModel) DownloadProgress(Connection, *DownloadProgress) error {
return nil
}


@ -14,7 +14,7 @@ type TestModel struct {
weakHash uint32
fromTemporary bool
indexFn func(string, []FileInfo)
ccFn func(ClusterConfig)
ccFn func(*ClusterConfig)
closedCh chan struct{}
closedErr error
}
@ -25,25 +25,25 @@ func newTestModel() *TestModel {
}
}
func (t *TestModel) Index(_ Connection, folder string, files []FileInfo) error {
func (t *TestModel) Index(_ Connection, idx *Index) error {
if t.indexFn != nil {
t.indexFn(folder, files)
t.indexFn(idx.Folder, idx.Files)
}
return nil
}
func (*TestModel) IndexUpdate(Connection, string, []FileInfo) error {
func (*TestModel) IndexUpdate(Connection, *IndexUpdate) error {
return nil
}
func (t *TestModel) Request(_ Connection, folder, name string, _, size int32, offset int64, hash []byte, weakHash uint32, fromTemporary bool) (RequestResponse, error) {
t.folder = folder
t.name = name
t.offset = offset
t.size = size
t.hash = hash
t.weakHash = weakHash
t.fromTemporary = fromTemporary
func (t *TestModel) Request(_ Connection, req *Request) (RequestResponse, error) {
t.folder = req.Folder
t.name = req.Name
t.offset = req.Offset
t.size = int32(req.Size)
t.hash = req.Hash
t.weakHash = req.WeakHash
t.fromTemporary = req.FromTemporary
buf := make([]byte, len(t.data))
copy(buf, t.data)
return &fakeRequestResponse{buf}, nil
@ -54,14 +54,14 @@ func (t *TestModel) Closed(_ Connection, err error) {
close(t.closedCh)
}
func (t *TestModel) ClusterConfig(_ Connection, config ClusterConfig) error {
func (t *TestModel) ClusterConfig(_ Connection, config *ClusterConfig) error {
if t.ccFn != nil {
t.ccFn(config)
}
return nil
}
func (*TestModel) DownloadProgress(Connection, string, []FileDownloadProgressUpdate) error {
func (*TestModel) DownloadProgress(Connection, *DownloadProgress) error {
return nil
}


@ -56,43 +56,43 @@ func newEncryptedModel(model rawModel, folderKeys *folderKeyRegistry, keyGen *Ke
}
}
func (e encryptedModel) Index(folder string, files []FileInfo) error {
if folderKey, ok := e.folderKeys.get(folder); ok {
func (e encryptedModel) Index(idx *Index) error {
if folderKey, ok := e.folderKeys.get(idx.Folder); ok {
// incoming index data to be decrypted
if err := decryptFileInfos(e.keyGen, files, folderKey); err != nil {
if err := decryptFileInfos(e.keyGen, idx.Files, folderKey); err != nil {
return err
}
}
return e.model.Index(folder, files)
return e.model.Index(idx)
}
func (e encryptedModel) IndexUpdate(folder string, files []FileInfo) error {
if folderKey, ok := e.folderKeys.get(folder); ok {
func (e encryptedModel) IndexUpdate(idxUp *IndexUpdate) error {
if folderKey, ok := e.folderKeys.get(idxUp.Folder); ok {
// incoming index data to be decrypted
if err := decryptFileInfos(e.keyGen, files, folderKey); err != nil {
if err := decryptFileInfos(e.keyGen, idxUp.Files, folderKey); err != nil {
return err
}
}
return e.model.IndexUpdate(folder, files)
return e.model.IndexUpdate(idxUp)
}
func (e encryptedModel) Request(folder, name string, blockNo, size int32, offset int64, hash []byte, weakHash uint32, fromTemporary bool) (RequestResponse, error) {
folderKey, ok := e.folderKeys.get(folder)
func (e encryptedModel) Request(req *Request) (RequestResponse, error) {
folderKey, ok := e.folderKeys.get(req.Folder)
if !ok {
return e.model.Request(folder, name, blockNo, size, offset, hash, weakHash, fromTemporary)
return e.model.Request(req)
}
// Figure out the real file name, offset and size from the encrypted /
// tweaked values.
realName, err := decryptName(name, folderKey)
realName, err := decryptName(req.Name, folderKey)
if err != nil {
return nil, fmt.Errorf("decrypting name: %w", err)
}
realSize := size - blockOverhead
realOffset := offset - int64(blockNo*blockOverhead)
realSize := req.Size - blockOverhead
realOffset := req.Offset - int64(req.BlockNo*blockOverhead)
if size < minPaddedSize {
if req.Size < minPaddedSize {
return nil, errors.New("short request")
}
@ -105,13 +105,13 @@ func (e encryptedModel) Request(folder, name string, blockNo, size int32, offset
var realHash []byte
fileKey := e.keyGen.FileKey(realName, folderKey)
if len(hash) > 0 {
if len(req.Hash) > 0 {
var additional [8]byte
binary.BigEndian.PutUint64(additional[:], uint64(realOffset))
realHash, err = decryptDeterministic(hash, fileKey, additional[:])
realHash, err = decryptDeterministic(req.Hash, fileKey, additional[:])
if err != nil {
// "Legacy", no offset additional data?
realHash, err = decryptDeterministic(hash, fileKey, nil)
realHash, err = decryptDeterministic(req.Hash, fileKey, nil)
}
if err != nil {
return nil, fmt.Errorf("decrypting block hash: %w", err)
@ -120,7 +120,11 @@ func (e encryptedModel) Request(folder, name string, blockNo, size int32, offset
// Perform that request and grab the data.
resp, err := e.model.Request(folder, realName, blockNo, realSize, realOffset, realHash, 0, false)
req.Name = realName
req.Size = realSize
req.Offset = realOffset
req.Hash = realHash
resp, err := e.model.Request(req)
if err != nil {
return nil, err
}
@ -142,16 +146,16 @@ func (e encryptedModel) Request(folder, name string, blockNo, size int32, offset
return rawResponse{enc}, nil
}
func (e encryptedModel) DownloadProgress(folder string, updates []FileDownloadProgressUpdate) error {
if _, ok := e.folderKeys.get(folder); !ok {
return e.model.DownloadProgress(folder, updates)
func (e encryptedModel) DownloadProgress(p *DownloadProgress) error {
if _, ok := e.folderKeys.get(p.Folder); !ok {
return e.model.DownloadProgress(p)
}
// Encrypted devices shouldn't send these - ignore them.
return nil
}
func (e encryptedModel) ClusterConfig(config ClusterConfig) error {
func (e encryptedModel) ClusterConfig(config *ClusterConfig) error {
return e.model.ClusterConfig(config)
}


@ -15,21 +15,21 @@ type nativeModel struct {
rawModel
}
func (m nativeModel) Index(folder string, files []FileInfo) error {
for i := range files {
files[i].Name = norm.NFD.String(files[i].Name)
func (m nativeModel) Index(idx *Index) error {
for i := range idx.Files {
idx.Files[i].Name = norm.NFD.String(idx.Files[i].Name)
}
return m.rawModel.Index(folder, files)
return m.rawModel.Index(idx)
}
func (m nativeModel) IndexUpdate(folder string, files []FileInfo) error {
for i := range files {
files[i].Name = norm.NFD.String(files[i].Name)
func (m nativeModel) IndexUpdate(idxUp *IndexUpdate) error {
for i := range idxUp.Files {
idxUp.Files[i].Name = norm.NFD.String(idxUp.Files[i].Name)
}
return m.rawModel.IndexUpdate(folder, files)
return m.rawModel.IndexUpdate(idxUp)
}
func (m nativeModel) Request(folder, name string, blockNo, size int32, offset int64, hash []byte, weakHash uint32, fromTemporary bool) (RequestResponse, error) {
name = norm.NFD.String(name)
return m.rawModel.Request(folder, name, blockNo, size, offset, hash, weakHash, fromTemporary)
func (m nativeModel) Request(req *Request) (RequestResponse, error) {
req.Name = norm.NFD.String(req.Name)
return m.rawModel.Request(req)
}


@ -19,24 +19,24 @@ type nativeModel struct {
rawModel
}
func (m nativeModel) Index(folder string, files []FileInfo) error {
files = fixupFiles(files)
return m.rawModel.Index(folder, files)
func (m nativeModel) Index(idx *Index) error {
idx.Files = fixupFiles(idx.Files)
return m.rawModel.Index(idx)
}
func (m nativeModel) IndexUpdate(folder string, files []FileInfo) error {
files = fixupFiles(files)
return m.rawModel.IndexUpdate(folder, files)
func (m nativeModel) IndexUpdate(idxUp *IndexUpdate) error {
idxUp.Files = fixupFiles(idxUp.Files)
return m.rawModel.IndexUpdate(idxUp)
}
func (m nativeModel) Request(folder, name string, blockNo, size int32, offset int64, hash []byte, weakHash uint32, fromTemporary bool) (RequestResponse, error) {
if strings.Contains(name, `\`) {
l.Warnf("Dropping request for %s, contains invalid path separator", name)
func (m nativeModel) Request(req *Request) (RequestResponse, error) {
if strings.Contains(req.Name, `\`) {
l.Warnf("Dropping request for %s, contains invalid path separator", req.Name)
return nil, ErrNoSuchFile
}
name = filepath.FromSlash(name)
return m.rawModel.Request(folder, name, blockNo, size, offset, hash, weakHash, fromTemporary)
req.Name = filepath.FromSlash(req.Name)
return m.rawModel.Request(req)
}
func fixupFiles(files []FileInfo) []FileInfo {


@ -123,28 +123,28 @@ var (
type Model interface {
// An index was received from the peer device
Index(conn Connection, folder string, files []FileInfo) error
Index(conn Connection, idx *Index) error
// An index update was received from the peer device
IndexUpdate(conn Connection, folder string, files []FileInfo) error
IndexUpdate(conn Connection, idxUp *IndexUpdate) error
// A request was made by the peer device
Request(conn Connection, folder, name string, blockNo, size int32, offset int64, hash []byte, weakHash uint32, fromTemporary bool) (RequestResponse, error)
Request(conn Connection, req *Request) (RequestResponse, error)
// A cluster configuration message was received
ClusterConfig(conn Connection, config ClusterConfig) error
ClusterConfig(conn Connection, config *ClusterConfig) error
// The peer device closed the connection or an error occurred
Closed(conn Connection, err error)
// The peer device sent progress updates for the files it is currently downloading
DownloadProgress(conn Connection, folder string, updates []FileDownloadProgressUpdate) error
DownloadProgress(conn Connection, p *DownloadProgress) error
}
// rawModel is the Model interface, but without the initial Connection
// parameter. Internal use only.
type rawModel interface {
Index(folder string, files []FileInfo) error
IndexUpdate(folder string, files []FileInfo) error
Request(folder, name string, blockNo, size int32, offset int64, hash []byte, weakHash uint32, fromTemporary bool) (RequestResponse, error)
ClusterConfig(config ClusterConfig) error
Index(*Index) error
IndexUpdate(*IndexUpdate) error
Request(*Request) (RequestResponse, error)
ClusterConfig(*ClusterConfig) error
Closed(err error)
DownloadProgress(folder string, updates []FileDownloadProgressUpdate) error
DownloadProgress(*DownloadProgress) error
}
type RequestResponse interface {
@ -493,22 +493,22 @@ func (c *rawConnection) dispatcherLoop() (err error) {
switch msg := msg.(type) {
case *ClusterConfig:
err = c.model.ClusterConfig(*msg)
err = c.model.ClusterConfig(msg)
case *Index:
err = c.handleIndex(*msg)
err = c.handleIndex(msg)
case *IndexUpdate:
err = c.handleIndexUpdate(*msg)
err = c.handleIndexUpdate(msg)
case *Request:
go c.handleRequest(*msg)
go c.handleRequest(msg)
case *Response:
c.handleResponse(*msg)
c.handleResponse(msg)
case *DownloadProgress:
err = c.model.DownloadProgress(msg.Folder, msg.Updates)
err = c.model.DownloadProgress(msg)
}
if err != nil {
return newHandleError(err, msgContext)
@ -613,14 +613,14 @@ func (c *rawConnection) readHeader(fourByteBuf []byte) (Header, error) {
return hdr, nil
}
func (c *rawConnection) handleIndex(im Index) error {
func (c *rawConnection) handleIndex(im *Index) error {
l.Debugf("Index(%v, %v, %d file)", c.deviceID, im.Folder, len(im.Files))
return c.model.Index(im.Folder, im.Files)
return c.model.Index(im)
}
func (c *rawConnection) handleIndexUpdate(im IndexUpdate) error {
func (c *rawConnection) handleIndexUpdate(im *IndexUpdate) error {
l.Debugf("queueing IndexUpdate(%v, %v, %d files)", c.deviceID, im.Folder, len(im.Files))
return c.model.IndexUpdate(im.Folder, im.Files)
return c.model.IndexUpdate(im)
}
// checkIndexConsistency verifies a number of invariants on FileInfos received in
@ -685,8 +685,8 @@ func checkFilename(name string) error {
return nil
}
func (c *rawConnection) handleRequest(req Request) {
res, err := c.model.Request(req.Folder, req.Name, int32(req.BlockNo), int32(req.Size), req.Offset, req.Hash, req.WeakHash, req.FromTemporary)
func (c *rawConnection) handleRequest(req *Request) {
res, err := c.model.Request(req)
if err != nil {
c.send(context.Background(), &Response{
ID: req.ID,
@ -704,7 +704,7 @@ func (c *rawConnection) handleRequest(req Request) {
res.Close()
}
func (c *rawConnection) handleResponse(resp Response) {
func (c *rawConnection) handleResponse(resp *Response) {
c.awaitingMut.Lock()
if rc := c.awaiting[resp.ID]; rc != nil {
delete(c.awaiting, resp.ID)
@ -1127,19 +1127,19 @@ type connectionWrappingModel struct {
model Model
}
func (c *connectionWrappingModel) Index(folder string, files []FileInfo) error {
return c.model.Index(c.conn, folder, files)
func (c *connectionWrappingModel) Index(m *Index) error {
return c.model.Index(c.conn, m)
}
func (c *connectionWrappingModel) IndexUpdate(folder string, files []FileInfo) error {
return c.model.IndexUpdate(c.conn, folder, files)
func (c *connectionWrappingModel) IndexUpdate(idxUp *IndexUpdate) error {
return c.model.IndexUpdate(c.conn, idxUp)
}
func (c *connectionWrappingModel) Request(folder, name string, blockNo, size int32, offset int64, hash []byte, weakHash uint32, fromTemporary bool) (RequestResponse, error) {
return c.model.Request(c.conn, folder, name, blockNo, size, offset, hash, weakHash, fromTemporary)
func (c *connectionWrappingModel) Request(req *Request) (RequestResponse, error) {
return c.model.Request(c.conn, req)
}
func (c *connectionWrappingModel) ClusterConfig(config ClusterConfig) error {
func (c *connectionWrappingModel) ClusterConfig(config *ClusterConfig) error {
return c.model.ClusterConfig(c.conn, config)
}
@ -1147,6 +1147,6 @@ func (c *connectionWrappingModel) Closed(err error) {
c.model.Closed(c.conn, err)
}
func (c *connectionWrappingModel) DownloadProgress(folder string, updates []FileDownloadProgressUpdate) error {
return c.model.DownloadProgress(c.conn, folder, updates)
func (c *connectionWrappingModel) DownloadProgress(p *DownloadProgress) error {
return c.model.DownloadProgress(c.conn, p)
}


@ -924,7 +924,7 @@ func TestDispatcherToCloseDeadlock(t *testing.T) {
m := newTestModel()
rw := testutil.NewBlockingRW()
c := getRawConnection(NewConnection(c0ID, rw, &testutil.NoopRW{}, testutil.NoopCloser{}, m, new(mockedConnectionInfo), CompressionAlways, nil, testKeyGen))
m.ccFn = func(ClusterConfig) {
m.ccFn = func(*ClusterConfig) {
c.Close(errManual)
}
c.Start()