Add scan percentages (fixes #1030)

AudriusButkevicius 2015-08-26 23:49:06 +01:00
parent 875de4f637
commit 94c52e3a77
17 changed files with 202 additions and 72 deletions

Godeps/Godeps.json (generated)
View File

@@ -39,7 +39,7 @@
},
{
"ImportPath": "github.com/syncthing/protocol",
"Rev": "388a29bbe21d8772ee4c29f4520aa8040309607d"
"Rev": "68c5dcd83d9be8f28ae59e951a87cdcf01c6f5cb"
},
{
"ImportPath": "github.com/syncthing/relaysrv/client",

View File

@@ -20,6 +20,7 @@ type FileInfo struct {
Modified int64
Version Vector
LocalVersion int64
CachedSize int64 // noencode (cache only)
Blocks []BlockInfo // max:1000000
}

View File

@@ -68,7 +68,7 @@ func main() {
if *standardBlocks || blockSize < protocol.BlockSize {
blockSize = protocol.BlockSize
}
bs, err := scanner.Blocks(fd, blockSize, fi.Size())
bs, err := scanner.Blocks(fd, blockSize, fi.Size(), nil)
if err != nil {
log.Fatal(err)
}

View File

@@ -195,13 +195,20 @@
<div class="panel panel-default" ng-repeat="folder in folderList()">
<div class="panel-heading" data-toggle="collapse" data-parent="#folders" href="#folder-{{$index}}" style="cursor: pointer">
<div class="panel-progress" ng-show="folderStatus(folder) == 'syncing'" ng-attr-style="width: {{syncPercentage(folder.id)}}%"></div>
<div class="panel-progress" ng-show="folderStatus(folder) == 'scanning' && scanProgress[folder.id] != undefined" ng-attr-style="width: {{scanPercentage(folder.id)}}%"></div>
<h3 class="panel-title">
<span class="fa fa-folder hidden-xs"></span>{{folder.id}}
<span class="pull-right text-{{folderClass(folder)}}" ng-switch="folderStatus(folder)">
<span ng-switch-when="unknown"><span class="hidden-xs" translate>Unknown</span><span class="visible-xs">&#9724;</span></span>
<span ng-switch-when="unshared"><span class="hidden-xs" translate>Unshared</span><span class="visible-xs">&#9724;</span></span>
<span ng-switch-when="stopped"><span class="hidden-xs" translate>Stopped</span><span class="visible-xs">&#9724;</span></span>
<span ng-switch-when="scanning"><span class="hidden-xs" translate>Scanning</span><span class="visible-xs">&#9724;</span></span>
<span ng-switch-when="scanning">
<span class="hidden-xs" translate>Scanning</span>
<span class="hidden-xs" ng-if="scanPercentage(folder.id) != undefined">
({{scanPercentage(folder.id)}}%)
</span>
<span class="visible-xs">&#9724;</span>
</span>
<span ng-switch-when="idle"><span class="hidden-xs" translate>Up to Date</span><span class="visible-xs">&#9724;</span></span>
<span ng-switch-when="syncing">
<span class="hidden-xs" translate>Syncing</span>
@@ -445,7 +452,7 @@
</tr>
<tr>
<th>
<span class="fa fa-fw fa-link"></span>
<span class="fa fa-fw fa-link"></span>
<span translate ng-if="connections[deviceCfg.deviceID].type.indexOf('direct') == 0" >Address</span>
<span translate ng-if="connections[deviceCfg.deviceID].type.indexOf('relay') == 0" >Relayed via</span>
</th>

View File

@@ -78,6 +78,7 @@ angular.module('syncthing.core')
STARTUP_COMPLETED: 'StartupCompleted', // Emitted exactly once, when initialization is complete and Syncthing is ready to start exchanging data with other devices
STATE_CHANGED: 'StateChanged', // Emitted when a folder changes state
FOLDER_ERRORS: 'FolderErrors', // Emitted when a folder has errors preventing a full sync
FOLDER_SCAN_PROGRESS: 'FolderScanProgress', // Emitted every ScanProgressIntervalS seconds during a scan, indicating how far the scan has progressed.
start: function() {
$http.get(urlbase + '/events?limit=1')

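For context, the new event arrives over the same REST events feed the GUI polls above. Below is a minimal Go sketch of an external consumer, assuming a local instance on the default address and a placeholder API key; the struct types and field names mirror the payload emitted by the walker further down (folder, current, total) but are otherwise illustrative.

package main

import (
	"encoding/json"
	"fmt"
	"net/http"
)

// event mirrors the envelope returned by GET /rest/events.
type event struct {
	ID   int             `json:"id"`
	Type string          `json:"type"`
	Data json.RawMessage `json:"data"`
}

// scanProgress mirrors the FolderScanProgress payload.
type scanProgress struct {
	Folder  string `json:"folder"`
	Current uint64 `json:"current"`
	Total   uint64 `json:"total"`
}

func main() {
	since := 0
	for {
		url := fmt.Sprintf("http://localhost:8384/rest/events?since=%d", since)
		req, _ := http.NewRequest("GET", url, nil)
		req.Header.Set("X-API-Key", "your-api-key-here") // replace with your key
		resp, err := http.DefaultClient.Do(req)
		if err != nil {
			panic(err)
		}
		var evs []event
		if err := json.NewDecoder(resp.Body).Decode(&evs); err != nil {
			panic(err)
		}
		resp.Body.Close()
		for _, ev := range evs {
			since = ev.ID
			if ev.Type != "FolderScanProgress" {
				continue
			}
			var p scanProgress
			if err := json.Unmarshal(ev.Data, &p); err != nil || p.Total == 0 {
				continue
			}
			fmt.Printf("%s: %d%%\n", p.Folder, 100*p.Current/p.Total)
		}
	}
}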
View File

@@ -48,6 +48,7 @@ angular.module('syncthing.core')
$scope.failedCurrentPage = 1;
$scope.failedCurrentFolder = undefined;
$scope.failedPageSize = 10;
$scope.scanProgress = {};
$scope.localStateTotal = {
bytes: 0,
@@ -163,6 +164,12 @@ angular.module('syncthing.core')
if (data.to === 'syncing') {
$scope.failed[data.folder] = [];
}
// If a folder has started scanning, then any scan progress is
// also obsolete.
if (data.to === 'scanning') {
delete $scope.scanProgress[data.folder];
}
}
});
@@ -310,6 +317,15 @@ angular.module('syncthing.core')
$scope.failed[data.folder] = data.errors;
});
$scope.$on(Events.FOLDER_SCAN_PROGRESS, function (event, arg) {
var data = arg.data;
$scope.scanProgress[data.folder] = {
current: data.current,
total: data.total
};
console.log("FolderScanProgress", data);
});
$scope.emitHTTPError = function (data, status, headers, config) {
$scope.$emit('HTTPError', {data: data, status: status, headers: headers, config: config});
};
@@ -634,6 +650,14 @@ angular.module('syncthing.core')
return Math.floor(pct);
};
$scope.scanPercentage = function (folder) {
if (!$scope.scanProgress[folder] || !$scope.scanProgress[folder].total) {
return undefined;
}
var pct = 100 * $scope.scanProgress[folder].current / $scope.scanProgress[folder].total;
return Math.floor(pct);
};
$scope.deviceStatus = function (deviceCfg) {
if ($scope.deviceFolders(deviceCfg).length === 0) {
return 'unused';

File diff suppressed because one or more lines are too long

View File

@@ -68,20 +68,21 @@ func (cfg Configuration) Copy() Configuration {
}
type FolderConfiguration struct {
ID string `xml:"id,attr" json:"id"`
RawPath string `xml:"path,attr" json:"path"`
Devices []FolderDeviceConfiguration `xml:"device" json:"devices"`
ReadOnly bool `xml:"ro,attr" json:"readOnly"`
RescanIntervalS int `xml:"rescanIntervalS,attr" json:"rescanIntervalS"`
IgnorePerms bool `xml:"ignorePerms,attr" json:"ignorePerms"`
AutoNormalize bool `xml:"autoNormalize,attr" json:"autoNormalize"`
MinDiskFreePct int `xml:"minDiskFreePct" json:"minDiskFreePct"`
Versioning VersioningConfiguration `xml:"versioning" json:"versioning"`
Copiers int `xml:"copiers" json:"copiers"` // This defines how many files are handled concurrently.
Pullers int `xml:"pullers" json:"pullers"` // Defines how many blocks are fetched at the same time, possibly between separate copier routines.
Hashers int `xml:"hashers" json:"hashers"` // Less than one sets the value to the number of cores. These are CPU bound due to hashing.
Order PullOrder `xml:"order" json:"order"`
IgnoreDelete bool `xml:"ignoreDelete" json:"ignoreDelete"`
ID string `xml:"id,attr" json:"id"`
RawPath string `xml:"path,attr" json:"path"`
Devices []FolderDeviceConfiguration `xml:"device" json:"devices"`
ReadOnly bool `xml:"ro,attr" json:"readOnly"`
RescanIntervalS int `xml:"rescanIntervalS,attr" json:"rescanIntervalS"`
IgnorePerms bool `xml:"ignorePerms,attr" json:"ignorePerms"`
AutoNormalize bool `xml:"autoNormalize,attr" json:"autoNormalize"`
MinDiskFreePct int `xml:"minDiskFreePct" json:"minDiskFreePct"`
Versioning VersioningConfiguration `xml:"versioning" json:"versioning"`
Copiers int `xml:"copiers" json:"copiers"` // This defines how many files are handled concurrently.
Pullers int `xml:"pullers" json:"pullers"` // Defines how many blocks are fetched at the same time, possibly between separate copier routines.
Hashers int `xml:"hashers" json:"hashers"` // Less than one sets the value to the number of cores. These are CPU bound due to hashing.
Order PullOrder `xml:"order" json:"order"`
IgnoreDelete bool `xml:"ignoreDelete" json:"ignoreDelete"`
ScanProgressIntervalS int `xml:"scanProgressInterval" json:"scanProgressInterval"` // Set to a negative value to disable. Zero is replaced with the default of 2 (seconds).
Invalid string `xml:"-" json:"invalid"` // Set at runtime when there is an error, not saved
}
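In the XML configuration this surfaces as a per-folder child element, per the struct tags above. A hypothetical folder stanza (id and path are placeholders):

<folder id="default" path="/home/user/Sync" rescanIntervalS="60">
    <!-- Emit FolderScanProgress every 5 seconds; 0 means the default
         of 2, and a negative value disables the events entirely. -->
    <scanProgressInterval>5</scanProgressInterval>
</folder>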

View File

@@ -38,6 +38,7 @@ const (
FolderSummary
FolderCompletion
FolderErrors
FolderScanProgress
AllEvents = (1 << iota) - 1
)
@@ -84,6 +85,8 @@ func (t EventType) String() string {
return "DevicePaused"
case DeviceResumed:
return "DeviceResumed"
case FolderScanProgress:
return "FolderScanProgress"
default:
return "Unknown"
}

View File

@@ -1297,18 +1297,20 @@ nextSub:
subs = unifySubs
w := &scanner.Walker{
Dir: folderCfg.Path(),
Subs: subs,
Matcher: ignores,
BlockSize: protocol.BlockSize,
TempNamer: defTempNamer,
TempLifetime: time.Duration(m.cfg.Options().KeepTemporariesH) * time.Hour,
CurrentFiler: cFiler{m, folder},
MtimeRepo: db.NewVirtualMtimeRepo(m.db, folderCfg.ID),
IgnorePerms: folderCfg.IgnorePerms,
AutoNormalize: folderCfg.AutoNormalize,
Hashers: m.numHashers(folder),
ShortID: m.shortID,
Folder: folderCfg.ID,
Dir: folderCfg.Path(),
Subs: subs,
Matcher: ignores,
BlockSize: protocol.BlockSize,
TempNamer: defTempNamer,
TempLifetime: time.Duration(m.cfg.Options().KeepTemporariesH) * time.Hour,
CurrentFiler: cFiler{m, folder},
MtimeRepo: db.NewVirtualMtimeRepo(m.db, folderCfg.ID),
IgnorePerms: folderCfg.IgnorePerms,
AutoNormalize: folderCfg.AutoNormalize,
Hashers: m.numHashers(folder),
ShortID: m.shortID,
ProgressTickIntervalS: folderCfg.ScanProgressIntervalS,
}
runner.setState(FolderScanning)

View File

@@ -989,7 +989,7 @@ func (p *rwFolder) handleFile(file protocol.FileInfo, copyChan chan<- copyBlocks
// Check for an old temporary file which might have some blocks we could
// reuse.
tempBlocks, err := scanner.HashFile(tempName, protocol.BlockSize)
tempBlocks, err := scanner.HashFile(tempName, protocol.BlockSize, 0, nil)
if err == nil {
// Check for any reusable blocks in the temp file
tempCopyBlocks, _ := scanner.BlockDiff(tempBlocks, file.Blocks)

View File

@@ -241,7 +241,7 @@ func TestCopierFinder(t *testing.T) {
}
// Verify that the fetched blocks have actually been written to the temp file
blks, err := scanner.HashFile(tempFile, protocol.BlockSize)
blks, err := scanner.HashFile(tempFile, protocol.BlockSize, 0, nil)
if err != nil {
t.Log(err)
}

View File

@@ -19,24 +19,27 @@ import (
// workers are used in parallel. The outbox will become closed when the inbox
// is closed and all items handled.
func newParallelHasher(dir string, blockSize, workers int, outbox, inbox chan protocol.FileInfo) {
func newParallelHasher(dir string, blockSize, workers int, outbox, inbox chan protocol.FileInfo, counter *uint64, done chan struct{}) {
wg := sync.NewWaitGroup()
wg.Add(workers)
for i := 0; i < workers; i++ {
go func() {
hashFiles(dir, blockSize, outbox, inbox)
hashFiles(dir, blockSize, outbox, inbox, counter)
wg.Done()
}()
}
go func() {
wg.Wait()
if done != nil {
close(done)
}
close(outbox)
}()
}
func HashFile(path string, blockSize int) ([]protocol.BlockInfo, error) {
func HashFile(path string, blockSize int, sizeHint int64, counter *uint64) ([]protocol.BlockInfo, error) {
fd, err := os.Open(path)
if err != nil {
if debug {
@@ -44,27 +44,29 @@ func HashFile(path string, blockSize int) ([]protocol.BlockInfo, error) {
}
return []protocol.BlockInfo{}, err
}
fi, err := fd.Stat()
if err != nil {
fd.Close()
if debug {
l.Debugln("stat:", err)
}
return []protocol.BlockInfo{}, err
}
defer fd.Close()
return Blocks(fd, blockSize, fi.Size())
if sizeHint == 0 {
fi, err := fd.Stat()
if err != nil {
if debug {
l.Debugln("stat:", err)
}
return []protocol.BlockInfo{}, err
}
sizeHint = fi.Size()
}
return Blocks(fd, blockSize, sizeHint, counter)
}
func hashFiles(dir string, blockSize int, outbox, inbox chan protocol.FileInfo) {
func hashFiles(dir string, blockSize int, outbox, inbox chan protocol.FileInfo, counter *uint64) {
for f := range inbox {
if f.IsDirectory() || f.IsDeleted() || f.IsSymlink() {
outbox <- f
continue
if f.IsDirectory() || f.IsDeleted() {
panic("Bug. Asked to hash a directory or a deleted file.")
}
blocks, err := HashFile(filepath.Join(dir, f.Name), blockSize)
blocks, err := HashFile(filepath.Join(dir, f.Name), blockSize, f.CachedSize, counter)
if err != nil {
if debug {
l.Debugln("hash error:", f.Name, err)

View File

@@ -11,6 +11,7 @@ import (
"crypto/sha256"
"fmt"
"io"
"sync/atomic"
"github.com/syncthing/protocol"
)
@@ -18,7 +19,7 @@ import (
var SHA256OfNothing = []uint8{0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, 0x9a, 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24, 0x27, 0xae, 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c, 0xa4, 0x95, 0x99, 0x1b, 0x78, 0x52, 0xb8, 0x55}
// Blocks returns the blockwise hash of the reader.
func Blocks(r io.Reader, blocksize int, sizehint int64) ([]protocol.BlockInfo, error) {
func Blocks(r io.Reader, blocksize int, sizehint int64, counter *uint64) ([]protocol.BlockInfo, error) {
var blocks []protocol.BlockInfo
if sizehint > 0 {
blocks = make([]protocol.BlockInfo, 0, int(sizehint/int64(blocksize)))
@@ -36,6 +37,10 @@ func Blocks(r io.Reader, blocksize int, sizehint int64) ([]protocol.BlockInfo, error) {
break
}
if counter != nil {
atomic.AddUint64(counter, uint64(n))
}
b := protocol.BlockInfo{
Size: int32(n),
Offset: offset,

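The counter argument is the hook the walker's progress ticker reads. Here is a standalone sketch of that contract, assuming this tree's lib/scanner import path; the 128 KiB block size matches protocol.BlockSize of this era, and the polling goroutine is purely illustrative.

package main

import (
	"bytes"
	"fmt"
	"sync/atomic"
	"time"
)

import "github.com/syncthing/syncthing/lib/scanner"

func main() {
	data := bytes.NewBuffer(make([]byte, 1<<24)) // 16 MiB of zeroes
	var progress uint64

	// Blocks adds each hashed chunk's size to progress via
	// atomic.AddUint64, so another goroutine may safely poll it.
	go func() {
		for range time.Tick(10 * time.Millisecond) {
			fmt.Printf("hashed %d bytes so far\n", atomic.LoadUint64(&progress))
		}
	}()

	blocks, err := scanner.Blocks(data, 128<<10, int64(data.Len()), &progress)
	if err != nil {
		panic(err)
	}
	fmt.Printf("done: %d blocks, %d bytes\n", len(blocks), atomic.LoadUint64(&progress))
}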
View File

@@ -51,7 +51,7 @@ var blocksTestData = []struct {
func TestBlocks(t *testing.T) {
for _, test := range blocksTestData {
buf := bytes.NewBuffer(test.data)
blocks, err := Blocks(buf, test.blocksize, 0)
blocks, err := Blocks(buf, test.blocksize, 0, nil)
if err != nil {
t.Fatal(err)
@@ -105,8 +105,8 @@ var diffTestData = []struct {
func TestDiff(t *testing.T) {
for i, test := range diffTestData {
a, _ := Blocks(bytes.NewBufferString(test.a), test.s, 0)
b, _ := Blocks(bytes.NewBufferString(test.b), test.s, 0)
a, _ := Blocks(bytes.NewBufferString(test.a), test.s, 0, nil)
b, _ := Blocks(bytes.NewBufferString(test.b), test.s, 0, nil)
_, d := BlockDiff(a, b)
if len(d) != len(test.d) {
t.Fatalf("Incorrect length for diff %d; %d != %d", i, len(d), len(test.d))

View File

@@ -12,11 +12,13 @@ import (
"path/filepath"
"runtime"
"strings"
"sync/atomic"
"time"
"unicode/utf8"
"github.com/syncthing/protocol"
"github.com/syncthing/syncthing/lib/db"
"github.com/syncthing/syncthing/lib/events"
"github.com/syncthing/syncthing/lib/ignore"
"github.com/syncthing/syncthing/lib/osutil"
"github.com/syncthing/syncthing/lib/symlinks"
@@ -39,6 +41,8 @@ func init() {
}
type Walker struct {
// Folder for which the walker has been created
Folder string
// Dir is the base directory for the walk
Dir string
// Limit walking to these paths within Dir, or no limit if Sub is empty
@@ -66,6 +70,9 @@ type Walker struct {
Hashers int
// Our vector clock id
ShortID uint64
// Optional progress tick interval which defines how often FolderScanProgress
// events are emitted. A negative value disables them.
ProgressTickIntervalS int
}
type TempNamer interface {
@@ -92,12 +99,13 @@ func (w *Walker) Walk() (chan protocol.FileInfo, error) {
return nil, err
}
files := make(chan protocol.FileInfo)
hashedFiles := make(chan protocol.FileInfo)
newParallelHasher(w.Dir, w.BlockSize, w.Hashers, hashedFiles, files)
toHashChan := make(chan protocol.FileInfo)
finishedChan := make(chan protocol.FileInfo)
// A routine which walks the filesystem tree, and sends files which have
// been modified to the counter routine.
go func() {
hashFiles := w.walkAndHashFiles(files, hashedFiles)
hashFiles := w.walkAndHashFiles(toHashChan, finishedChan)
if len(w.Subs) == 0 {
filepath.Walk(w.Dir, hashFiles)
} else {
@@ -105,10 +113,77 @@ func (w *Walker) Walk() (chan protocol.FileInfo, error) {
filepath.Walk(filepath.Join(w.Dir, sub), hashFiles)
}
}
close(files)
close(toHashChan)
}()
return hashedFiles, nil
// If we're not required to emit scan progress events, we just kick off
// the hashers and feed them directly from the walker.
if w.ProgressTickIntervalS < 0 {
newParallelHasher(w.Dir, w.BlockSize, w.Hashers, finishedChan, toHashChan, nil, nil)
return finishedChan, nil
}
// Defaults to every 2 seconds.
if w.ProgressTickIntervalS == 0 {
w.ProgressTickIntervalS = 2
}
ticker := time.NewTicker(time.Duration(w.ProgressTickIntervalS) * time.Second)
// We need to emit progress events, so we spawn a routine which buffers
// the list of files to be hashed and counts the total number of bytes.
// Once no more files need to be hashed (the channel is closed), it starts
// a routine which periodically emits FolderScanProgress events until the
// parallel hasher signals that it is done. The hasher, in turn, is stopped
// by this routine closing the channel over which it receives the files to
// hash.
go func() {
var filesToHash []protocol.FileInfo
var total, progress uint64
for file := range toHashChan {
filesToHash = append(filesToHash, file)
total += uint64(file.CachedSize)
}
realToHashChan := make(chan protocol.FileInfo)
done := make(chan struct{})
newParallelHasher(w.Dir, w.BlockSize, w.Hashers, finishedChan, realToHashChan, &progress, done)
// A routine which emits a FolderScanProgress event on every tick of the
// progress ticker, until the hasher routines terminate.
go func() {
for {
select {
case <-done:
if debug {
l.Debugln("Walk progress done", w.Dir, w.Subs, w.BlockSize, w.Matcher)
}
ticker.Stop()
return
case <-ticker.C:
current := atomic.LoadUint64(&progress)
if debug && total > 0 {
l.Debugf("Walk %s %s current progress %d/%d (%d%%)", w.Dir, w.Subs, current, total, current*100/total)
}
events.Default.Log(events.FolderScanProgress, map[string]interface{}{
"folder": w.Folder,
"current": current,
"total": total,
})
}
}
}()
for _, file := range filesToHash {
if debug {
l.Debugln("real to hash:", file.Name)
}
realToHashChan <- file
}
close(realToHashChan)
}()
return finishedChan, nil
}
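Distilled to its essentials, the scheme is: buffer the queue to learn the total first, hand it to workers that bump a shared atomic counter, and let a ticker report until a done channel closes. A standalone sketch with illustrative names, not code from this tree:

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
	"time"
)

func main() {
	work := []uint64{100, 250, 400, 50} // byte counts to "hash"

	// Pass 1: the total is known before any work starts.
	var total, progress uint64
	for _, n := range work {
		total += n
	}

	jobs := make(chan uint64)
	done := make(chan struct{})

	// Workers bump the shared counter; done closes when all finish.
	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for n := range jobs {
				time.Sleep(50 * time.Millisecond) // stand-in for hashing
				atomic.AddUint64(&progress, n)
			}
		}()
	}
	go func() { wg.Wait(); close(done) }()

	// Reporter: tick until the workers are done.
	ticker := time.NewTicker(20 * time.Millisecond)
	go func() {
		defer ticker.Stop()
		for {
			select {
			case <-done:
				return
			case <-ticker.C:
				cur := atomic.LoadUint64(&progress)
				fmt.Printf("progress %d/%d (%d%%)\n", cur, total, 100*cur/total)
			}
		}
	}()

	for _, n := range work {
		jobs <- n
	}
	close(jobs)
	<-done
}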
func (w *Walker) walkAndHashFiles(fchan, dchan chan protocol.FileInfo) filepath.WalkFunc {
@@ -241,7 +316,7 @@ func (w *Walker) walkAndHashFiles(fchan, dchan chan protocol.FileInfo) filepath.WalkFunc {
return skip
}
blocks, err := Blocks(strings.NewReader(target), w.BlockSize, 0)
blocks, err := Blocks(strings.NewReader(target), w.BlockSize, 0, nil)
if err != nil {
if debug {
l.Debugln("hash link error:", p, err)
@@ -272,10 +347,10 @@ func (w *Walker) walkAndHashFiles(fchan, dchan chan protocol.FileInfo) filepath.WalkFunc {
}
if debug {
l.Debugln("symlink to hash:", p, f)
l.Debugln("symlink changedb:", p, f)
}
fchan <- f
dchan <- f
return skip
}
@@ -349,10 +424,11 @@ func (w *Walker) walkAndHashFiles(fchan, dchan chan protocol.FileInfo) filepath.WalkFunc {
}
f := protocol.FileInfo{
Name: rn,
Version: cf.Version.Update(w.ShortID),
Flags: flags,
Modified: mtime.Unix(),
Name: rn,
Version: cf.Version.Update(w.ShortID),
Flags: flags,
Modified: mtime.Unix(),
CachedSize: info.Size(),
}
if debug {
l.Debugln("to hash:", p, f)

View File

@@ -149,8 +149,9 @@ func TestVerify(t *testing.T) {
// data should be an even multiple of blocksize long
data := []byte("Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut e")
buf := bytes.NewBuffer(data)
var progress uint64
blocks, err := Blocks(buf, blocksize, 0)
blocks, err := Blocks(buf, blocksize, 0, &progress)
if err != nil {
t.Fatal(err)
}
@@ -158,6 +159,10 @@
t.Fatalf("Incorrect number of blocks %d != %d", len(blocks), exp)
}
if uint64(len(data)) != progress {
t.Fatalf("Incorrect counter value %d != %d", len(data), progress)
}
buf = bytes.NewBuffer(data)
err = Verify(buf, blocksize, blocks)
t.Log(err)