2014-11-16 21:13:20 +01:00
|
|
|
// Copyright (C) 2014 The Syncthing Authors.
|
2014-09-29 21:43:32 +02:00
|
|
|
//
|
2015-03-07 21:36:35 +01:00
|
|
|
// This Source Code Form is subject to the terms of the Mozilla Public
|
|
|
|
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
|
|
|
// You can obtain one at http://mozilla.org/MPL/2.0/.
|
2014-06-01 22:50:14 +02:00
|
|
|
|
2013-12-15 11:43:31 +01:00
|
|
|
package main
|
|
|
|
|
|
|
|
import (
|
|
|
|
"crypto/tls"
|
2014-01-26 14:28:41 +01:00
|
|
|
"flag"
|
2014-01-08 14:37:33 +01:00
|
|
|
"fmt"
|
2015-04-30 21:33:32 +02:00
|
|
|
"io/ioutil"
|
2013-12-15 11:43:31 +01:00
|
|
|
"log"
|
|
|
|
"net"
|
|
|
|
"net/http"
|
|
|
|
_ "net/http/pprof"
|
2014-10-06 09:25:45 +02:00
|
|
|
"net/url"
|
2013-12-15 11:43:31 +01:00
|
|
|
"os"
|
2014-03-28 14:36:57 +01:00
|
|
|
"path/filepath"
|
2014-06-23 10:38:50 +02:00
|
|
|
"regexp"
|
2014-01-10 00:09:27 +01:00
|
|
|
"runtime"
|
2014-04-14 12:13:50 +02:00
|
|
|
"runtime/pprof"
|
2014-04-18 13:20:42 +02:00
|
|
|
"strconv"
|
2013-12-15 11:43:31 +01:00
|
|
|
"strings"
|
|
|
|
"time"
|
2014-04-08 13:45:18 +02:00
|
|
|
|
2014-10-26 13:15:14 +01:00
|
|
|
"github.com/calmh/logger"
|
2014-04-01 20:36:54 +02:00
|
|
|
"github.com/juju/ratelimit"
|
2015-01-13 13:22:56 +01:00
|
|
|
"github.com/syncthing/protocol"
|
2015-08-06 11:29:25 +02:00
|
|
|
"github.com/syncthing/syncthing/lib/config"
|
|
|
|
"github.com/syncthing/syncthing/lib/db"
|
|
|
|
"github.com/syncthing/syncthing/lib/discover"
|
|
|
|
"github.com/syncthing/syncthing/lib/events"
|
|
|
|
"github.com/syncthing/syncthing/lib/model"
|
|
|
|
"github.com/syncthing/syncthing/lib/osutil"
|
2015-07-17 22:22:07 +02:00
|
|
|
"github.com/syncthing/syncthing/lib/relay"
|
2015-08-06 11:29:25 +02:00
|
|
|
"github.com/syncthing/syncthing/lib/symlinks"
|
|
|
|
"github.com/syncthing/syncthing/lib/upgrade"
|
2015-07-17 22:22:07 +02:00
|
|
|
|
2014-07-06 14:46:48 +02:00
|
|
|
"github.com/syndtr/goleveldb/leveldb"
|
2015-02-23 08:21:05 +01:00
|
|
|
"github.com/syndtr/goleveldb/leveldb/errors"
|
2014-09-02 14:47:36 +02:00
|
|
|
"github.com/syndtr/goleveldb/leveldb/opt"
|
2015-04-25 11:19:53 +02:00
|
|
|
"github.com/thejerf/suture"
|
2014-11-30 00:17:00 +01:00
|
|
|
"golang.org/x/crypto/bcrypt"
|
2013-12-15 11:43:31 +01:00
|
|
|
)
|
|
|
|
|
2014-04-19 16:38:11 +02:00
|
|
|
// Build information. These are overridden at build time via -ldflags by the
// build script; the values below are the defaults seen in an ad-hoc
// "go build" (hence "unknown-dev"). IsRelease, IsBeta, BuildDate and
// LongVersion are derived from the injected values in init() below.
var (
	Version     = "unknown-dev"      // git describe output, e.g. "v0.11.22"
	Codename    = "Beryllium Bedbug" // release codename for this series
	BuildEnv    = "default"          // build environment identifier
	BuildStamp  = "0"                // Unix epoch seconds of the build, as a string
	BuildDate   time.Time            // parsed from BuildStamp in init()
	BuildHost   = "unknown"          // hostname of the build machine
	BuildUser   = "unknown"          // user that performed the build
	IsRelease   bool                 // true for a clean release version (set in init())
	IsBeta      bool                 // true for any dash-suffixed prerelease (set in init())
	LongVersion string               // full human readable version line (set in init())
)
|
|
|
|
|
2014-09-02 13:08:24 +02:00
|
|
|
// Process exit codes. The monitor process inspects these to decide whether
// to restart the inner process or perform an upgrade.
const (
	exitSuccess            = 0 // normal termination
	exitError              = 1 // fatal error (l.Fatalln and friends)
	exitNoUpgradeAvailable = 2 // -upgrade/-upgrade-check found nothing newer
	exitRestarting         = 3 // inner process requests a restart
	exitUpgrading          = 4 // inner process requests an upgrade
)
|
|
|
|
|
2015-03-26 23:26:51 +01:00
|
|
|
const (
	// bepProtocolName is the TLS NextProto identifier for the Block
	// Exchange Protocol used between devices.
	bepProtocolName = "bep/1.0"
	// pingEventInterval is how often a ping event is emitted on the event
	// bus to keep long-polling API consumers alive.
	pingEventInterval = time.Minute
)
|
2015-03-05 15:58:16 +01:00
|
|
|
|
2014-05-15 02:08:56 +02:00
|
|
|
// l is the package-wide logger; all console output in this package goes
// through it.
var l = logger.DefaultLogger
|
|
|
|
|
2014-04-19 16:38:11 +02:00
|
|
|
func init() {
|
2014-06-23 10:38:50 +02:00
|
|
|
if Version != "unknown-dev" {
|
|
|
|
// If not a generic dev build, version string should come from git describe
|
2014-08-05 19:38:31 +02:00
|
|
|
exp := regexp.MustCompile(`^v\d+\.\d+\.\d+(-[a-z0-9]+)*(\+\d+-g[0-9a-f]+)?(-dirty)?$`)
|
2014-06-23 10:38:50 +02:00
|
|
|
if !exp.MatchString(Version) {
|
|
|
|
l.Fatalf("Invalid version string %q;\n\tdoes not match regexp %v", Version, exp)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-04-21 02:06:10 +02:00
|
|
|
// Check for a clean release build. A release is something like "v0.1.2",
|
|
|
|
// with an optional suffix of letters and dot separated numbers like
|
|
|
|
// "-beta3.47". If there's more stuff, like a plus sign and a commit hash
|
|
|
|
// and so on, then it's not a release. If there's a dash anywhere in
|
|
|
|
// there, it's some kind of beta or prerelease version.
|
|
|
|
|
|
|
|
exp := regexp.MustCompile(`^v\d+\.\d+\.\d+(-[a-z]+[\d\.]+)?$`)
|
2014-10-23 19:09:53 +02:00
|
|
|
IsRelease = exp.MatchString(Version)
|
2015-04-21 02:06:10 +02:00
|
|
|
IsBeta = strings.Contains(Version, "-")
|
2014-10-23 19:09:53 +02:00
|
|
|
|
2014-04-19 16:38:11 +02:00
|
|
|
stamp, _ := strconv.Atoi(BuildStamp)
|
|
|
|
BuildDate = time.Unix(int64(stamp), 0)
|
2014-04-19 16:40:19 +02:00
|
|
|
|
2014-05-11 22:26:48 +02:00
|
|
|
date := BuildDate.UTC().Format("2006-01-02 15:04:05 MST")
|
2015-08-18 13:30:25 +02:00
|
|
|
LongVersion = fmt.Sprintf(`syncthing %s "%s" (%s %s-%s %s) %s@%s %s`, Version, Codename, runtime.Version(), runtime.GOOS, runtime.GOARCH, BuildEnv, BuildUser, BuildHost, date)
|
2014-05-15 02:08:56 +02:00
|
|
|
|
|
|
|
if os.Getenv("STTRACE") != "" {
|
2014-06-04 10:24:30 +02:00
|
|
|
logFlags = log.Ltime | log.Ldate | log.Lmicroseconds | log.Lshortfile
|
2014-05-15 02:08:56 +02:00
|
|
|
}
|
2014-04-19 16:38:11 +02:00
|
|
|
}
|
2013-12-18 19:36:28 +01:00
|
|
|
|
2013-12-15 11:43:31 +01:00
|
|
|
// Shared runtime state for the syncthing process. These are set up during
// syncthingMain and read by the various service goroutines.
var (
	cfg            *config.Wrapper      // live configuration, shared and subscribable
	myID           protocol.DeviceID    // our own device ID, derived from the TLS certificate
	confDir        string               // -home flag; overrides the default config directory
	logFlags       = log.Ltime          // log prefix flags, adjustable via -logflags / STTRACE
	writeRateLimit *ratelimit.Bucket    // outgoing bandwidth limiter (nil = unlimited)
	readRateLimit  *ratelimit.Bucket    // incoming bandwidth limiter (nil = unlimited)
	stop           = make(chan int)     // receives the desired exit code to shut down
	discoverer     *discover.Discoverer // local/global discovery, nil until started
	relaySvc       *relay.Svc           // relay client service, nil if relays are disabled
	cert           tls.Certificate      // our device certificate
	lans           []*net.IPNet         // local networks, used to bypass rate limits on LAN
)
|
|
|
|
|
2014-03-09 08:48:29 +01:00
|
|
|
// Command line usage texts. extraUsage is appended to the generated flag
// help; the single %s is filled in with the default configuration directory.
const (
	usage      = "syncthing [options]"
	extraUsage = `
The default configuration directory is:

  %s


The -logflags value is a sum of the following:

   1  Date
   2  Time
   4  Microsecond time
   8  Long filename
  16  Short filename

I.e. to prefix each log line with date and time, set -logflags=3 (1 + 2 from
above). The value 0 is used to disable all of the above. The default is to
show time only (2).


Development Settings
--------------------

The following environment variables modify syncthing's behavior in ways that
are mostly useful for developers. Use with care.

 STGUIASSETS     Directory to load GUI assets from. Overrides compiled in assets.

 STTRACE         A comma separated string of facilities to trace. The valid
                 facility strings are:

                 - "beacon"   (the beacon package)
                 - "discover" (the discover package)
                 - "events"   (the events package)
                 - "files"    (the files package)
                 - "http"     (the main package; HTTP requests)
                 - "locks"    (the sync package; trace long held locks)
                 - "net"      (the main package; connections & network messages)
                 - "model"    (the model package)
                 - "scanner"  (the scanner package)
                 - "stats"    (the stats package)
                 - "suture"   (the suture package; service management)
                 - "upnp"     (the upnp package)
                 - "xdr"      (the xdr package)
                 - "all"      (all of the above)

 STPROFILER      Set to a listen address such as "127.0.0.1:9090" to start the
                 profiler with HTTP access.

 STCPUPROFILE    Write a CPU profile to cpu-$pid.pprof on exit.

 STHEAPPROFILE   Write heap profiles to heap-$pid-$timestamp.pprof each time
                 heap usage increases.

 STBLOCKPROFILE  Write block profiles to block-$pid-$timestamp.pprof every 20
                 seconds.

 STPERFSTATS     Write running performance statistics to perf-$pid.csv. Not
                 supported on Windows.

 STNOUPGRADE     Disable automatic upgrades.

 GOMAXPROCS      Set the maximum number of CPU cores to use. Defaults to all
                 available CPU cores.

 GOGC            Percentage of heap growth at which to trigger GC. Default is
                 100. Lower numbers keep peak memory usage down, at the price
                 of CPU usage (ie. performance).`
)
|
|
|
|
|
2014-10-06 17:55:54 +02:00
|
|
|
// Command line and environment options
|
2014-09-02 13:08:24 +02:00
|
|
|
// Command line and environment options
var (
	reset          bool   // -reset: wipe the database and exit
	showVersion    bool   // -version: print version and exit
	doUpgrade      bool   // -upgrade: perform an upgrade and exit
	doUpgradeCheck bool   // -upgrade-check: only check for an upgrade
	upgradeTo      string // -upgrade-to: upgrade from an explicit URL
	noBrowser      bool   // -no-browser: don't open the GUI in a browser
	noConsole      bool   // -no-console (Windows): hide the console window
	generateDir    string // -generate: create key+config in this dir and exit
	logFile        string // -logfile (Windows): log file path, "-" for stdout
	auditEnabled   bool   // -audit: write events to an audit file
	verbose        bool   // -verbose: print verbose log output

	// Environment-derived defaults; several are legacy aliases for what is
	// now configurable via flags or the config file.
	noRestart         = os.Getenv("STNORESTART") != ""
	noUpgrade         = os.Getenv("STNOUPGRADE") != ""
	guiAddress        = os.Getenv("STGUIADDRESS") // legacy
	guiAuthentication = os.Getenv("STGUIAUTH")    // legacy
	guiAPIKey         = os.Getenv("STGUIAPIKEY")  // legacy
	profiler          = os.Getenv("STPROFILER")
	guiAssets         = os.Getenv("STGUIASSETS")
	cpuProfile        = os.Getenv("STCPUPROFILE") != ""
	stRestarting      = os.Getenv("STRESTART") != ""
	innerProcess      = os.Getenv("STNORESTART") != "" || os.Getenv("STMONITORED") != ""
)
|
|
|
|
|
2013-12-15 11:43:31 +01:00
|
|
|
func main() {
|
2014-10-19 14:57:03 +02:00
|
|
|
if runtime.GOOS == "windows" {
|
|
|
|
// On Windows, we use a log file by default. Setting the -logfile flag
|
2015-03-29 12:55:27 +02:00
|
|
|
// to "-" disables this behavior.
|
2014-10-19 14:57:03 +02:00
|
|
|
|
2015-03-29 12:55:27 +02:00
|
|
|
flag.StringVar(&logFile, "logfile", "", "Log file name (use \"-\" for stdout)")
|
2014-12-04 21:25:35 +01:00
|
|
|
|
|
|
|
// We also add an option to hide the console window
|
|
|
|
flag.BoolVar(&noConsole, "no-console", false, "Hide console window")
|
2014-10-19 14:57:03 +02:00
|
|
|
}
|
|
|
|
|
2014-10-14 21:11:05 +02:00
|
|
|
flag.StringVar(&generateDir, "generate", "", "Generate key and config in specified dir, then exit")
|
2014-10-06 17:55:54 +02:00
|
|
|
flag.StringVar(&guiAddress, "gui-address", guiAddress, "Override GUI address")
|
|
|
|
flag.StringVar(&guiAuthentication, "gui-authentication", guiAuthentication, "Override GUI authentication; username:password")
|
|
|
|
flag.StringVar(&guiAPIKey, "gui-apikey", guiAPIKey, "Override GUI API key")
|
|
|
|
flag.StringVar(&confDir, "home", "", "Set configuration directory")
|
|
|
|
flag.IntVar(&logFlags, "logflags", logFlags, "Select information in log line prefix")
|
|
|
|
flag.BoolVar(&noBrowser, "no-browser", false, "Do not start browser")
|
|
|
|
flag.BoolVar(&noRestart, "no-restart", noRestart, "Do not restart; just exit")
|
2015-04-03 20:06:03 +02:00
|
|
|
flag.BoolVar(&reset, "reset", false, "Reset the database")
|
2014-05-02 10:01:09 +02:00
|
|
|
flag.BoolVar(&doUpgrade, "upgrade", false, "Perform upgrade")
|
2014-07-31 10:26:45 +02:00
|
|
|
flag.BoolVar(&doUpgradeCheck, "upgrade-check", false, "Check for available upgrade")
|
2014-10-06 17:55:54 +02:00
|
|
|
flag.BoolVar(&showVersion, "version", false, "Show version")
|
2014-12-22 12:07:04 +01:00
|
|
|
flag.StringVar(&upgradeTo, "upgrade-to", upgradeTo, "Force upgrade directly from specified URL")
|
2015-04-25 11:21:47 +02:00
|
|
|
flag.BoolVar(&auditEnabled, "audit", false, "Write events to audit file")
|
2015-04-30 20:21:48 +02:00
|
|
|
flag.BoolVar(&verbose, "verbose", false, "Print verbose log output")
|
2014-10-06 17:55:54 +02:00
|
|
|
|
2015-03-29 12:55:27 +02:00
|
|
|
flag.Usage = usageFor(flag.CommandLine, usage, fmt.Sprintf(extraUsage, baseDirs["config"]))
|
2014-01-26 14:28:41 +01:00
|
|
|
flag.Parse()
|
2014-01-08 14:37:33 +01:00
|
|
|
|
2014-12-04 21:25:35 +01:00
|
|
|
if noConsole {
|
|
|
|
osutil.HideConsole()
|
|
|
|
}
|
|
|
|
|
2015-03-29 12:55:27 +02:00
|
|
|
if confDir != "" {
|
2014-10-06 17:55:54 +02:00
|
|
|
// Not set as default above because the string can be really long.
|
2015-03-29 12:55:27 +02:00
|
|
|
baseDirs["config"] = confDir
|
|
|
|
}
|
|
|
|
|
2015-04-03 20:22:39 +02:00
|
|
|
if err := expandLocations(); err != nil {
|
|
|
|
l.Fatalln(err)
|
|
|
|
}
|
|
|
|
|
2015-06-14 22:28:38 +02:00
|
|
|
if guiAssets == "" {
|
|
|
|
guiAssets = locations[locGUIAssets]
|
|
|
|
}
|
|
|
|
|
2015-03-29 12:55:27 +02:00
|
|
|
if runtime.GOOS == "windows" {
|
|
|
|
if logFile == "" {
|
|
|
|
// Use the default log file location
|
|
|
|
logFile = locations[locLogFile]
|
|
|
|
} else if logFile == "-" {
|
|
|
|
// Don't use a logFile
|
|
|
|
logFile = ""
|
|
|
|
}
|
2014-10-06 17:55:54 +02:00
|
|
|
}
|
|
|
|
|
2014-01-26 14:28:41 +01:00
|
|
|
if showVersion {
|
2014-04-19 16:40:19 +02:00
|
|
|
fmt.Println(LongVersion)
|
2014-04-14 12:13:50 +02:00
|
|
|
return
|
2013-12-15 11:43:31 +01:00
|
|
|
}
|
2014-01-08 14:37:33 +01:00
|
|
|
|
2014-06-04 10:24:30 +02:00
|
|
|
l.SetFlags(logFlags)
|
|
|
|
|
2014-08-03 09:41:08 +02:00
|
|
|
if generateDir != "" {
|
2014-10-06 09:25:45 +02:00
|
|
|
dir, err := osutil.ExpandTilde(generateDir)
|
|
|
|
if err != nil {
|
|
|
|
l.Fatalln("generate:", err)
|
|
|
|
}
|
2014-08-03 09:41:08 +02:00
|
|
|
|
|
|
|
info, err := os.Stat(dir)
|
2014-12-12 21:39:03 +01:00
|
|
|
if err == nil && !info.IsDir() {
|
2014-08-03 09:41:08 +02:00
|
|
|
l.Fatalln(dir, "is not a directory")
|
|
|
|
}
|
2014-12-16 22:40:04 +01:00
|
|
|
if err != nil && os.IsNotExist(err) {
|
2015-05-20 22:46:37 +02:00
|
|
|
err = osutil.MkdirAll(dir, 0700)
|
2014-12-12 21:39:03 +01:00
|
|
|
if err != nil {
|
2014-12-16 22:40:04 +01:00
|
|
|
l.Fatalln("generate:", err)
|
2014-12-12 21:39:03 +01:00
|
|
|
}
|
|
|
|
}
|
2014-08-03 09:41:08 +02:00
|
|
|
|
2015-03-29 12:55:27 +02:00
|
|
|
certFile, keyFile := filepath.Join(dir, "cert.pem"), filepath.Join(dir, "key.pem")
|
|
|
|
cert, err := tls.LoadX509KeyPair(certFile, keyFile)
|
2014-08-03 09:41:08 +02:00
|
|
|
if err == nil {
|
|
|
|
l.Warnln("Key exists; will not overwrite.")
|
2014-09-28 13:00:38 +02:00
|
|
|
l.Infoln("Device ID:", protocol.NewDeviceID(cert.Certificate[0]))
|
2014-10-14 21:11:05 +02:00
|
|
|
} else {
|
2015-03-29 12:55:27 +02:00
|
|
|
cert, err = newCertificate(certFile, keyFile, tlsDefaultCommonName)
|
2014-10-14 21:11:05 +02:00
|
|
|
myID = protocol.NewDeviceID(cert.Certificate[0])
|
|
|
|
if err != nil {
|
|
|
|
l.Fatalln("load cert:", err)
|
|
|
|
}
|
|
|
|
if err == nil {
|
|
|
|
l.Infoln("Device ID:", protocol.NewDeviceID(cert.Certificate[0]))
|
|
|
|
}
|
2014-08-03 09:41:08 +02:00
|
|
|
}
|
|
|
|
|
2014-10-14 21:11:05 +02:00
|
|
|
cfgFile := filepath.Join(dir, "config.xml")
|
|
|
|
if _, err := os.Stat(cfgFile); err == nil {
|
|
|
|
l.Warnln("Config exists; will not overwrite.")
|
|
|
|
return
|
2014-09-20 15:42:20 +02:00
|
|
|
}
|
2014-10-14 21:11:05 +02:00
|
|
|
var myName, _ = os.Hostname()
|
|
|
|
var newCfg = defaultConfig(myName)
|
|
|
|
var cfg = config.Wrap(cfgFile, newCfg)
|
|
|
|
err = cfg.Save()
|
|
|
|
if err != nil {
|
|
|
|
l.Warnln("Failed to save config", err)
|
2014-08-03 09:41:08 +02:00
|
|
|
}
|
2014-10-14 21:11:05 +02:00
|
|
|
|
2014-08-03 09:41:08 +02:00
|
|
|
return
|
2014-07-31 21:29:44 +02:00
|
|
|
}
|
|
|
|
|
2015-03-29 12:55:27 +02:00
|
|
|
if info, err := os.Stat(baseDirs["config"]); err == nil && !info.IsDir() {
|
|
|
|
l.Fatalln("Config directory", baseDirs["config"], "is not a directory")
|
2014-09-18 00:19:23 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
// Ensure that our home directory exists.
|
2015-03-29 12:55:27 +02:00
|
|
|
ensureDir(baseDirs["config"], 0700)
|
2014-09-18 00:19:23 +02:00
|
|
|
|
2014-12-22 12:07:04 +01:00
|
|
|
if upgradeTo != "" {
|
|
|
|
err := upgrade.ToURL(upgradeTo)
|
|
|
|
if err != nil {
|
|
|
|
l.Fatalln("Upgrade:", err) // exits 1
|
|
|
|
}
|
|
|
|
l.Okln("Upgraded from", upgradeTo)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2014-07-31 16:01:23 +02:00
|
|
|
if doUpgrade || doUpgradeCheck {
|
2015-04-09 22:44:36 +02:00
|
|
|
rel, err := upgrade.LatestRelease(Version)
|
2014-05-02 10:01:09 +02:00
|
|
|
if err != nil {
|
2014-07-31 16:01:23 +02:00
|
|
|
l.Fatalln("Upgrade:", err) // exits 1
|
2014-05-02 10:01:09 +02:00
|
|
|
}
|
|
|
|
|
2014-07-31 16:01:23 +02:00
|
|
|
if upgrade.CompareVersions(rel.Tag, Version) <= 0 {
|
|
|
|
l.Infof("No upgrade available (current %q >= latest %q).", Version, rel.Tag)
|
2014-09-02 13:08:24 +02:00
|
|
|
os.Exit(exitNoUpgradeAvailable)
|
2014-07-31 10:26:45 +02:00
|
|
|
}
|
|
|
|
|
2014-07-31 16:01:23 +02:00
|
|
|
l.Infof("Upgrade available (current %q < latest %q)", Version, rel.Tag)
|
|
|
|
|
|
|
|
if doUpgrade {
|
2014-09-18 00:19:23 +02:00
|
|
|
// Use leveldb database locks to protect against concurrent upgrades
|
2015-03-29 12:55:27 +02:00
|
|
|
_, err = leveldb.OpenFile(locations[locDatabase], &opt.Options{OpenFilesCacheCapacity: 100})
|
2014-09-18 00:19:23 +02:00
|
|
|
if err != nil {
|
2015-04-30 21:33:32 +02:00
|
|
|
l.Infoln("Attempting upgrade through running Syncthing...")
|
|
|
|
err = upgradeViaRest()
|
|
|
|
if err != nil {
|
|
|
|
l.Fatalln("Upgrade:", err)
|
|
|
|
}
|
|
|
|
l.Okln("Syncthing upgrading")
|
|
|
|
return
|
2014-09-18 00:19:23 +02:00
|
|
|
}
|
|
|
|
|
2014-12-08 16:36:15 +01:00
|
|
|
err = upgrade.To(rel)
|
2014-07-31 16:01:23 +02:00
|
|
|
if err != nil {
|
|
|
|
l.Fatalln("Upgrade:", err) // exits 1
|
|
|
|
}
|
|
|
|
l.Okf("Upgraded to %q", rel.Tag)
|
2014-07-31 10:26:45 +02:00
|
|
|
}
|
2014-12-08 16:36:15 +01:00
|
|
|
|
|
|
|
return
|
2014-07-31 10:26:45 +02:00
|
|
|
}
|
|
|
|
|
2014-09-02 13:08:24 +02:00
|
|
|
if reset {
|
2015-04-03 20:06:03 +02:00
|
|
|
resetDB()
|
2014-09-02 13:08:24 +02:00
|
|
|
return
|
2014-08-03 09:41:08 +02:00
|
|
|
}
|
|
|
|
|
2014-10-06 17:55:54 +02:00
|
|
|
if noRestart {
|
2014-09-02 13:08:24 +02:00
|
|
|
syncthingMain()
|
|
|
|
} else {
|
|
|
|
monitorMain()
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-04-30 21:33:32 +02:00
|
|
|
func upgradeViaRest() error {
|
|
|
|
cfg, err := config.Load(locations[locConfigFile], protocol.LocalDeviceID)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
target := cfg.GUI().Address
|
|
|
|
if cfg.GUI().UseTLS {
|
|
|
|
target = "https://" + target
|
|
|
|
} else {
|
|
|
|
target = "http://" + target
|
|
|
|
}
|
|
|
|
r, _ := http.NewRequest("POST", target+"/rest/system/upgrade", nil)
|
|
|
|
r.Header.Set("X-API-Key", cfg.GUI().APIKey)
|
|
|
|
|
|
|
|
tr := &http.Transport{
|
|
|
|
TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
|
|
|
|
}
|
|
|
|
client := &http.Client{
|
|
|
|
Transport: tr,
|
|
|
|
Timeout: 60 * time.Second,
|
|
|
|
}
|
|
|
|
resp, err := client.Do(r)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
if resp.StatusCode != 200 {
|
|
|
|
bs, err := ioutil.ReadAll(resp.Body)
|
|
|
|
defer resp.Body.Close()
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
return errors.New(string(bs))
|
|
|
|
}
|
|
|
|
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2014-09-02 13:08:24 +02:00
|
|
|
func syncthingMain() {
|
2015-04-25 11:19:53 +02:00
|
|
|
// Create a main service manager. We'll add things to this as we go along.
|
2015-07-11 03:12:20 +02:00
|
|
|
// We want any logging it does to go through our log system.
|
2015-04-25 11:19:53 +02:00
|
|
|
mainSvc := suture.New("main", suture.Spec{
|
|
|
|
Log: func(line string) {
|
2015-06-03 09:47:39 +02:00
|
|
|
if debugSuture {
|
|
|
|
l.Debugln(line)
|
|
|
|
}
|
2015-04-25 11:19:53 +02:00
|
|
|
},
|
|
|
|
})
|
|
|
|
mainSvc.ServeBackground()
|
2014-09-02 13:08:24 +02:00
|
|
|
|
2015-04-25 11:21:47 +02:00
|
|
|
// Set a log prefix similar to the ID we will have later on, or early log
|
|
|
|
// lines look ugly.
|
|
|
|
l.SetPrefix("[start] ")
|
|
|
|
|
|
|
|
if auditEnabled {
|
|
|
|
startAuditing(mainSvc)
|
|
|
|
}
|
|
|
|
|
2015-04-30 20:21:48 +02:00
|
|
|
if verbose {
|
|
|
|
mainSvc.Add(newVerboseSvc())
|
|
|
|
}
|
|
|
|
|
2015-06-16 09:17:58 +02:00
|
|
|
// Event subscription for the API; must start early to catch the early events.
|
|
|
|
apiSub := events.NewBufferedSubscription(events.Default.Subscribe(events.AllEvents), 1000)
|
|
|
|
|
2014-01-10 00:09:27 +01:00
|
|
|
if len(os.Getenv("GOMAXPROCS")) == 0 {
|
|
|
|
runtime.GOMAXPROCS(runtime.NumCPU())
|
|
|
|
}
|
|
|
|
|
2015-08-15 15:20:58 +02:00
|
|
|
// Attempt to increase the limit on number of open files to the maximum
|
|
|
|
// allowed, in case we have many peers. We don't really care enough to
|
|
|
|
// report the error if there is one.
|
|
|
|
osutil.MaximizeOpenFileLimit()
|
|
|
|
|
2014-09-18 00:19:23 +02:00
|
|
|
// Ensure that that we have a certificate and key.
|
2015-04-25 11:19:53 +02:00
|
|
|
cert, err := tls.LoadX509KeyPair(locations[locCertFile], locations[locKeyFile])
|
2013-12-15 11:43:31 +01:00
|
|
|
if err != nil {
|
2015-03-29 12:55:27 +02:00
|
|
|
cert, err = newCertificate(locations[locCertFile], locations[locKeyFile], tlsDefaultCommonName)
|
2014-09-20 15:42:20 +02:00
|
|
|
if err != nil {
|
|
|
|
l.Fatalln("load cert:", err)
|
|
|
|
}
|
2013-12-15 11:43:31 +01:00
|
|
|
}
|
|
|
|
|
2014-12-07 16:41:24 +01:00
|
|
|
// We reinitialize the predictable RNG with our device ID, to get a
|
|
|
|
// sequence that is always the same but unique to this syncthing instance.
|
|
|
|
predictableRandom.Seed(seedFromBytes(cert.Certificate[0]))
|
|
|
|
|
2014-09-28 13:00:38 +02:00
|
|
|
myID = protocol.NewDeviceID(cert.Certificate[0])
|
2014-06-30 01:42:03 +02:00
|
|
|
l.SetPrefix(fmt.Sprintf("[%s] ", myID.String()[:5]))
|
2013-12-15 11:43:31 +01:00
|
|
|
|
2014-05-15 02:08:56 +02:00
|
|
|
l.Infoln(LongVersion)
|
|
|
|
l.Infoln("My ID:", myID)
|
2014-02-03 16:01:17 +01:00
|
|
|
|
2015-06-18 15:22:45 +02:00
|
|
|
// Emit the Starting event, now that we know who we are.
|
|
|
|
|
|
|
|
events.Default.Log(events.Starting, map[string]string{
|
|
|
|
"home": baseDirs["config"],
|
|
|
|
"myID": myID.String(),
|
|
|
|
})
|
|
|
|
|
2014-02-01 20:23:19 +01:00
|
|
|
// Prepare to be able to save configuration
|
|
|
|
|
2015-03-29 12:55:27 +02:00
|
|
|
cfgFile := locations[locConfigFile]
|
2014-02-01 20:23:19 +01:00
|
|
|
|
2014-08-15 00:15:26 +02:00
|
|
|
var myName string
|
|
|
|
|
2014-01-22 14:28:14 +01:00
|
|
|
// Load the configuration file, if it exists.
|
|
|
|
// If it does not, create a template.
|
|
|
|
|
2014-10-13 15:12:01 +02:00
|
|
|
if info, err := os.Stat(cfgFile); err == nil {
|
2014-10-13 15:59:02 +02:00
|
|
|
if !info.Mode().IsRegular() {
|
|
|
|
l.Fatalln("Config file is not a file?")
|
2014-10-13 15:12:01 +02:00
|
|
|
}
|
|
|
|
cfg, err = config.Load(cfgFile, myID)
|
|
|
|
if err == nil {
|
|
|
|
myCfg := cfg.Devices()[myID]
|
|
|
|
if myCfg.Name == "" {
|
|
|
|
myName, _ = os.Hostname()
|
|
|
|
} else {
|
|
|
|
myName = myCfg.Name
|
|
|
|
}
|
2014-08-15 00:15:26 +02:00
|
|
|
} else {
|
2014-10-13 15:59:02 +02:00
|
|
|
l.Fatalln("Configuration:", err)
|
2014-08-15 00:15:26 +02:00
|
|
|
}
|
2014-04-23 10:28:36 +02:00
|
|
|
} else {
|
2014-05-15 02:08:56 +02:00
|
|
|
l.Infoln("No config file; starting with empty defaults")
|
2014-08-15 00:15:26 +02:00
|
|
|
myName, _ = os.Hostname()
|
2014-10-10 17:32:43 +02:00
|
|
|
newCfg := defaultConfig(myName)
|
2014-10-06 09:25:45 +02:00
|
|
|
cfg = config.Wrap(cfgFile, newCfg)
|
2014-09-06 14:11:18 +02:00
|
|
|
cfg.Save()
|
2014-05-15 02:08:56 +02:00
|
|
|
l.Infof("Edit %s to taste or use the GUI\n", cfgFile)
|
2014-02-01 20:23:19 +01:00
|
|
|
}
|
2014-01-26 14:28:41 +01:00
|
|
|
|
2014-10-08 13:52:05 +02:00
|
|
|
if cfg.Raw().OriginalVersion != config.CurrentVersion {
|
|
|
|
l.Infoln("Archiving a copy of old config file format")
|
|
|
|
// Archive a copy
|
|
|
|
osutil.Rename(cfgFile, cfgFile+fmt.Sprintf(".v%d", cfg.Raw().OriginalVersion))
|
|
|
|
// Save the new version
|
|
|
|
cfg.Save()
|
|
|
|
}
|
|
|
|
|
2015-04-09 12:53:13 +02:00
|
|
|
if err := checkShortIDs(cfg); err != nil {
|
|
|
|
l.Fatalln("Short device IDs are in conflict. Unlucky!\n Regenerate the device ID of one if the following:\n ", err)
|
|
|
|
}
|
|
|
|
|
2014-10-06 17:55:54 +02:00
|
|
|
if len(profiler) > 0 {
|
2013-12-15 11:43:31 +01:00
|
|
|
go func() {
|
2014-05-15 02:08:56 +02:00
|
|
|
l.Debugln("Starting profiler on", profiler)
|
2014-05-20 18:41:01 +02:00
|
|
|
runtime.SetBlockProfileRate(1)
|
2014-01-26 14:28:41 +01:00
|
|
|
err := http.ListenAndServe(profiler, nil)
|
2013-12-18 19:36:28 +01:00
|
|
|
if err != nil {
|
2014-05-15 02:08:56 +02:00
|
|
|
l.Fatalln(err)
|
2013-12-18 19:36:28 +01:00
|
|
|
}
|
2013-12-15 11:43:31 +01:00
|
|
|
}()
|
|
|
|
}
|
|
|
|
|
|
|
|
// The TLS configuration is used for both the listening socket and outgoing
|
|
|
|
// connections.
|
|
|
|
|
2014-02-01 20:23:19 +01:00
|
|
|
tlsCfg := &tls.Config{
|
2014-01-09 09:28:08 +01:00
|
|
|
Certificates: []tls.Certificate{cert},
|
2015-03-05 15:58:16 +01:00
|
|
|
NextProtos: []string{bepProtocolName},
|
2014-01-09 09:28:08 +01:00
|
|
|
ClientAuth: tls.RequestClientCert,
|
|
|
|
SessionTicketsDisabled: true,
|
|
|
|
InsecureSkipVerify: true,
|
|
|
|
MinVersion: tls.VersionTLS12,
|
2014-11-12 10:47:34 +01:00
|
|
|
CipherSuites: []uint16{
|
|
|
|
tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
|
|
|
|
tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
|
|
|
|
tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
|
|
|
|
tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,
|
|
|
|
tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,
|
|
|
|
tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,
|
|
|
|
},
|
2013-12-15 11:43:31 +01:00
|
|
|
}
|
|
|
|
|
2014-09-08 17:25:55 +02:00
|
|
|
// If the read or write rate should be limited, set up a rate limiter for it.
|
2014-04-01 20:36:54 +02:00
|
|
|
// This will be used on connections created in the connect and listen routines.
|
|
|
|
|
2014-10-06 09:25:45 +02:00
|
|
|
opts := cfg.Options()
|
|
|
|
|
2014-12-01 11:26:54 +01:00
|
|
|
if !opts.SymlinksEnabled {
|
2014-11-30 23:10:32 +01:00
|
|
|
symlinks.Supported = false
|
|
|
|
}
|
|
|
|
|
2015-06-27 13:34:41 +02:00
|
|
|
protocol.PingTimeout = time.Duration(opts.PingTimeoutS) * time.Second
|
|
|
|
protocol.PingIdleTime = time.Duration(opts.PingIdleTimeS) * time.Second
|
|
|
|
|
2014-10-06 09:25:45 +02:00
|
|
|
if opts.MaxSendKbps > 0 {
|
|
|
|
writeRateLimit = ratelimit.NewBucketWithRate(float64(1000*opts.MaxSendKbps), int64(5*1000*opts.MaxSendKbps))
|
2014-09-08 17:25:55 +02:00
|
|
|
}
|
2014-10-06 09:25:45 +02:00
|
|
|
if opts.MaxRecvKbps > 0 {
|
|
|
|
readRateLimit = ratelimit.NewBucketWithRate(float64(1000*opts.MaxRecvKbps), int64(5*1000*opts.MaxRecvKbps))
|
2014-01-12 16:59:35 +01:00
|
|
|
}
|
2015-03-08 19:36:59 +01:00
|
|
|
|
2015-03-19 12:07:20 +01:00
|
|
|
if (opts.MaxRecvKbps > 0 || opts.MaxSendKbps > 0) && !opts.LimitBandwidthInLan {
|
2015-03-08 19:36:59 +01:00
|
|
|
lans, _ = osutil.GetLans()
|
2015-03-19 12:07:20 +01:00
|
|
|
networks := make([]string, 0, len(lans))
|
|
|
|
for _, lan := range lans {
|
|
|
|
networks = append(networks, lan.String())
|
|
|
|
}
|
|
|
|
l.Infoln("Local networks:", strings.Join(networks, ", "))
|
2015-03-08 19:36:59 +01:00
|
|
|
}
|
2013-12-15 11:43:31 +01:00
|
|
|
|
2015-03-29 12:55:27 +02:00
|
|
|
dbFile := locations[locDatabase]
|
2015-04-30 20:25:44 +02:00
|
|
|
ldb, err := leveldb.OpenFile(dbFile, dbOpts())
|
2015-08-16 16:10:16 +02:00
|
|
|
if leveldbIsCorrupted(err) {
|
2015-04-30 20:25:44 +02:00
|
|
|
ldb, err = leveldb.RecoverFile(dbFile, dbOpts())
|
2015-02-23 08:21:05 +01:00
|
|
|
}
|
2015-08-16 16:10:16 +02:00
|
|
|
if leveldbIsCorrupted(err) {
|
|
|
|
// The database is corrupted, and we've tried to recover it but it
|
|
|
|
// didn't work. At this point there isn't much to do beyond dropping
|
|
|
|
// the database and reindexing...
|
|
|
|
l.Infoln("Database corruption detected, unable to recover. Reinitializing...")
|
|
|
|
if err := resetDB(); err != nil {
|
|
|
|
l.Fatalln("Remove database:", err)
|
|
|
|
}
|
|
|
|
ldb, err = leveldb.OpenFile(dbFile, dbOpts())
|
|
|
|
}
|
2014-07-06 14:46:48 +02:00
|
|
|
if err != nil {
|
2014-08-17 01:03:41 +02:00
|
|
|
l.Fatalln("Cannot open database:", err, "- Is another copy of Syncthing already running?")
|
2014-07-06 14:46:48 +02:00
|
|
|
}
|
2014-08-31 13:34:17 +02:00
|
|
|
|
2014-09-28 13:00:38 +02:00
|
|
|
// Remove database entries for folders that no longer exist in the config
|
2014-10-06 09:25:45 +02:00
|
|
|
folders := cfg.Folders()
|
2015-01-12 14:50:30 +01:00
|
|
|
for _, folder := range db.ListFolders(ldb) {
|
2014-10-06 09:25:45 +02:00
|
|
|
if _, ok := folders[folder]; !ok {
|
2014-09-28 13:00:38 +02:00
|
|
|
l.Infof("Cleaning data for dropped folder %q", folder)
|
2015-01-12 14:50:30 +01:00
|
|
|
db.DropFolder(ldb, folder)
|
2014-08-31 13:34:17 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-03-25 22:37:35 +01:00
|
|
|
m := model.NewModel(cfg, myID, myName, "syncthing", Version, ldb)
|
2015-06-03 09:47:39 +02:00
|
|
|
cfg.Subscribe(m)
|
2014-04-01 20:36:54 +02:00
|
|
|
|
2015-04-08 14:35:03 +02:00
|
|
|
if t := os.Getenv("STDEADLOCKTIMEOUT"); len(t) > 0 {
|
|
|
|
it, err := strconv.Atoi(t)
|
|
|
|
if err == nil {
|
|
|
|
m.StartDeadlockDetector(time.Duration(it) * time.Second)
|
|
|
|
}
|
|
|
|
} else if !IsRelease || IsBeta {
|
2015-07-24 20:55:52 +02:00
|
|
|
m.StartDeadlockDetector(20 * time.Minute)
|
2015-04-08 14:35:03 +02:00
|
|
|
}
|
|
|
|
|
2014-09-28 13:00:38 +02:00
|
|
|
// Clear out old indexes for other devices. Otherwise we'll start up and
|
2014-08-05 12:20:50 +02:00
|
|
|
// start needing a bunch of files which are nowhere to be found. This
|
|
|
|
// needs to be changed when we correctly do persistent indexes.
|
2014-10-06 09:25:45 +02:00
|
|
|
for _, folderCfg := range cfg.Folders() {
|
2015-03-28 15:25:42 +01:00
|
|
|
m.AddFolder(folderCfg)
|
2014-09-28 13:00:38 +02:00
|
|
|
for _, device := range folderCfg.DeviceIDs() {
|
|
|
|
if device == myID {
|
2014-09-01 17:48:39 +02:00
|
|
|
continue
|
|
|
|
}
|
2015-01-14 23:11:31 +01:00
|
|
|
m.Index(device, folderCfg.ID, nil, 0, nil)
|
2014-08-05 12:20:50 +02:00
|
|
|
}
|
2015-06-08 11:04:09 +02:00
|
|
|
// Routine to pull blocks from other devices to synchronize the local
|
|
|
|
// folder. Does not run when we are in read only (publish only) mode.
|
|
|
|
if folderCfg.ReadOnly {
|
|
|
|
m.StartFolderRO(folderCfg.ID)
|
|
|
|
} else {
|
|
|
|
m.StartFolderRW(folderCfg.ID)
|
|
|
|
}
|
2014-08-05 12:20:50 +02:00
|
|
|
}
|
|
|
|
|
2015-06-20 20:04:47 +02:00
|
|
|
mainSvc.Add(m)
|
|
|
|
|
2015-06-08 11:04:09 +02:00
|
|
|
// GUI
|
|
|
|
|
2015-06-16 09:17:58 +02:00
|
|
|
setupGUI(mainSvc, cfg, m, apiSub)
|
2015-06-08 11:04:09 +02:00
|
|
|
|
2014-09-10 08:18:30 +02:00
|
|
|
// The default port we announce, possibly modified by setupUPnP next.
|
|
|
|
|
2015-06-23 14:55:30 +02:00
|
|
|
uri, err := url.Parse(opts.ListenAddress[0])
|
|
|
|
if err != nil {
|
|
|
|
l.Fatalf("Failed to parse listen address %s: %v", opts.ListenAddress[0], err)
|
|
|
|
}
|
|
|
|
|
|
|
|
addr, err := net.ResolveTCPAddr("tcp", uri.Host)
|
2014-09-10 08:18:30 +02:00
|
|
|
if err != nil {
|
|
|
|
l.Fatalln("Bad listen address:", err)
|
|
|
|
}
|
|
|
|
|
2015-06-28 21:09:53 +02:00
|
|
|
// Start the relevant services
|
|
|
|
|
|
|
|
connectionSvc := newConnectionSvc(cfg, myID, m, tlsCfg)
|
|
|
|
mainSvc.Add(connectionSvc)
|
|
|
|
|
2015-07-24 21:07:26 +02:00
|
|
|
if opts.RelaysEnabled && (opts.GlobalAnnEnabled || opts.RelayWithoutGlobalAnn) {
|
2015-07-24 20:55:52 +02:00
|
|
|
relaySvc = relay.NewSvc(cfg, tlsCfg, connectionSvc.conns)
|
|
|
|
connectionSvc.Add(relaySvc)
|
|
|
|
}
|
|
|
|
|
2015-04-25 13:52:07 +02:00
|
|
|
// Start discovery
|
|
|
|
|
|
|
|
localPort := addr.Port
|
2015-07-17 22:22:07 +02:00
|
|
|
discoverer = discovery(localPort, relaySvc)
|
2015-04-25 13:52:07 +02:00
|
|
|
|
|
|
|
// Start UPnP. The UPnP service will restart global discovery if the
|
|
|
|
// external port changes.
|
2014-04-18 13:20:42 +02:00
|
|
|
|
2014-10-06 09:25:45 +02:00
|
|
|
if opts.UPnPEnabled {
|
2015-04-25 13:52:07 +02:00
|
|
|
upnpSvc := newUPnPSvc(cfg, localPort)
|
|
|
|
mainSvc.Add(upnpSvc)
|
2014-04-18 13:20:42 +02:00
|
|
|
}
|
|
|
|
|
2014-10-06 17:55:54 +02:00
|
|
|
if cpuProfile {
|
2014-08-13 14:38:23 +02:00
|
|
|
f, err := os.Create(fmt.Sprintf("cpu-%d.pprof", os.Getpid()))
|
2014-04-14 12:13:50 +02:00
|
|
|
if err != nil {
|
|
|
|
log.Fatal(err)
|
|
|
|
}
|
|
|
|
pprof.StartCPUProfile(f)
|
|
|
|
}
|
|
|
|
|
2014-10-06 09:25:45 +02:00
|
|
|
for _, device := range cfg.Devices() {
|
2014-09-28 13:00:38 +02:00
|
|
|
if len(device.Name) > 0 {
|
|
|
|
l.Infof("Device %s is %q at %v", device.DeviceID, device.Name, device.Addresses)
|
2014-05-24 21:39:08 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-10-06 09:25:45 +02:00
|
|
|
if opts.URAccepted > 0 && opts.URAccepted < usageReportVersion {
|
2014-06-11 20:04:23 +02:00
|
|
|
l.Infoln("Anonymous usage report has changed; revoking acceptance")
|
2014-10-06 09:25:45 +02:00
|
|
|
opts.URAccepted = 0
|
2014-11-27 10:00:07 +01:00
|
|
|
opts.URUniqueID = ""
|
2014-10-06 09:25:45 +02:00
|
|
|
cfg.SetOptions(opts)
|
2014-06-11 20:04:23 +02:00
|
|
|
}
|
2014-10-06 09:25:45 +02:00
|
|
|
if opts.URAccepted >= usageReportVersion {
|
2014-11-27 10:00:07 +01:00
|
|
|
if opts.URUniqueID == "" {
|
|
|
|
// Previously the ID was generated from the node ID. We now need
|
|
|
|
// to generate a new one.
|
2014-12-07 16:41:24 +01:00
|
|
|
opts.URUniqueID = randomString(8)
|
2014-12-07 15:49:11 +01:00
|
|
|
cfg.SetOptions(opts)
|
|
|
|
cfg.Save()
|
2014-11-27 10:00:07 +01:00
|
|
|
}
|
2014-06-11 20:04:23 +02:00
|
|
|
}
|
|
|
|
|
2015-05-12 09:35:37 +02:00
|
|
|
// The usageReportingManager registers itself to listen to configuration
|
|
|
|
// changes, and there's nothing more we need to tell it from the outside.
|
|
|
|
// Hence we don't keep the returned pointer.
|
|
|
|
newUsageReportingManager(m, cfg)
|
|
|
|
|
2014-10-06 09:25:45 +02:00
|
|
|
if opts.RestartOnWakeup {
|
2014-09-10 22:24:53 +02:00
|
|
|
go standbyMonitor()
|
|
|
|
}
|
2014-08-27 23:38:36 +02:00
|
|
|
|
2014-10-06 09:25:45 +02:00
|
|
|
if opts.AutoUpgradeIntervalH > 0 {
|
2014-11-26 19:48:31 +01:00
|
|
|
if noUpgrade {
|
|
|
|
l.Infof("No automatic upgrades; STNOUPGRADE environment variable defined.")
|
|
|
|
} else if IsRelease {
|
2014-10-23 19:09:53 +02:00
|
|
|
go autoUpgrade()
|
|
|
|
} else {
|
2015-02-14 16:10:43 +01:00
|
|
|
l.Infof("No automatic upgrades; %s is not a release version.", Version)
|
2014-10-23 19:09:53 +02:00
|
|
|
}
|
2014-09-26 00:51:12 +02:00
|
|
|
}
|
|
|
|
|
2015-06-16 09:27:06 +02:00
|
|
|
events.Default.Log(events.StartupComplete, map[string]string{
|
|
|
|
"myID": myID.String(),
|
|
|
|
})
|
2015-03-26 23:26:51 +01:00
|
|
|
go generatePingEvents()
|
2014-07-13 21:07:24 +02:00
|
|
|
|
2015-04-07 09:25:28 +02:00
|
|
|
cleanConfigDirectory()
|
|
|
|
|
2014-09-02 13:08:24 +02:00
|
|
|
code := <-stop
|
2014-07-13 21:07:24 +02:00
|
|
|
|
2015-04-25 11:19:53 +02:00
|
|
|
mainSvc.Stop()
|
|
|
|
|
2014-05-24 12:28:36 +02:00
|
|
|
l.Okln("Exiting")
|
2015-07-20 15:34:40 +02:00
|
|
|
|
|
|
|
if cpuProfile {
|
|
|
|
pprof.StopCPUProfile()
|
|
|
|
}
|
|
|
|
|
2014-09-02 13:08:24 +02:00
|
|
|
os.Exit(code)
|
2014-05-24 12:28:36 +02:00
|
|
|
}
|
|
|
|
|
2015-04-30 20:25:44 +02:00
|
|
|
// dbOpts returns the leveldb options used when opening the index
// database. The block cache capacity is taken from the config when set
// there, otherwise scaled with the amount of RAM in the machine.
func dbOpts() *opt.Options {
	// Calculate a suitable database block cache capacity.

	// Default is 8 MiB.
	blockCacheCapacity := 8 << 20
	// Increase block cache up to this maximum:
	const maxCapacity = 64 << 20
	// ... which we reach when the box has this much RAM:
	const maxAtRAM = 8 << 30

	if v := cfg.Options().DatabaseBlockCacheMiB; v != 0 {
		// Use the value from the config, if it's set.
		blockCacheCapacity = v << 20
	} else if bytes, err := memorySize(); err == nil {
		// We start at the default of 8 MiB and use larger values for machines
		// with more memory.

		if bytes > maxAtRAM {
			// Cap the cache at maxCapacity when we reach maxAtRam amount of memory
			blockCacheCapacity = maxCapacity
		} else if bytes > maxAtRAM/maxCapacity*int64(blockCacheCapacity) {
			// Grow from the default to maxCapacity at maxAtRam amount of memory
			blockCacheCapacity = int(bytes * maxCapacity / maxAtRAM)
		}
		l.Infoln("Database block cache capacity", blockCacheCapacity/1024, "KiB")
	}

	return &opt.Options{
		OpenFilesCacheCapacity: 100,
		BlockCacheCapacity:     blockCacheCapacity,
		WriteBuffer:            4 << 20,
	}
}
|
|
|
|
|
2015-04-25 11:21:47 +02:00
|
|
|
// startAuditing opens a timestamped audit log file and attaches an audit
// service to the supervisor. It blocks until the audit service has fully
// started, so no events are lost. Failure to create the log file is fatal.
func startAuditing(mainSvc *suture.Supervisor) {
	auditFile := timestampedLoc(locAuditLog)
	// O_EXCL: the timestamped name is expected to be unique; a clash is
	// treated as a fatal error rather than appending to an existing log.
	fd, err := os.OpenFile(auditFile, os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0600)
	if err != nil {
		l.Fatalln("Audit:", err)
	}

	auditSvc := newAuditSvc(fd)
	mainSvc.Add(auditSvc)

	// We wait for the audit service to fully start before we return, to
	// ensure we capture all events from the start.
	auditSvc.WaitForStart()

	l.Infoln("Audit log in", auditFile)
}
|
|
|
|
|
2015-06-16 09:17:58 +02:00
|
|
|
// setupGUI resolves the configured GUI listen address, starts the API
// service under the supervisor and, when so configured, opens the GUI in
// a browser. Address resolution or API startup failure is fatal. Does
// nothing if the GUI is disabled or has no address.
func setupGUI(mainSvc *suture.Supervisor, cfg *config.Wrapper, m *model.Model, apiSub *events.BufferedSubscription) {
	opts := cfg.Options()
	// Command line overrides take precedence over the stored GUI config.
	guiCfg := overrideGUIConfig(cfg.GUI(), guiAddress, guiAuthentication, guiAPIKey)

	if guiCfg.Enabled && guiCfg.Address != "" {
		addr, err := net.ResolveTCPAddr("tcp", guiCfg.Address)
		if err != nil {
			l.Fatalf("Cannot start GUI on %q: %v", guiCfg.Address, err)
		} else {
			// hostShow is what we print in the log; hostOpen is what we
			// point the browser at. For wildcard/unspecified listens we
			// open localhost but show the listen address.
			var hostOpen, hostShow string
			switch {
			case addr.IP == nil:
				hostOpen = "localhost"
				hostShow = "0.0.0.0"
			case addr.IP.IsUnspecified():
				hostOpen = "localhost"
				hostShow = addr.IP.String()
			default:
				hostOpen = addr.IP.String()
				hostShow = hostOpen
			}

			var proto = "http"
			if guiCfg.UseTLS {
				proto = "https"
			}

			urlShow := fmt.Sprintf("%s://%s/", proto, net.JoinHostPort(hostShow, strconv.Itoa(addr.Port)))
			l.Infoln("Starting web GUI on", urlShow)
			api, err := newAPISvc(myID, guiCfg, guiAssets, m, apiSub)
			if err != nil {
				l.Fatalln("Cannot start GUI:", err)
			}
			// The API service reacts to configuration changes itself.
			cfg.Subscribe(api)
			mainSvc.Add(api)

			if opts.StartBrowser && !noBrowser && !stRestarting {
				urlOpen := fmt.Sprintf("%s://%s/", proto, net.JoinHostPort(hostOpen, strconv.Itoa(addr.Port)))
				// Can potentially block if the utility we are invoking doesn't
				// fork, and just execs, hence keep it in it's own routine.
				go openURL(urlOpen)
			}
		}
	}
}
|
|
|
|
|
|
|
|
// defaultConfig builds the configuration for a fresh install: a single
// "default" folder, ourselves as the only device, and free listen ports
// picked for the GUI (preferring 8384) and the sync protocol (preferring
// 22000). Inability to find a free port is fatal.
func defaultConfig(myName string) config.Configuration {
	newCfg := config.New(myID)
	newCfg.Folders = []config.FolderConfiguration{
		{
			ID:              "default",
			RawPath:         locations[locDefFolder],
			RescanIntervalS: 60,
			MinDiskFreePct:  1,
			Devices:         []config.FolderDeviceConfiguration{{DeviceID: myID}},
		},
	}
	newCfg.Devices = []config.DeviceConfiguration{
		{
			DeviceID:  myID,
			Addresses: []string{"dynamic"},
			Name:      myName,
		},
	}

	// GUI listens on loopback only by default.
	port, err := getFreePort("127.0.0.1", 8384)
	if err != nil {
		l.Fatalln("get free port (GUI):", err)
	}
	newCfg.GUI.Address = fmt.Sprintf("127.0.0.1:%d", port)

	// The sync protocol (BEP) listens on all interfaces.
	port, err = getFreePort("0.0.0.0", 22000)
	if err != nil {
		l.Fatalln("get free port (BEP):", err)
	}
	newCfg.Options.ListenAddress = []string{fmt.Sprintf("0.0.0.0:%d", port)}
	return newCfg
}
|
|
|
|
|
2015-03-26 23:26:51 +01:00
|
|
|
// generatePingEvents emits a Ping event on the default event bus every
// pingEventInterval, forever. Run it in its own goroutine; the pings keep
// long-polling event consumers alive.
func generatePingEvents() {
	for {
		time.Sleep(pingEventInterval)
		events.Default.Log(events.Ping, nil)
	}
}
|
|
|
|
|
2015-04-03 20:06:03 +02:00
|
|
|
// resetDB removes the index database directory entirely, forcing a full
// reindex on the next startup.
func resetDB() error {
	return os.RemoveAll(locations[locDatabase])
}
|
|
|
|
|
2014-02-12 12:10:44 +01:00
|
|
|
// restart requests an orderly exit with the "restart" exit code, which
// the monitor process interprets as a request to start us again.
func restart() {
	l.Infoln("Restarting")
	stop <- exitRestarting
}
|
|
|
|
|
2014-05-12 01:16:27 +02:00
|
|
|
// shutdown requests an orderly exit with the success exit code.
func shutdown() {
	l.Infoln("Shutting down")
	stop <- exitSuccess
}
|
|
|
|
|
2015-07-17 22:22:07 +02:00
|
|
|
// discovery creates and returns the discoverer. Local discovery
// announcements start immediately when enabled; global announcements,
// when enabled, start in a background goroutine after a short delay so
// that relay connections have time to be established first. extPort is
// the externally visible port announced globally.
func discovery(extPort int, relaySvc *relay.Svc) *discover.Discoverer {
	opts := cfg.Options()
	disc := discover.NewDiscoverer(myID, opts.ListenAddress, relaySvc)
	if opts.LocalAnnEnabled {
		l.Infoln("Starting local discovery announcements")
		disc.StartLocal(opts.LocalAnnPort, opts.LocalAnnMCAddr)
	}

	if opts.GlobalAnnEnabled {
		go func() {
			// Defer starting global announce server, giving time to connect
			// to relay servers.
			time.Sleep(5 * time.Second)
			l.Infoln("Starting global discovery announcements")
			disc.StartGlobal(opts.GlobalAnnServers, uint16(extPort))
		}()
	}

	return disc
}
|
|
|
|
|
2013-12-22 00:16:49 +01:00
|
|
|
// ensureDir makes sure dir exists, creating it with mode 0700 when it
// does not; creation failure is fatal. If dir already exists, mode is
// non-negative and the permissions differ, a chmod to mode is attempted;
// chmod failure is only a warning.
func ensureDir(dir string, mode int) {
	fi, err := os.Stat(dir)
	if os.IsNotExist(err) {
		err := osutil.MkdirAll(dir, 0700)
		if err != nil {
			l.Fatalln(err)
		}
	} else if mode >= 0 && err == nil && int(fi.Mode()&0777) != mode {
		err := os.Chmod(dir, os.FileMode(mode))
		// This can fail on crappy filesystems, nothing we can do about it.
		if err != nil {
			l.Warnln(err)
		}
	}
}
|
|
|
|
|
2014-05-11 20:21:41 +02:00
|
|
|
// getFreePort returns a TCP port that is free for listening on host. The
// preferred ports are probed in order and the first available one wins;
// when all are busy, the kernel is asked for an arbitrary free high port.
func getFreePort(host string, ports ...int) (int, error) {
	for _, p := range ports {
		if ln, err := net.Listen("tcp", fmt.Sprintf("%s:%d", host, p)); err == nil {
			ln.Close()
			return p, nil
		}
	}

	ln, err := net.Listen("tcp", host+":0")
	if err != nil {
		return 0, err
	}
	defer ln.Close()
	return ln.Addr().(*net.TCPAddr).Port, nil
}
|
2014-05-11 20:21:41 +02:00
|
|
|
|
2014-10-06 09:25:45 +02:00
|
|
|
func overrideGUIConfig(cfg config.GUIConfiguration, address, authentication, apikey string) config.GUIConfiguration {
|
2014-08-16 00:24:24 +02:00
|
|
|
if address != "" {
|
|
|
|
cfg.Enabled = true
|
|
|
|
|
2014-10-06 09:25:45 +02:00
|
|
|
if !strings.Contains(address, "//") {
|
|
|
|
// Assume just an IP was given. Don't touch he TLS setting.
|
|
|
|
cfg.Address = address
|
|
|
|
} else {
|
|
|
|
parsed, err := url.Parse(address)
|
|
|
|
if err != nil {
|
|
|
|
l.Fatalln(err)
|
|
|
|
}
|
2014-10-06 17:55:54 +02:00
|
|
|
cfg.Address = parsed.Host
|
2014-10-06 09:25:45 +02:00
|
|
|
switch parsed.Scheme {
|
|
|
|
case "http":
|
|
|
|
cfg.UseTLS = false
|
|
|
|
case "https":
|
|
|
|
cfg.UseTLS = true
|
|
|
|
default:
|
|
|
|
l.Fatalln("Unknown scheme:", parsed.Scheme)
|
|
|
|
}
|
2014-08-16 00:24:24 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if authentication != "" {
|
|
|
|
authenticationParts := strings.SplitN(authentication, ":", 2)
|
|
|
|
|
|
|
|
hash, err := bcrypt.GenerateFromPassword([]byte(authenticationParts[1]), 0)
|
|
|
|
if err != nil {
|
|
|
|
l.Fatalln("Invalid GUI password:", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
cfg.User = authenticationParts[0]
|
|
|
|
cfg.Password = string(hash)
|
|
|
|
}
|
|
|
|
|
|
|
|
if apikey != "" {
|
|
|
|
cfg.APIKey = apikey
|
|
|
|
}
|
|
|
|
return cfg
|
|
|
|
}
|
2014-08-27 23:38:36 +02:00
|
|
|
|
|
|
|
func standbyMonitor() {
|
2014-09-11 20:25:08 +02:00
|
|
|
restartDelay := time.Duration(60 * time.Second)
|
2014-08-27 23:38:36 +02:00
|
|
|
now := time.Now()
|
|
|
|
for {
|
|
|
|
time.Sleep(10 * time.Second)
|
|
|
|
if time.Since(now) > 2*time.Minute {
|
2014-09-27 14:44:15 +02:00
|
|
|
l.Infof("Paused state detected, possibly woke up from standby. Restarting in %v.", restartDelay)
|
2014-09-10 22:20:03 +02:00
|
|
|
|
|
|
|
// We most likely just woke from standby. If we restart
|
|
|
|
// immediately chances are we won't have networking ready. Give
|
|
|
|
// things a moment to stabilize.
|
2014-09-11 20:25:08 +02:00
|
|
|
time.Sleep(restartDelay)
|
2014-09-10 22:20:03 +02:00
|
|
|
|
2014-08-27 23:38:36 +02:00
|
|
|
restart()
|
2014-09-10 22:20:03 +02:00
|
|
|
return
|
2014-08-27 23:38:36 +02:00
|
|
|
}
|
|
|
|
now = time.Now()
|
|
|
|
}
|
|
|
|
}
|
2014-09-26 00:51:12 +02:00
|
|
|
|
|
|
|
// autoUpgrade periodically checks for and performs automatic upgrades.
// A check runs immediately, then every AutoUpgradeIntervalH hours, and
// additionally whenever we connect to a device running a newer syncthing
// version. On a successful upgrade it waits a minute and then signals an
// exit with the upgrade exit code; if upgrades are unsupported on this
// build it returns silently. Run in its own goroutine.
func autoUpgrade() {
	// Zero-duration timer: fire the first check right away.
	timer := time.NewTimer(0)
	sub := events.Default.Subscribe(events.DeviceConnected)
	for {
		select {
		case event := <-sub.C():
			// Only trigger an early check when the connected peer is a
			// syncthing client reporting a strictly newer version.
			data, ok := event.Data.(map[string]string)
			if !ok || data["clientName"] != "syncthing" || upgrade.CompareVersions(data["clientVersion"], Version) != upgrade.Newer {
				continue
			}
			l.Infof("Connected to device %s with a newer version (current %q < remote %q). Checking for upgrades.", data["id"], Version, data["clientVersion"])
		case <-timer.C:
		}

		rel, err := upgrade.LatestRelease(Version)
		if err == upgrade.ErrUpgradeUnsupported {
			events.Default.Unsubscribe(sub)
			return
		}
		if err != nil {
			// Don't complain too loudly here; we might simply not have
			// internet connectivity, or the upgrade server might be down.
			l.Infoln("Automatic upgrade:", err)
			timer.Reset(time.Duration(cfg.Options().AutoUpgradeIntervalH) * time.Hour)
			continue
		}

		if upgrade.CompareVersions(rel.Tag, Version) != upgrade.Newer {
			// Skip equal, older or majorly newer (incompatible) versions
			timer.Reset(time.Duration(cfg.Options().AutoUpgradeIntervalH) * time.Hour)
			continue
		}

		l.Infof("Automatic upgrade (current %q < latest %q)", Version, rel.Tag)
		err = upgrade.To(rel)
		if err != nil {
			l.Warnln("Automatic upgrade:", err)
			timer.Reset(time.Duration(cfg.Options().AutoUpgradeIntervalH) * time.Hour)
			continue
		}
		events.Default.Unsubscribe(sub)
		l.Warnf("Automatically upgraded to version %q. Restarting in 1 minute.", rel.Tag)
		// Grace period before restarting into the new version.
		time.Sleep(time.Minute)
		stop <- exitUpgrading
		return
	}
}
|
2015-04-07 09:25:28 +02:00
|
|
|
|
|
|
|
// cleanConfigDirectory removes old, unused configuration and index formats, a
// suitable time after they have gone out of fashion.
func cleanConfigDirectory() {
	// Glob pattern (relative to the config dir) -> how long matching
	// files are kept before removal.
	patterns := map[string]time.Duration{
		"panic-*.log":    7 * 24 * time.Hour,  // keep panic logs for a week
		"audit-*.log":    7 * 24 * time.Hour,  // keep audit logs for a week
		"index":          14 * 24 * time.Hour, // keep old index format for two weeks
		"config.xml.v*":  30 * 24 * time.Hour, // old config versions for a month
		"*.idx.gz":       30 * 24 * time.Hour, // these should for sure no longer exist
		"backup-of-v0.8": 30 * 24 * time.Hour, // these neither
	}

	// Cleaning is best effort: any error is logged and we move on.
	for pat, dur := range patterns {
		pat = filepath.Join(baseDirs["config"], pat)
		files, err := osutil.Glob(pat)
		if err != nil {
			l.Infoln("Cleaning:", err)
			continue
		}

		for _, file := range files {
			// Lstat, so the age of a symlink itself is checked —
			// assuming osutil.Lstat mirrors os.Lstat; TODO confirm.
			info, err := osutil.Lstat(file)
			if err != nil {
				l.Infoln("Cleaning:", err)
				continue
			}

			if time.Since(info.ModTime()) > dur {
				if err = os.RemoveAll(file); err != nil {
					l.Infoln("Cleaning:", err)
				} else {
					l.Infoln("Cleaned away old file", filepath.Base(file))
				}
			}
		}
	}
}
|
2015-04-09 12:53:13 +02:00
|
|
|
|
|
|
|
// checkShortIDs verifies that the configuration won't result in duplicate
|
|
|
|
// short ID:s; that is, that the devices in the cluster all have unique
|
|
|
|
// initial 64 bits.
|
|
|
|
func checkShortIDs(cfg *config.Wrapper) error {
|
|
|
|
exists := make(map[uint64]protocol.DeviceID)
|
|
|
|
for deviceID := range cfg.Devices() {
|
|
|
|
shortID := deviceID.Short()
|
|
|
|
if otherID, ok := exists[shortID]; ok {
|
|
|
|
return fmt.Errorf("%v in conflict with %v", deviceID, otherID)
|
|
|
|
}
|
|
|
|
exists[shortID] = deviceID
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
2015-08-16 16:10:16 +02:00
|
|
|
|
|
|
|
// A "better" version of leveldb's errors.IsCorrupted.
|
|
|
|
func leveldbIsCorrupted(err error) bool {
|
|
|
|
switch {
|
|
|
|
case err == nil:
|
|
|
|
return false
|
|
|
|
|
|
|
|
case errors.IsCorrupted(err):
|
|
|
|
return true
|
|
|
|
|
|
|
|
case strings.Contains(err.Error(), "corrupted"):
|
|
|
|
return true
|
|
|
|
}
|
|
|
|
|
|
|
|
return false
|
|
|
|
}
|