all: Revert the underscore sillyness

Jakob Borg 2019-02-02 12:16:27 +01:00
parent 9fd270d78e
commit c2ddc83509
70 changed files with 252 additions and 252 deletions
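The change is mechanical throughout: every call whose result had been explicitly discarded with a blank-identifier assignment ("_ =" or "_, _ =") goes back to a plain call statement, so the errors are still deliberately ignored, just without the extra noise. A minimal sketch of the before/after pattern, using a hypothetical cleanup helper rather than code from this commit:

// Illustrative sketch only (hypothetical cleanup helper, not code from this
// commit): the same deliberately-ignored calls before and after the revert.
package main

import (
	"io"
	"os"
)

func cleanup(w io.Writer, tempName string) {
	// Before the revert: results explicitly discarded via the blank identifier.
	_ = os.Remove(tempName)        // error deliberately ignored
	_, _ = w.Write([]byte("ok\n")) // bytes written and error deliberately ignored

	// After the revert: the same calls as plain statements, results dropped.
	os.Remove(tempName)
	w.Write([]byte("ok\n"))
}

func main() {
	cleanup(os.Stdout, "nonexistent.tmp")
}

Where a call's failure actually matters (for example the fd1.Write in the ignore tests below), the error is still assigned and checked; only the deliberately-ignored cases change.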

@@ -82,7 +82,7 @@ func generateOneFile(fd io.ReadSeeker, p1 string, s int64) error {
 return err
 }
-_ = os.Chmod(p1, os.FileMode(rand.Intn(0777)|0400))
+os.Chmod(p1, os.FileMode(rand.Intn(0777)|0400))
 t := time.Now().Add(-time.Duration(rand.Intn(30*86400)) * time.Second)
 return os.Chtimes(p1, t, t)

@@ -44,7 +44,7 @@ func (s *auditService) Serve() {
 for {
 select {
 case ev := <-sub.C():
-_ = enc.Encode(ev)
+enc.Encode(ev)
 case <-s.stop:
 return
 }

@@ -13,6 +13,6 @@ import "time"
 func cpuUsage() time.Duration {
 var rusage syscall.Rusage
-_ = syscall.Getrusage(syscall.RUSAGE_SELF, &rusage)
+syscall.Getrusage(syscall.RUSAGE_SELF, &rusage)
 return time.Duration(rusage.Utime.Nano() + rusage.Stime.Nano())
 }

@@ -965,7 +965,7 @@ func (s *apiService) postSystemShutdown(w http.ResponseWriter, r *http.Request)
 }
 func (s *apiService) flushResponse(resp string, w http.ResponseWriter) {
-_, _ = w.Write([]byte(resp + "\n"))
+w.Write([]byte(resp + "\n"))
 f := w.(http.Flusher)
 f.Flush()
 }
@@ -1152,7 +1152,7 @@ func (s *apiService) getSupportBundle(w http.ResponseWriter, r *http.Request) {
 // Serve the buffer zip to client for download
 w.Header().Set("Content-Type", "application/zip")
 w.Header().Set("Content-Disposition", "attachment; filename="+zipFileName)
-_, _ = io.Copy(w, &zipFilesBuffer)
+io.Copy(w, &zipFilesBuffer)
 }
 func (s *apiService) getSystemHTTPMetrics(w http.ResponseWriter, r *http.Request) {
@@ -1172,7 +1172,7 @@ func (s *apiService) getSystemHTTPMetrics(w http.ResponseWriter, r *http.Request
 }
 })
 bs, _ := json.MarshalIndent(stats, "", " ")
-_, _ = w.Write(bs)
+w.Write(bs)
 }
 func (s *apiService) getSystemDiscovery(w http.ResponseWriter, r *http.Request) {
@@ -1464,7 +1464,7 @@ func (s *apiService) getQR(w http.ResponseWriter, r *http.Request) {
 }
 w.Header().Set("Content-Type", "image/png")
-_, _ = w.Write(code.PNG())
+w.Write(code.PNG())
 }
 func (s *apiService) getPeerCompletion(w http.ResponseWriter, r *http.Request) {
@@ -1562,7 +1562,7 @@ func (s *apiService) getSystemBrowse(w http.ResponseWriter, r *http.Request) {
 // Default value or in case of error unmarshalling ends up being basic fs.
 var fsType fs.FilesystemType
-_ = fsType.UnmarshalText([]byte(qs.Get("filesystem")))
+fsType.UnmarshalText([]byte(qs.Get("filesystem")))
 sendJSON(w, browseFiles(current, fsType))
 }
@@ -1659,7 +1659,7 @@ func (s *apiService) getHeapProf(w http.ResponseWriter, r *http.Request) {
 w.Header().Set("Content-Disposition", "attachment; filename="+filename)
 runtime.GC()
-_ = pprof.WriteHeapProfile(w)
+pprof.WriteHeapProfile(w)
 }
 func toJsonFileInfoSlice(fs []db.FileInfoTruncated) []jsonDBFileInfo {

@@ -160,7 +160,7 @@ func (s *staticsServer) serveAsset(w http.ResponseWriter, r *http.Request) {
 }
 w.Header().Set("Content-Length", fmt.Sprintf("%d", len(bs)))
-_, _ = w.Write(bs)
+w.Write(bs)
 }
 func (s *staticsServer) serveThemes(w http.ResponseWriter, r *http.Request) {

@@ -114,13 +114,13 @@ func TestAssetsDir(t *testing.T) {
 // The asset map contains compressed assets, so create a couple of gzip compressed assets here.
 buf := new(bytes.Buffer)
 gw := gzip.NewWriter(buf)
-_, _ = gw.Write([]byte("default"))
+gw.Write([]byte("default"))
 gw.Close()
 def := buf.Bytes()
 buf = new(bytes.Buffer)
 gw = gzip.NewWriter(buf)
-_, _ = gw.Write([]byte("foo"))
+gw.Write([]byte("foo"))
 gw.Close()
 foo := buf.Bytes()

@@ -49,7 +49,7 @@ func saveHeapProfiles(rate int) {
 panic(err)
 }
-_ = os.Remove(name) // Error deliberately ignored
+os.Remove(name) // Error deliberately ignored
 err = os.Rename(name+".tmp", name)
 if err != nil {
 panic(err)

@@ -634,7 +634,7 @@ func syncthingMain(runtimeOptions RuntimeOptions) {
 // Attempt to increase the limit on number of open files to the maximum
 // allowed, in case we have many peers. We don't really care enough to
 // report the error if there is one.
-_, _ = osutil.MaximizeOpenFileLimit()
+osutil.MaximizeOpenFileLimit()
 // Ensure that we have a certificate and key.
 cert, err := tls.LoadX509KeyPair(locations[locCertFile], locations[locKeyFile])
@@ -757,7 +757,7 @@ func syncthingMain(runtimeOptions RuntimeOptions) {
 // Add and start folders
 for _, folderCfg := range cfg.Folders() {
 if folderCfg.Paused {
-_ = folderCfg.CreateRoot()
+folderCfg.CreateRoot()
 continue
 }
 m.AddFolder(folderCfg)
@@ -847,8 +847,8 @@ func syncthingMain(runtimeOptions RuntimeOptions) {
 l.Infoln("Anonymous usage reporting is always enabled for candidate releases.")
 if opts.URAccepted != usageReportVersion {
 opts.URAccepted = usageReportVersion
-_, _ = cfg.SetOptions(opts)
-_ = cfg.Save()
+cfg.SetOptions(opts)
+cfg.Save()
 // Unique ID will be set and config saved below if necessary.
 }
 }
@@ -856,8 +856,8 @@ func syncthingMain(runtimeOptions RuntimeOptions) {
 // If we are going to do usage reporting, ensure we have a valid unique ID.
 if opts := cfg.Options(); opts.URAccepted > 0 && opts.URUniqueID == "" {
 opts.URUniqueID = rand.String(8)
-_, _ = cfg.SetOptions(opts)
-_ = cfg.Save()
+cfg.SetOptions(opts)
+cfg.Save()
 }
 usageReportingSvc := newUsageReportingService(cfg, m, connectionsService)
@@ -877,8 +877,8 @@ func syncthingMain(runtimeOptions RuntimeOptions) {
 opts.AutoUpgradeIntervalH = 12
 // Set the option into the config as well, as the auto upgrade
 // loop expects to read a valid interval from there.
-_, _ = cfg.SetOptions(opts)
-_ = cfg.Save()
+cfg.SetOptions(opts)
+cfg.Save()
 }
 // We don't tweak the user's choice of upgrading to pre-releases or
 // not, as otherwise they cannot step off the candidate channel.
@@ -959,7 +959,7 @@ func loadConfigAtStartup() *config.Wrapper {
 cfg, err := config.Load(cfgFile, myID)
 if os.IsNotExist(err) {
 cfg = defaultConfig(cfgFile)
-_ = cfg.Save()
+cfg.Save()
 l.Infof("Default config saved. Edit %s to taste or use the GUI\n", cfg.ConfigPath())
 } else if err == io.EOF {
 l.Fatalln("Failed to load config: unexpected end of file. Truncated or empty configuration?")

@@ -30,7 +30,7 @@ func (c *mockedConfig) LDAP() config.LDAPConfiguration {
 func (c *mockedConfig) RawCopy() config.Configuration {
 cfg := config.Configuration{}
-_ = util.SetDefaults(&cfg.Options)
+util.SetDefaults(&cfg.Options)
 return cfg
 }

@@ -127,13 +127,13 @@ func monitorMain(runtimeOptions RuntimeOptions) {
 select {
 case s := <-stopSign:
 l.Infof("Signal %d received; exiting", s)
-_ = cmd.Process.Signal(sigTerm)
+cmd.Process.Signal(sigTerm)
 <-exit
 return
 case s := <-restartSign:
 l.Infof("Signal %d received; restarting", s)
-_ = cmd.Process.Signal(sigHup)
+cmd.Process.Signal(sigHup)
 err = <-exit
 case err = <-exit:
@@ -179,7 +179,7 @@ func copyStderr(stderr io.Reader, dst io.Writer) {
 }
 if panicFd == nil {
-_, _ = dst.Write([]byte(line))
+dst.Write([]byte(line))
 if strings.Contains(line, "SIGILL") {
 l.Warnln(`
@@ -226,20 +226,20 @@ func copyStderr(stderr io.Reader, dst io.Writer) {
 stdoutMut.Lock()
 for _, line := range stdoutFirstLines {
-_, _ = panicFd.WriteString(line)
+panicFd.WriteString(line)
 }
-_, _ = panicFd.WriteString("...\n")
+panicFd.WriteString("...\n")
 for _, line := range stdoutLastLines {
-_, _ = panicFd.WriteString(line)
+panicFd.WriteString(line)
 }
 stdoutMut.Unlock()
 }
-_, _ = panicFd.WriteString("Panic at " + time.Now().Format(time.RFC3339) + "\n")
+panicFd.WriteString("Panic at " + time.Now().Format(time.RFC3339) + "\n")
 }
 if panicFd != nil {
-_, _ = panicFd.WriteString(line)
+panicFd.WriteString(line)
 }
 }
 }
@@ -263,7 +263,7 @@ func copyStdout(stdout io.Reader, dst io.Writer) {
 }
 stdoutMut.Unlock()
-_, _ = dst.Write([]byte(line))
+dst.Write([]byte(line))
 }
 }

@@ -17,7 +17,7 @@ import (
 func TestAutoClosedFile(t *testing.T) {
 os.RemoveAll("_autoclose")
 defer os.RemoveAll("_autoclose")
-_ = os.Mkdir("_autoclose", 0755)
+os.Mkdir("_autoclose", 0755)
 file := filepath.FromSlash("_autoclose/tmp")
 data := []byte("hello, world\n")

@@ -19,11 +19,11 @@ func optionTable(w io.Writer, rows [][]string) {
 for _, row := range rows {
 for i, cell := range row {
 if i > 0 {
-_, _ = tw.Write([]byte("\t"))
+tw.Write([]byte("\t"))
 }
-_, _ = tw.Write([]byte(cell))
+tw.Write([]byte(cell))
 }
-_, _ = tw.Write([]byte("\n"))
+tw.Write([]byte("\n"))
 }
 tw.Flush()
 }

@@ -427,7 +427,7 @@ func (*usageReportingService) String() string {
 func cpuBench(iterations int, duration time.Duration, useWeakHash bool) float64 {
 dataSize := 16 * protocol.MinBlockSize
 bs := make([]byte, dataSize)
-_, _ = rand.Reader.Read(bs)
+rand.Reader.Read(bs)
 var perf float64
 for i := 0; i < iterations; i++ {

@@ -126,9 +126,9 @@ func (w *broadcastWriter) Serve() {
 for _, ip := range dsts {
 dst := &net.UDPAddr{IP: ip, Port: w.port}
-_ = conn.SetWriteDeadline(time.Now().Add(time.Second))
+conn.SetWriteDeadline(time.Now().Add(time.Second))
 _, err := conn.WriteTo(bs, dst)
-_ = conn.SetWriteDeadline(time.Time{})
+conn.SetWriteDeadline(time.Time{})
 if err, ok := err.(net.Error); ok && err.Timeout() {
 // Write timeouts should not happen. We treat it as a fatal

@@ -117,9 +117,9 @@ func (w *multicastWriter) Serve() {
 success := 0
 for _, intf := range intfs {
 wcm.IfIndex = intf.Index
-_ = pconn.SetWriteDeadline(time.Now().Add(time.Second))
+pconn.SetWriteDeadline(time.Now().Add(time.Second))
 _, err = pconn.WriteTo(bs, wcm, gaddr)
-_ = pconn.SetWriteDeadline(time.Time{})
+pconn.SetWriteDeadline(time.Time{})
 if err != nil {
 l.Debugln(err, "on write to", gaddr, intf.Name)

@@ -71,9 +71,9 @@ func New(myID protocol.DeviceID) Configuration {
 cfg.Version = CurrentVersion
 cfg.OriginalVersion = CurrentVersion
-_ = util.SetDefaults(&cfg)
-_ = util.SetDefaults(&cfg.Options)
-_ = util.SetDefaults(&cfg.GUI)
+util.SetDefaults(&cfg)
+util.SetDefaults(&cfg.Options)
+util.SetDefaults(&cfg.GUI)
 // Can't happen.
 if err := cfg.prepare(myID); err != nil {
@@ -86,9 +86,9 @@ func New(myID protocol.DeviceID) Configuration {
 func ReadXML(r io.Reader, myID protocol.DeviceID) (Configuration, error) {
 var cfg Configuration
-_ = util.SetDefaults(&cfg)
-_ = util.SetDefaults(&cfg.Options)
-_ = util.SetDefaults(&cfg.GUI)
+util.SetDefaults(&cfg)
+util.SetDefaults(&cfg.Options)
+util.SetDefaults(&cfg.GUI)
 if err := xml.NewDecoder(r).Decode(&cfg); err != nil {
 return Configuration{}, err
@@ -104,9 +104,9 @@ func ReadXML(r io.Reader, myID protocol.DeviceID) (Configuration, error) {
 func ReadJSON(r io.Reader, myID protocol.DeviceID) (Configuration, error) {
 var cfg Configuration
-_ = util.SetDefaults(&cfg)
-_ = util.SetDefaults(&cfg.Options)
-_ = util.SetDefaults(&cfg.GUI)
+util.SetDefaults(&cfg)
+util.SetDefaults(&cfg.Options)
+util.SetDefaults(&cfg.GUI)
 bs, err := ioutil.ReadAll(r)
 if err != nil {
@@ -211,7 +211,7 @@ found:
 }
 func (cfg *Configuration) clean() error {
-_ = util.FillNilSlices(&cfg.Options)
+util.FillNilSlices(&cfg.Options)
 // Prepare folders and check for duplicates. Duplicates are bad and
 // dangerous, can't currently be resolved in the GUI, and shouldn't
@@ -477,7 +477,7 @@ func convertV22V23(cfg *Configuration) {
 err = fs.Remove(DefaultMarkerName)
 if err == nil {
 err = fs.Mkdir(DefaultMarkerName, permBits)
-_ = fs.Hide(DefaultMarkerName) // ignore error
+fs.Hide(DefaultMarkerName) // ignore error
 }
 if err != nil {
 l.Infoln("Failed to upgrade folder marker:", err)
@@ -810,13 +810,13 @@ func cleanSymlinks(filesystem fs.Filesystem, dir string) {
 // should leave alone. Deduplicated files, for example.
 return
 }
-_ = filesystem.Walk(dir, func(path string, info fs.FileInfo, err error) error {
+filesystem.Walk(dir, func(path string, info fs.FileInfo, err error) error {
 if err != nil {
 return err
 }
 if info.IsSymlink() {
 l.Infoln("Removing incorrectly versioned symlink", path)
-_ = filesystem.Remove(path)
+filesystem.Remove(path)
 return fs.SkipDir
 }
 return nil

@@ -548,7 +548,7 @@ func TestPrepare(t *testing.T) {
 t.Error("Expected nil")
 }
-_ = cfg.prepare(device1)
+cfg.prepare(device1)
 if cfg.Folders == nil || cfg.Devices == nil || cfg.Options.ListenAddresses == nil {
 t.Error("Unexpected nil")
@@ -627,7 +627,7 @@ func TestPullOrder(t *testing.T) {
 buf := new(bytes.Buffer)
 cfg := wrapper.RawCopy()
-_ = cfg.WriteXML(buf)
+cfg.WriteXML(buf)
 t.Logf("%s", buf.Bytes())
@@ -1080,7 +1080,7 @@ func TestDeviceConfigObservedNotNil(t *testing.T) {
 },
 }
-_ = cfg.prepare(device1)
+cfg.prepare(device1)
 for _, dev := range cfg.Devices {
 if dev.IgnoredFolders == nil {

@@ -142,7 +142,7 @@ func (f *FolderConfiguration) CreateMarker() error {
 } else if err := dir.Sync(); err != nil {
 l.Debugln("folder marker: fsync . failed:", err)
 }
-_ = fs.Hide(DefaultMarkerName)
+fs.Hide(DefaultMarkerName)
 return nil
 }

@@ -448,7 +448,7 @@ func (w *Wrapper) MyName() string {
 }
 func (w *Wrapper) AddOrUpdatePendingDevice(device protocol.DeviceID, name, address string) {
-defer func() { _ = w.Save() }()
+defer w.Save()
 w.mut.Lock()
 defer w.mut.Unlock()
@@ -471,7 +471,7 @@ func (w *Wrapper) AddOrUpdatePendingDevice(device protocol.DeviceID, name, addre
 }
 func (w *Wrapper) AddOrUpdatePendingFolder(id, label string, device protocol.DeviceID) {
-defer func() { _ = w.Save() }()
+defer w.Save()
 w.mut.Lock()
 defer w.mut.Unlock()

@@ -257,17 +257,17 @@ func take(waiter waiter, tokens int) {
 if tokens < limiterBurstSize {
 // This is the by far more common case so we get it out of the way
 // early.
-_ = waiter.WaitN(context.TODO(), tokens)
+waiter.WaitN(context.TODO(), tokens)
 return
 }
 for tokens > 0 {
 // Consume limiterBurstSize tokens at a time until we're done.
 if tokens > limiterBurstSize {
-_ = waiter.WaitN(context.TODO(), limiterBurstSize)
+waiter.WaitN(context.TODO(), limiterBurstSize)
 tokens -= limiterBurstSize
 } else {
-_ = waiter.WaitN(context.TODO(), tokens)
+waiter.WaitN(context.TODO(), tokens)
 tokens = 0
 }
 }

@@ -190,7 +190,7 @@ next:
 continue
 }
-_ = c.SetDeadline(time.Now().Add(20 * time.Second))
+c.SetDeadline(time.Now().Add(20 * time.Second))
 hello, err := protocol.ExchangeHello(c, s.model.GetHello(remoteID))
 if err != nil {
 if protocol.IsVersionMismatch(err) {
@@ -214,7 +214,7 @@ next:
 c.Close()
 continue
 }
-_ = c.SetDeadline(time.Time{})
+c.SetDeadline(time.Time{})
 // The Model will return an error for devices that we don't want to
 // have a connection with for whatever reason, for example unknown devices.
@@ -569,7 +569,7 @@ func (s *Service) CommitConfiguration(from, to config.Configuration) bool {
 for addr, listener := range s.listeners {
 if _, ok := seen[addr]; !ok || listener.Factory().Valid(to) != nil {
 l.Debugln("Stopping listener", addr)
-_ = s.listenerSupervisor.Remove(s.listenerTokens[addr])
+s.listenerSupervisor.Remove(s.listenerTokens[addr])
 delete(s.listenerTokens, addr)
 delete(s.listeners, addr)
 }
@@ -582,7 +582,7 @@ func (s *Service) CommitConfiguration(from, to config.Configuration) bool {
 s.natServiceToken = &token
 } else if !to.Options.NATEnabled && s.natServiceToken != nil {
 l.Debugln("Stopping NAT service")
-_ = s.Remove(*s.natServiceToken)
+s.Remove(*s.natServiceToken)
 s.natServiceToken = nil
 }
@@ -717,8 +717,8 @@ func warningFor(dev protocol.DeviceID, msg string) {
 }
 func tlsTimedHandshake(tc *tls.Conn) error {
-_ = tc.SetDeadline(time.Now().Add(tlsHandshakeTimeout))
-defer func() { _ = tc.SetDeadline(time.Time{}) }()
+tc.SetDeadline(time.Now().Add(tlsHandshakeTimeout))
+defer tc.SetDeadline(time.Time{})
 return tc.Handshake()
 }

@@ -89,8 +89,8 @@ func (c internalConn) Close() {
 // *tls.Conn.Close() does more than it says on the tin. Specifically, it
 // sends a TLS alert message, which might block forever if the
 // connection is dead and we don't have a deadline set.
-_ = c.SetWriteDeadline(time.Now().Add(250 * time.Millisecond))
-_ = c.Conn.Close()
+c.SetWriteDeadline(time.Now().Add(250 * time.Millisecond))
+c.Conn.Close()
 }
 func (c internalConn) Type() string {

@@ -83,7 +83,7 @@ func (t *tcpListener) Serve() {
 const maxAcceptFailures = 10
 for {
-_ = listener.SetDeadline(time.Now().Add(time.Second))
+listener.SetDeadline(time.Now().Add(time.Second))
 conn, err := listener.Accept()
 select {
 case <-t.stop:

@@ -19,7 +19,7 @@ func TestIgnoredFiles(t *testing.T) {
 t.Fatal(err)
 }
 db := NewLowlevel(ldb, "<memory>")
-_ = UpdateSchema(db)
+UpdateSchema(db)
 fs := NewFileSet("test", fs.NewFilesystem(fs.FilesystemTypeBasic, "."), db)
@@ -204,7 +204,7 @@ func TestUpdate0to3(t *testing.T) {
 func TestDowngrade(t *testing.T) {
 db := OpenMemory()
-_ = UpdateSchema(db) // sets the min version etc
+UpdateSchema(db) // sets the min version etc
 // Bump the database version to something newer than we actually support
 miscDB := NewMiscDataNamespace(db)

@@ -61,7 +61,7 @@ func (n *NamespacedKV) Reset() {
 func (n *NamespacedKV) PutInt64(key string, val int64) {
 var valBs [8]byte
 binary.BigEndian.PutUint64(valBs[:], uint64(val))
-_ = n.db.Put(n.prefixedKey(key), valBs[:], nil)
+n.db.Put(n.prefixedKey(key), valBs[:], nil)
 }
 // Int64 returns the stored value interpreted as an int64 and a boolean that
@@ -79,7 +79,7 @@ func (n *NamespacedKV) Int64(key string) (int64, bool) {
 // type) is overwritten.
 func (n *NamespacedKV) PutTime(key string, val time.Time) {
 valBs, _ := val.MarshalBinary() // never returns an error
-_ = n.db.Put(n.prefixedKey(key), valBs, nil)
+n.db.Put(n.prefixedKey(key), valBs, nil)
 }
 // Time returns the stored value interpreted as a time.Time and a boolean
@@ -97,7 +97,7 @@ func (n NamespacedKV) Time(key string) (time.Time, bool) {
 // PutString stores a new string. Any existing value (even if of another type)
 // is overwritten.
 func (n *NamespacedKV) PutString(key, val string) {
-_ = n.db.Put(n.prefixedKey(key), []byte(val), nil)
+n.db.Put(n.prefixedKey(key), []byte(val), nil)
 }
 // String returns the stored value interpreted as a string and a boolean that
@@ -113,7 +113,7 @@ func (n NamespacedKV) String(key string) (string, bool) {
 // PutBytes stores a new byte slice. Any existing value (even if of another type)
 // is overwritten.
 func (n *NamespacedKV) PutBytes(key string, val []byte) {
-_ = n.db.Put(n.prefixedKey(key), val, nil)
+n.db.Put(n.prefixedKey(key), val, nil)
 }
 // Bytes returns the stored value as a raw byte slice and a boolean that
@@ -130,9 +130,9 @@ func (n NamespacedKV) Bytes(key string) ([]byte, bool) {
 // is overwritten.
 func (n *NamespacedKV) PutBool(key string, val bool) {
 if val {
-_ = n.db.Put(n.prefixedKey(key), []byte{0x0}, nil)
+n.db.Put(n.prefixedKey(key), []byte{0x0}, nil)
 } else {
-_ = n.db.Put(n.prefixedKey(key), []byte{0x1}, nil)
+n.db.Put(n.prefixedKey(key), []byte{0x1}, nil)
 }
 }
@@ -149,7 +149,7 @@ func (n NamespacedKV) Bool(key string) (bool, bool) {
 // Delete deletes the specified key. It is allowed to delete a nonexistent
 // key.
 func (n NamespacedKV) Delete(key string) {
-_ = n.db.Delete(n.prefixedKey(key), nil)
+n.db.Delete(n.prefixedKey(key), nil)
 }
 func (n NamespacedKV) prefixedKey(key string) []byte {

@@ -101,7 +101,7 @@ func (s *FileSet) recalcCounts() {
 })
 s.meta.SetCreated()
-_ = s.meta.toDB(s.db, []byte(s.folder))
+s.meta.toDB(s.db, []byte(s.folder))
 }
 func (s *FileSet) Drop(device protocol.DeviceID) {
@@ -127,7 +127,7 @@ func (s *FileSet) Drop(device protocol.DeviceID) {
 s.meta.resetAll(device)
 }
-_ = s.meta.toDB(s.db, []byte(s.folder))
+s.meta.toDB(s.db, []byte(s.folder))
 }
 func (s *FileSet) Update(device protocol.DeviceID, fs []protocol.FileInfo) {
@@ -141,7 +141,7 @@ func (s *FileSet) Update(device protocol.DeviceID, fs []protocol.FileInfo) {
 s.updateMutex.Lock()
 defer s.updateMutex.Unlock()
-defer func() { _ = s.meta.toDB(s.db, []byte(s.folder)) }()
+defer s.meta.toDB(s.db, []byte(s.folder))
 if device == protocol.LocalDeviceID {
 // For the local device we have a bunch of metadata to track.
@@ -295,7 +295,7 @@ func DropDeltaIndexIDs(db *Lowlevel) {
 dbi := db.NewIterator(util.BytesPrefix([]byte{KeyTypeIndexID}), nil)
 defer dbi.Release()
 for dbi.Next() {
-_ = db.Delete(dbi.Key(), nil)
+db.Delete(dbi.Key(), nil)
 }
 }

@@ -82,7 +82,7 @@ func (i *smallIndex) ID(val []byte) uint32 {
 key := make([]byte, len(i.prefix)+8) // prefix plus uint32 id
 copy(key, i.prefix)
 binary.BigEndian.PutUint32(key[len(i.prefix):], id)
-_ = i.db.Put(key, val, nil)
+i.db.Put(key, val, nil)
 i.mut.Unlock()
 return id
@@ -115,7 +115,7 @@ func (i *smallIndex) Delete(val []byte) {
 // Put an empty value into the database. This indicates that the
 // entry does not exist any more and prevents the ID from being
 // reused in the future.
-_ = i.db.Put(key, []byte{}, nil)
+i.db.Put(key, []byte{}, nil)
 // Delete reverse mapping.
 delete(i.id2val, id)

@@ -127,7 +127,7 @@ func (t readWriteTransaction) updateGlobal(gk, keyBuf, folder, device []byte, fi
 var fl VersionList
 if svl, err := t.Get(gk, nil); err == nil {
-_ = fl.Unmarshal(svl) // Ignore error, continue with empty fl
+fl.Unmarshal(svl) // Ignore error, continue with empty fl
 }
 fl, removedFV, removedAt, insertedAt := fl.update(folder, device, file, t.readOnlyTransaction)
 if insertedAt == -1 {

@@ -23,7 +23,7 @@ func writeJSONS(w io.Writer, db *leveldb.DB) {
 defer it.Release()
 enc := json.NewEncoder(w)
 for it.Next() {
-_ = enc.Encode(map[string][]byte{
+enc.Encode(map[string][]byte{
 "k": it.Key(),
 "v": it.Value(),
 })
@@ -54,7 +54,7 @@ func openJSONS(file string) (*leveldb.DB, error) {
 return nil, err
 }
-_ = db.Put(row["k"], row["v"], nil)
+db.Put(row["k"], row["v"], nil)
 }
 return db, nil

@@ -62,7 +62,7 @@ func dialWithFallback(proxyDialFunc dialFunc, fallbackDialFunc dialFunc, network
 conn, err := proxyDialFunc(network, addr)
 if err == nil {
 l.Debugf("Dialing %s address %s via proxy - success, %s -> %s", network, addr, conn.LocalAddr(), conn.RemoteAddr())
-_ = SetTCPOptions(conn)
+SetTCPOptions(conn)
 return dialerConn{
 conn, newDialerAddr(network, addr),
 }, nil
@@ -76,7 +76,7 @@ func dialWithFallback(proxyDialFunc dialFunc, fallbackDialFunc dialFunc, network
 conn, err = fallbackDialFunc(network, addr)
 if err == nil {
 l.Debugf("Dialing %s address %s via fallback - success, %s -> %s", network, addr, conn.LocalAddr(), conn.RemoteAddr())
-_ = SetTCPOptions(conn)
+SetTCPOptions(conn)
 } else {
 l.Debugf("Dialing %s address %s via fallback - error %s", network, addr, err)
 }

@@ -96,7 +96,7 @@ func TestCacheSlowLookup(t *testing.T) {
 // Start a lookup, which will take at least a second
 t0 := time.Now()
-go func() { _, _ = c.Lookup(protocol.LocalDeviceID) }()
+go c.Lookup(protocol.LocalDeviceID)
 <-started // The slow lookup method has been called so we're inside the lock
 // It should be possible to get ChildErrors while it's running

@@ -242,7 +242,7 @@ func (s *fakeDiscoveryServer) handler(w http.ResponseWriter, r *http.Request) {
 w.WriteHeader(204)
 } else {
 w.Header().Set("Content-Type", "application/json")
-_, _ = w.Write([]byte(`{"addresses":["tcp://192.0.2.42::22000"], "relays":[{"url": "relay://192.0.2.43:443", "latency": 42}]}`))
+w.Write([]byte(`{"addresses":["tcp://192.0.2.42::22000"], "relays":[{"url": "relay://192.0.2.43:443", "latency": 42}]}`))
 }
 }

@@ -164,7 +164,7 @@ func TestGlobalIDs(t *testing.T) {
 s := l.Subscribe(AllEvents)
 defer l.Unsubscribe(s)
 l.Log(DeviceConnected, "foo")
-_ = l.Subscribe(AllEvents)
+l.Subscribe(AllEvents)
 l.Log(DeviceConnected, "bar")
 ev, err := s.Poll(timeout)

@@ -33,7 +33,7 @@ func TestChmodFile(t *testing.T) {
 path := filepath.Join(dir, "file")
 defer os.RemoveAll(dir)
-defer func() { _ = os.Chmod(path, 0666) }()
+defer os.Chmod(path, 0666)
 fd, err := os.Create(path)
 if err != nil {
@@ -74,7 +74,7 @@ func TestChownFile(t *testing.T) {
 path := filepath.Join(dir, "file")
 defer os.RemoveAll(dir)
-defer func() { _ = os.Chmod(path, 0666) }()
+defer os.Chmod(path, 0666)
 fd, err := os.Create(path)
 if err != nil {
@@ -116,7 +116,7 @@ func TestChmodDir(t *testing.T) {
 mode = os.FileMode(0777)
 }
-defer func() { _ = os.Chmod(path, mode) }()
+defer os.Chmod(path, mode)
 if err := os.Mkdir(path, mode); err != nil {
 t.Error(err)
@@ -147,7 +147,7 @@ func TestChtimes(t *testing.T) {
 mtime := time.Now().Add(-time.Hour)
-_ = fs.Chtimes("file", mtime, mtime)
+fs.Chtimes("file", mtime, mtime)
 stat, err := os.Stat(path)
 if err != nil {

@@ -98,7 +98,7 @@ func TestWatchInclude(t *testing.T) {
 file := "file"
 ignored := "ignored"
-_ = testFs.MkdirAll(filepath.Join(name, ignored), 0777)
+testFs.MkdirAll(filepath.Join(name, ignored), 0777)
 included := filepath.Join(ignored, "included")
 testCase := func() {
@@ -274,7 +274,7 @@ func TestWatchSymlinkedRoot(t *testing.T) {
 if err := testFs.MkdirAll(name, 0755); err != nil {
 panic(fmt.Sprintf("Failed to create directory %s: %s", name, err))
 }
-defer func() { _ = testFs.RemoveAll(name) }()
+defer testFs.RemoveAll(name)
 root := filepath.Join(name, "root")
 if err := testFs.MkdirAll(root, 0777); err != nil {
@@ -376,7 +376,7 @@ func testScenario(t *testing.T, name string, testCase func(), expectedEvents, al
 if err := testFs.MkdirAll(name, 0755); err != nil {
 panic(fmt.Sprintf("Failed to create directory %s: %s", name, err))
 }
-defer func() { _ = testFs.RemoveAll(name) }()
+defer testFs.RemoveAll(name)
 ctx, cancel := context.WithCancel(context.Background())
 defer cancel()

@@ -105,22 +105,22 @@ func newFakeFilesystem(root string) *fakefs {
 for (files == 0 || createdFiles < files) && (maxsize == 0 || writtenData>>20 < int64(maxsize)) {
 dir := filepath.Join(fmt.Sprintf("%02x", rng.Intn(255)), fmt.Sprintf("%02x", rng.Intn(255)))
 file := fmt.Sprintf("%016x", rng.Int63())
-_ = fs.MkdirAll(dir, 0755)
+fs.MkdirAll(dir, 0755)
 fd, _ := fs.Create(filepath.Join(dir, file))
 createdFiles++
 fsize := int64(sizeavg/2 + rng.Intn(sizeavg))
-_ = fd.Truncate(fsize)
+fd.Truncate(fsize)
 writtenData += fsize
 ftime := time.Unix(1000000000+rng.Int63n(10*365*86400), 0)
-_ = fs.Chtimes(filepath.Join(dir, file), ftime, ftime)
+fs.Chtimes(filepath.Join(dir, file), ftime, ftime)
 }
 }
 // Also create a default folder marker for good measure
-_ = fs.Mkdir(".stfolder", 0700)
+fs.Mkdir(".stfolder", 0700)
 fakefsFs[root] = fs
 return fs
@@ -583,7 +583,7 @@ func (f *fakeFile) readShortAt(p []byte, offs int64) (int, error) {
 // name.
 if f.seed == 0 {
 hf := fnv.New64()
-_, _ = hf.Write([]byte(f.name))
+hf.Write([]byte(f.name))
 f.seed = int64(hf.Sum64())
 }
@@ -601,7 +601,7 @@ func (f *fakeFile) readShortAt(p []byte, offs int64) (int, error) {
 diff := offs - minOffs
 if diff > 0 {
 lr := io.LimitReader(f.rng, diff)
-_, _ = io.Copy(ioutil.Discard, lr)
+io.Copy(ioutil.Discard, lr)
 }
 f.offset = offs

@@ -130,10 +130,10 @@ func TestFakeFSRead(t *testing.T) {
 // Create
 fd, _ := fs.Create("test")
-_ = fd.Truncate(3 * 1 << randomBlockShift)
+fd.Truncate(3 * 1 << randomBlockShift)
 // Read
-_, _ = fd.Seek(0, io.SeekStart)
+fd.Seek(0, io.SeekStart)
 bs0, err := ioutil.ReadAll(fd)
 if err != nil {
 t.Fatal(err)
@@ -143,7 +143,7 @@ func TestFakeFSRead(t *testing.T) {
 }
 // Read again, starting at an odd offset
-_, _ = fd.Seek(0, io.SeekStart)
+fd.Seek(0, io.SeekStart)
 buf0 := make([]byte, 12345)
 n, _ := fd.Read(buf0)
 if n != len(buf0) {

@@ -53,7 +53,7 @@ func (f *MtimeFS) Chtimes(name string, atime, mtime time.Time) error {
 }
 // Do a normal Chtimes call, don't care if it succeeds or not.
-_ = f.chtimes(name, atime, mtime)
+f.chtimes(name, atime, mtime)
 // Stat the file to see what happened. Here we *do* return an error,
 // because it might be "does not exist" or similar.

@@ -18,10 +18,10 @@ import (
 func TestMtimeFS(t *testing.T) {
 os.RemoveAll("testdata")
 defer os.RemoveAll("testdata")
-_ = os.Mkdir("testdata", 0755)
-_ = ioutil.WriteFile("testdata/exists0", []byte("hello"), 0644)
-_ = ioutil.WriteFile("testdata/exists1", []byte("hello"), 0644)
-_ = ioutil.WriteFile("testdata/exists2", []byte("hello"), 0644)
+os.Mkdir("testdata", 0755)
+ioutil.WriteFile("testdata/exists0", []byte("hello"), 0644)
+ioutil.WriteFile("testdata/exists1", []byte("hello"), 0644)
+ioutil.WriteFile("testdata/exists2", []byte("hello"), 0644)
 // a random time with nanosecond precision
 testTime := time.Unix(1234567890, 123456789)
@@ -73,7 +73,7 @@ func TestMtimeFS(t *testing.T) {
 // filesystems.
 testTime = time.Now().Add(5 * time.Hour).Truncate(time.Minute)
-_ = os.Chtimes("testdata/exists0", testTime, testTime)
+os.Chtimes("testdata/exists0", testTime, testTime)
 if info, err := mtimefs.Lstat("testdata/exists0"); err != nil {
 t.Error("Lstat shouldn't fail:", err)
 } else if !info.ModTime().Equal(testTime) {
@@ -93,8 +93,8 @@ func TestMtimeFSInsensitive(t *testing.T) {
 theTest := func(t *testing.T, fs *MtimeFS, shouldSucceed bool) {
 os.RemoveAll("testdata")
 defer os.RemoveAll("testdata")
-_ = os.Mkdir("testdata", 0755)
-_ = ioutil.WriteFile("testdata/FiLe", []byte("hello"), 0644)
+os.Mkdir("testdata", 0755)
+ioutil.WriteFile("testdata/FiLe", []byte("hello"), 0644)
 // a random time with nanosecond precision
 testTime := time.Unix(1234567890, 123456789)

@@ -51,7 +51,7 @@ func TempNameWithPrefix(name, prefix string) string {
 tbase := filepath.Base(name)
 if len(tbase) > maxFilenameLength {
 hash := md5.New()
-_, _ = hash.Write([]byte(name))
+hash.Write([]byte(name))
 tbase = fmt.Sprintf("%x", hash.Sum(nil))
 }
 tname := fmt.Sprintf("%s%s.tmp", prefix, tbase)

@@ -136,7 +136,7 @@ func (m *Matcher) Load(file string) error {
 fd, info, err := loadIgnoreFile(m.fs, file, m.changeDetector)
 if err != nil {
-_ = m.parseLocked(&bytes.Buffer{}, file)
+m.parseLocked(&bytes.Buffer{}, file)
 return err
 }
 defer fd.Close()
@@ -310,8 +310,8 @@ func (m *Matcher) SkipIgnoredDirs() bool {
 func hashPatterns(patterns []Pattern) string {
 h := md5.New()
 for _, pat := range patterns {
-_, _ = h.Write([]byte(pat.String()))
-_, _ = h.Write([]byte("\n"))
+h.Write([]byte(pat.String()))
+h.Write([]byte("\n"))
 }
 return fmt.Sprintf("%x", h.Sum(nil))
 }
@@ -505,7 +505,7 @@ func WriteIgnores(filesystem fs.Filesystem, path string, content []string) error
 if err := fd.Close(); err != nil {
 return err
 }
-_ = filesystem.Hide(path)
+filesystem.Hide(path)
 return nil
 }

@@ -247,15 +247,15 @@ func TestCaching(t *testing.T) {
 defer fd1.Close()
 defer fd2.Close()
-defer func() { _ = fs.Remove(fd1.Name()) }()
-defer func() { _ = fs.Remove(fd2.Name()) }()
+defer fs.Remove(fd1.Name())
+defer fs.Remove(fd2.Name())
 _, err = fd1.Write([]byte("/x/\n#include " + filepath.Base(fd2.Name()) + "\n"))
 if err != nil {
 t.Fatal(err)
 }
-_, _ = fd2.Write([]byte("/y/\n"))
+fd2.Write([]byte("/y/\n"))
 pats := New(fs, WithCache(true))
 err = pats.Load(fd1.Name())
@@ -290,10 +290,10 @@ func TestCaching(t *testing.T) {
 // Modify the include file, expect empty cache. Ensure the timestamp on
 // the file changes.
-_, _ = fd2.Write([]byte("/z/\n"))
-_ = fd2.Sync()
+fd2.Write([]byte("/z/\n"))
+fd2.Sync()
 fakeTime := time.Now().Add(5 * time.Second)
-_ = fs.Chtimes(fd2.Name(), fakeTime, fakeTime)
+fs.Chtimes(fd2.Name(), fakeTime, fakeTime)
 err = pats.Load(fd1.Name())
 if err != nil {
@@ -322,10 +322,10 @@ func TestCaching(t *testing.T) {
 // Modify the root file, expect cache to be invalidated
-_, _ = fd1.Write([]byte("/a/\n"))
-_ = fd1.Sync()
+fd1.Write([]byte("/a/\n"))
+fd1.Sync()
 fakeTime = time.Now().Add(5 * time.Second)
-_ = fs.Chtimes(fd1.Name(), fakeTime, fakeTime)
+fs.Chtimes(fd1.Name(), fakeTime, fakeTime)
 err = pats.Load(fd1.Name())
 if err != nil {
@@ -435,7 +435,7 @@ flamingo
 _, err = fd.Write([]byte(stignore))
 defer fd.Close()
-defer func() { _ = fs.Remove(fd.Name()) }()
+defer fs.Remove(fd.Name())
 if err != nil {
 b.Fatal(err)
 }
@@ -475,7 +475,7 @@ func TestCacheReload(t *testing.T) {
 }
 defer fd.Close()
-defer func() { _ = fs.Remove(fd.Name()) }()
+defer fs.Remove(fd.Name())
 // Ignore file matches f1 and f2
@@ -516,9 +516,9 @@ func TestCacheReload(t *testing.T) {
 if err != nil {
 t.Fatal(err)
 }
-_ = fd.Sync()
+fd.Sync()
 fakeTime := time.Now().Add(5 * time.Second)
-_ = fs.Chtimes(fd.Name(), fakeTime, fakeTime)
+fs.Chtimes(fd.Name(), fakeTime, fakeTime)
 err = pats.Load(fd.Name())
 if err != nil {
@@ -606,7 +606,7 @@ func TestHashOfEmpty(t *testing.T) {
 // recalculate the hash. d41d8cd98f00b204e9800998ecf8427e is the md5 of
 // nothing.
-_ = p1.Load("file/does/not/exist")
+p1.Load("file/does/not/exist")
 secondHash := p1.Hash()
 if firstHash == secondHash {

@@ -120,7 +120,7 @@ func (l *logger) debugln(level int, vals ...interface{}) {
 s := fmt.Sprintln(vals...)
 l.mut.Lock()
 defer l.mut.Unlock()
-_ = l.logger.Output(level, "DEBUG: "+s)
+l.logger.Output(level, "DEBUG: "+s)
 l.callHandlers(LevelDebug, s)
 }
@@ -132,7 +132,7 @@ func (l *logger) debugf(level int, format string, vals ...interface{}) {
 s := fmt.Sprintf(format, vals...)
 l.mut.Lock()
 defer l.mut.Unlock()
-_ = l.logger.Output(level, "DEBUG: "+s)
+l.logger.Output(level, "DEBUG: "+s)
 l.callHandlers(LevelDebug, s)
 }
@@ -141,7 +141,7 @@ func (l *logger) Verboseln(vals ...interface{}) {
 s := fmt.Sprintln(vals...)
 l.mut.Lock()
 defer l.mut.Unlock()
-_ = l.logger.Output(2, "VERBOSE: "+s)
+l.logger.Output(2, "VERBOSE: "+s)
 l.callHandlers(LevelVerbose, s)
 }
@@ -150,7 +150,7 @@ func (l *logger) Verbosef(format string, vals ...interface{}) {
 s := fmt.Sprintf(format, vals...)
 l.mut.Lock()
 defer l.mut.Unlock()
-_ = l.logger.Output(2, "VERBOSE: "+s)
+l.logger.Output(2, "VERBOSE: "+s)
 l.callHandlers(LevelVerbose, s)
 }
@@ -159,7 +159,7 @@ func (l *logger) Infoln(vals ...interface{}) {
 s := fmt.Sprintln(vals...)
 l.mut.Lock()
 defer l.mut.Unlock()
-_ = l.logger.Output(2, "INFO: "+s)
+l.logger.Output(2, "INFO: "+s)
 l.callHandlers(LevelInfo, s)
 }
@@ -168,7 +168,7 @@ func (l *logger) Infof(format string, vals ...interface{}) {
 s := fmt.Sprintf(format, vals...)
 l.mut.Lock()
 defer l.mut.Unlock()
-_ = l.logger.Output(2, "INFO: "+s)
+l.logger.Output(2, "INFO: "+s)
 l.callHandlers(LevelInfo, s)
 }
@@ -177,7 +177,7 @@ func (l *logger) Warnln(vals ...interface{}) {
 s := fmt.Sprintln(vals...)
 l.mut.Lock()
 defer l.mut.Unlock()
-_ = l.logger.Output(2, "WARNING: "+s)
+l.logger.Output(2, "WARNING: "+s)
 l.callHandlers(LevelWarn, s)
 }
@@ -186,7 +186,7 @@ func (l *logger) Warnf(format string, vals ...interface{}) {
 s := fmt.Sprintf(format, vals...)
 l.mut.Lock()
 defer l.mut.Unlock()
-_ = l.logger.Output(2, "WARNING: "+s)
+l.logger.Output(2, "WARNING: "+s)
 l.callHandlers(LevelWarn, s)
 }
@@ -196,7 +196,7 @@ func (l *logger) Fatalln(vals ...interface{}) {
 s := fmt.Sprintln(vals...)
 l.mut.Lock()
 defer l.mut.Unlock()
-_ = l.logger.Output(2, "FATAL: "+s)
+l.logger.Output(2, "FATAL: "+s)
 l.callHandlers(LevelFatal, s)
 os.Exit(1)
 }
@@ -207,7 +207,7 @@ func (l *logger) Fatalf(format string, vals ...interface{}) {
 s := fmt.Sprintf(format, vals...)
 l.mut.Lock()
 defer l.mut.Unlock()
-_ = l.logger.Output(2, "FATAL: "+s)
+l.logger.Output(2, "FATAL: "+s)
 l.callHandlers(LevelFatal, s)
 os.Exit(1)
 }

@@ -1016,7 +1016,7 @@ func (f *sendReceiveFolder) handleFile(file protocol.FileInfo, copyChan chan<- c
 // Otherwise, discard the file ourselves in order for the
 // sharedpuller not to panic when it fails to exclusively create a
 // file which already exists
-_ = osutil.InWritableDir(f.fs.Remove, f.fs, tempName)
+osutil.InWritableDir(f.fs.Remove, f.fs, tempName)
 }
 } else {
 // Copy the blocks, as we don't want to shuffle them on the FileInfo
@@ -1142,7 +1142,7 @@ func (f *sendReceiveFolder) shortcutFile(file, curFile protocol.FileInfo, dbUpda
 }
 }
-_ = f.fs.Chtimes(file.Name, file.ModTime(), file.ModTime()) // never fails
+f.fs.Chtimes(file.Name, file.ModTime(), file.ModTime()) // never fails
 // This may have been a conflict. We should merge the version vectors so
 // that our clock doesn't move backwards.
@@ -1536,7 +1536,7 @@ func (f *sendReceiveFolder) performFinish(ignores *ignore.Matcher, file, curFile
 }
 // Set the correct timestamp on the new file
-_ = f.fs.Chtimes(file.Name, file.ModTime(), file.ModTime()) // never fails
+f.fs.Chtimes(file.Name, file.ModTime(), file.ModTime()) // never fails
 // Record the updated file in the index
 dbUpdateChan <- dbUpdateJob{file, dbUpdateHandleFile}
@@ -1706,7 +1706,7 @@ func (f *sendReceiveFolder) pullScannerRoutine(scanChan <-chan string) {
 l.Debugln(f, "scheduling scan after pulling for", path)
 scanList = append(scanList, path)
 }
-_ = f.Scan(scanList)
+f.Scan(scanList)
 }
 }
@@ -1858,7 +1858,7 @@ func (f *sendReceiveFolder) deleteDir(dir string, ignores *ignore.Matcher, scanC
 }
 for _, del := range toBeDeleted {
-_ = f.fs.RemoveAll(del)
+f.fs.RemoveAll(del)
 }
 err := osutil.InWritableDir(f.fs.Remove, f.fs, dir)

@@ -258,9 +258,9 @@ func (m *Model) startFolderLocked(folder string) config.FolderType {
 ffs := fs.MtimeFS()
 // These are our metadata files, and they should always be hidden.
-_ = ffs.Hide(config.DefaultMarkerName)
-_ = ffs.Hide(".stversions")
-_ = ffs.Hide(".stignore")
+ffs.Hide(config.DefaultMarkerName)
+ffs.Hide(".stversions")
+ffs.Hide(".stignore")
 p := folderFactory(m, cfg, ver, ffs)
@@ -338,7 +338,7 @@ func (m *Model) RemoveFolder(cfg config.FolderConfiguration) {
 m.fmut.Lock()
 m.pmut.Lock()
 // Delete syncthing specific files
-_ = cfg.Filesystem().RemoveAll(config.DefaultMarkerName)
+cfg.Filesystem().RemoveAll(config.DefaultMarkerName)
 m.tearDownFolderLocked(cfg, fmt.Errorf("removing folder %v", cfg.Description()))
 // Remove it from the database
@@ -362,7 +362,7 @@ func (m *Model) tearDownFolderLocked(cfg config.FolderConfiguration, err error)
 m.pmut.Unlock()
 m.fmut.Unlock()
 for _, id := range tokens {
-_ = m.RemoveAndWait(id, 0)
+m.RemoveAndWait(id, 0)
 }
 m.fmut.Lock()
 m.pmut.Lock()
@@ -1185,7 +1185,7 @@ func (m *Model) handleIntroductions(introducerCfg config.DeviceConfiguration, cm
 }
 if changed {
-_, _ = m.cfg.SetFolder(fcfg)
+m.cfg.SetFolder(fcfg)
 }
 }
@@ -1242,7 +1242,7 @@ func (m *Model) handleDeintroductions(introducerCfg config.DeviceConfiguration,
 cfg := m.cfg.RawCopy()
 cfg.Folders = folders
 cfg.Devices = devices
-_, _ = m.cfg.Replace(cfg)
+m.cfg.Replace(cfg)
 }
 return changed
@@ -1321,7 +1321,7 @@ func (m *Model) introduceDevice(device protocol.Device, introducerCfg config.Dev
 newDeviceCfg.SkipIntroductionRemovals = device.SkipIntroductionRemovals
 }
-_, _ = m.cfg.SetDevice(newDeviceCfg)
+m.cfg.SetDevice(newDeviceCfg)
 }
 // Closed is called when a connection has been closed
@@ -1772,8 +1772,8 @@ func (m *Model) AddConnection(conn connections.Connection, hello protocol.HelloR
 if (device.Name == "" || m.cfg.Options().OverwriteRemoteDevNames) && hello.DeviceName != "" {
 device.Name = hello.DeviceName
-_, _ = m.cfg.SetDevice(device)
-_ = m.cfg.Save()
+m.cfg.SetDevice(device)
+m.cfg.Save()
 }
 m.deviceWasSeen(deviceID)
@@ -1860,7 +1860,7 @@ func sendIndexes(conn protocol.Connection, folder string, fs *db.FileSet, ignore
 // local index may update for other folders than the one we are
 // sending for.
 if fs.Sequence(protocol.LocalDeviceID) <= prevSequence {
-_, _ = sub.Poll(time.Minute)
+sub.Poll(time.Minute)
 continue
 }
@@ -2485,7 +2485,7 @@ func (m *Model) RestoreFolderVersions(folder string, versions map[string]time.Ti
 }
 }
-_ = filesystem.MkdirAll(filepath.Dir(target), 0755)
+filesystem.MkdirAll(filepath.Dir(target), 0755)
 if err == nil {
 err = osutil.Copy(filesystem, source, target)
 }

@@ -328,6 +328,6 @@ findIP:
 func hash(input string) int64 {
 h := fnv.New64a()
-_, _ = h.Write([]byte(input))
+h.Write([]byte(input))
 return int64(h.Sum64())
 }

@@ -81,7 +81,7 @@ func (w *AtomicWriter) Close() error {
}
// Try to not leave temp file around, but ignore error.
- defer func() { _ = w.fs.Remove(w.next.Name()) }()
+ defer w.fs.Remove(w.next.Name())
if err := w.next.Sync(); err != nil {
w.err = err
@@ -110,7 +110,7 @@ func (w *AtomicWriter) Close() error {
// fsync the directory too
if fd, err := w.fs.Open(filepath.Dir(w.next.Name())); err == nil {
- _ = fd.Sync()
+ fd.Sync()
fd.Close()
}
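One subtlety in defer hunks like the one above: defer func() { _ = f(x) }() and defer f(x) differ in when the argument is evaluated, not only in linter noise. A small standalone sketch (hypothetical names, not the AtomicWriter code):

package main

import "fmt"

func remove(name string) error {
	fmt.Println("removing", name)
	return nil
}

func main() {
	name := "tmp-001"
	// Closure form: name is read only when the deferred closure runs.
	defer func() { _ = remove(name) }()
	// Direct form: the argument is evaluated here; the error is discarded later.
	defer remove(name)
	name = "tmp-002"
	// Deferred calls run last-in-first-out: the direct form prints
	// "removing tmp-001", then the closure prints "removing tmp-002".
}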

View File

@@ -42,7 +42,7 @@ func TryRename(filesystem fs.Filesystem, from, to string) error {
func Rename(filesystem fs.Filesystem, from, to string) error {
// Don't leave a dangling temp file in case of rename error
if !(runtime.GOOS == "windows" && strings.EqualFold(from, to)) {
- defer func() { _ = filesystem.Remove(from) }()
+ defer filesystem.Remove(from)
}
return TryRename(filesystem, from, to)
}
@@ -94,13 +94,13 @@ func withPreparedTarget(filesystem fs.Filesystem, from, to string, f func() erro
// Make sure the destination directory is writeable
toDir := filepath.Dir(to)
if info, err := filesystem.Stat(toDir); err == nil && info.IsDir() && info.Mode()&0200 == 0 {
- _ = filesystem.Chmod(toDir, 0755)
- defer func() { _ = filesystem.Chmod(toDir, info.Mode()) }()
+ filesystem.Chmod(toDir, 0755)
+ defer filesystem.Chmod(toDir, info.Mode())
}
// On Windows, make sure the destination file is writeable (or we can't delete it)
if runtime.GOOS == "windows" {
- _ = filesystem.Chmod(to, 0666)
+ filesystem.Chmod(to, 0666)
if !strings.EqualFold(from, to) {
err := filesystem.Remove(to)
if err != nil && !fs.IsNotExist(err) {
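The withPreparedTarget hunk above uses a common pattern: temporarily loosen the permissions on the destination directory and restore them on the way out. A rough standalone sketch against the standard library (not Syncthing's fs.Filesystem abstraction):

package main

import (
	"fmt"
	"os"
)

// withWritableDir temporarily makes dir writable, runs op, then restores the
// original mode; errors from the chmod calls are deliberately ignored, as in
// the code above.
func withWritableDir(dir string, op func() error) error {
	if info, err := os.Stat(dir); err == nil && info.IsDir() && info.Mode()&0200 == 0 {
		os.Chmod(dir, 0755)
		defer os.Chmod(dir, info.Mode())
	}
	return op()
}

func main() {
	err := withWritableDir(".", func() error {
		fmt.Println("the destination directory is writable here")
		return nil
	})
	fmt.Println("err:", err)
}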

View File

@@ -26,9 +26,9 @@ func TestInWriteableDir(t *testing.T) {
fs := fs.NewFilesystem(fs.FilesystemTypeBasic, ".")
- _ = os.Mkdir("testdata", 0700)
- _ = os.Mkdir("testdata/rw", 0700)
- _ = os.Mkdir("testdata/ro", 0500)
+ os.Mkdir("testdata", 0700)
+ os.Mkdir("testdata/rw", 0700)
+ os.Mkdir("testdata/ro", 0500)
create := func(name string) error {
fd, err := os.Create(name)
@@ -87,7 +87,7 @@ func TestInWritableDirWindowsRemove(t *testing.T) {
if err != nil {
t.Fatal(err)
}
- defer func() { _ = os.Chmod("testdata/windows/ro/readonlynew", 0700) }()
+ defer os.Chmod("testdata/windows/ro/readonlynew", 0700)
defer os.RemoveAll("testdata")
create := func(name string) error {
@@ -99,12 +99,12 @@ func TestInWritableDirWindowsRemove(t *testing.T) {
return nil
}
- _ = os.Mkdir("testdata", 0700)
- _ = os.Mkdir("testdata/windows", 0500)
- _ = os.Mkdir("testdata/windows/ro", 0500)
- _ = create("testdata/windows/ro/readonly")
- _ = os.Chmod("testdata/windows/ro/readonly", 0500)
+ os.Mkdir("testdata", 0700)
+ os.Mkdir("testdata/windows", 0500)
+ os.Mkdir("testdata/windows/ro", 0500)
+ create("testdata/windows/ro/readonly")
+ os.Chmod("testdata/windows/ro/readonly", 0500)
fs := fs.NewFilesystem(fs.FilesystemTypeBasic, ".")
@@ -128,8 +128,8 @@ func TestInWritableDirWindowsRemoveAll(t *testing.T) {
if err != nil {
t.Fatal(err)
}
- defer func() { _ = os.Chmod("testdata/windows/ro/readonlynew", 0700) }()
- defer func() { _ = os.RemoveAll("testdata") }()
+ defer os.Chmod("testdata/windows/ro/readonlynew", 0700)
+ defer os.RemoveAll("testdata")
create := func(name string) error {
fd, err := os.Create(name)
@@ -140,12 +140,12 @@ func TestInWritableDirWindowsRemoveAll(t *testing.T) {
return nil
}
- _ = os.Mkdir("testdata", 0700)
- _ = os.Mkdir("testdata/windows", 0500)
- _ = os.Mkdir("testdata/windows/ro", 0500)
- _ = create("testdata/windows/ro/readonly")
- _ = os.Chmod("testdata/windows/ro/readonly", 0500)
+ os.Mkdir("testdata", 0700)
+ os.Mkdir("testdata/windows", 0500)
+ os.Mkdir("testdata/windows/ro", 0500)
+ create("testdata/windows/ro/readonly")
+ os.Chmod("testdata/windows/ro/readonly", 0500)
if err := os.RemoveAll("testdata/windows"); err != nil {
t.Errorf("Unexpected error: %s", err)
@@ -162,8 +162,8 @@ func TestInWritableDirWindowsRename(t *testing.T) {
if err != nil {
t.Fatal(err)
}
- defer func() { _ = os.Chmod("testdata/windows/ro/readonlynew", 0700) }()
- defer func() { _ = os.RemoveAll("testdata") }()
+ defer os.Chmod("testdata/windows/ro/readonlynew", 0700)
+ defer os.RemoveAll("testdata")
create := func(name string) error {
fd, err := os.Create(name)
@@ -174,12 +174,12 @@ func TestInWritableDirWindowsRename(t *testing.T) {
return nil
}
- _ = os.Mkdir("testdata", 0700)
- _ = os.Mkdir("testdata/windows", 0500)
- _ = os.Mkdir("testdata/windows/ro", 0500)
- _ = create("testdata/windows/ro/readonly")
- _ = os.Chmod("testdata/windows/ro/readonly", 0500)
+ os.Mkdir("testdata", 0700)
+ os.Mkdir("testdata/windows", 0500)
+ os.Mkdir("testdata/windows/ro", 0500)
+ create("testdata/windows/ro/readonly")
+ os.Chmod("testdata/windows/ro/readonly", 0500)
fs := fs.NewFilesystem(fs.FilesystemTypeBasic, ".")
@@ -232,7 +232,7 @@ func TestIsDeleted(t *testing.T) {
testFs := fs.NewFilesystem(fs.FilesystemTypeBasic, "testdata")
- _ = testFs.MkdirAll("dir", 0777)
+ testFs.MkdirAll("dir", 0777)
for _, f := range []string{"file", "del.file", "dir.file", "dir/file"} {
fd, err := testFs.Create(f)
if err != nil {
@@ -242,7 +242,7 @@ func TestIsDeleted(t *testing.T) {
}
if runtime.GOOS != "windows" {
// Can't create unreadable dir on windows
- _ = testFs.MkdirAll("inacc", 0777)
+ testFs.MkdirAll("inacc", 0777)
if err := testFs.Chmod("inacc", 0000); err == nil {
if _, err := testFs.Lstat("inacc/file"); fs.IsPermission(err) {
// May fail e.g. if tests are run as root -> just skip
@@ -265,6 +265,6 @@ func TestIsDeleted(t *testing.T) {
}
}
- _ = testFs.Chmod("inacc", 0777)
+ testFs.Chmod("inacc", 0777)
os.RemoveAll("testdata")
}

View File

@@ -25,7 +25,7 @@ func TestTraversesSymlink(t *testing.T) {
defer os.RemoveAll(tmpDir)
fs := fs.NewFilesystem(fs.FilesystemTypeBasic, tmpDir)
- _ = fs.MkdirAll("a/b/c", 0755)
+ fs.MkdirAll("a/b/c", 0755)
if err = osutil.DebugSymlinkForTestsOnly(filepath.Join(fs.URI(), "a", "b"), filepath.Join(fs.URI(), "a", "l")); err != nil {
if runtime.GOOS == "windows" {
t.Skip("Symlinks aren't working")
@@ -78,7 +78,7 @@ func TestIssue4875(t *testing.T) {
defer os.RemoveAll(tmpDir)
testFs := fs.NewFilesystem(fs.FilesystemTypeBasic, tmpDir)
- _ = testFs.MkdirAll("a/b/c", 0755)
+ testFs.MkdirAll("a/b/c", 0755)
if err = osutil.DebugSymlinkForTestsOnly(filepath.Join(testFs.URI(), "a", "b"), filepath.Join(testFs.URI(), "a", "l")); err != nil {
if runtime.GOOS == "windows" {
t.Skip("Symlinks aren't working")
@@ -107,7 +107,7 @@ func BenchmarkTraversesSymlink(b *testing.B) {
os.RemoveAll("testdata")
defer os.RemoveAll("testdata")
fs := fs.NewFilesystem(fs.FilesystemTypeBasic, "testdata")
- _ = fs.MkdirAll("a/b/c", 0755)
+ fs.MkdirAll("a/b/c", 0755)
for i := 0; i < b.N; i++ {
traversesSymlinkResult = osutil.TraversesSymlink(fs, "a/b/c")

View File

@@ -132,8 +132,8 @@ func getTCPConnectionPair() (net.Conn, net.Conn, error) {
}
// Set the buffer sizes etc as usual
- _ = dialer.SetTCPOptions(conn0)
- _ = dialer.SetTCPOptions(conn1)
+ dialer.SetTCPOptions(conn0)
+ dialer.SetTCPOptions(conn1)
return conn0, conn1, nil
}

View File

@@ -35,7 +35,7 @@ func repeatedDeviceID(v byte) (d DeviceID) {
func NewDeviceID(rawCert []byte) DeviceID {
var n DeviceID
hf := sha256.New()
- _, _ = hf.Write(rawCert)
+ hf.Write(rawCert)
hf.Sum(n[:0])
return n
}

View File

@@ -69,8 +69,8 @@ func TestClose(t *testing.T) {
t.Error("Ping should not return true")
}
- _ = c0.Index("default", nil)
- _ = c0.Index("default", nil)
+ c0.Index("default", nil)
+ c0.Index("default", nil)
if _, err := c0.Request("default", "foo", 0, 0, nil, 0, false); err == nil {
t.Error("Request should return an error")
@@ -225,8 +225,8 @@ func testMarshal(t *testing.T, prefix string, m1, m2 message) bool {
bs1, _ := json.MarshalIndent(m1, "", " ")
bs2, _ := json.MarshalIndent(m2, "", " ")
if !bytes.Equal(bs1, bs2) {
- _ = ioutil.WriteFile(prefix+"-1.txt", bs1, 0644)
- _ = ioutil.WriteFile(prefix+"-2.txt", bs2, 0644)
+ ioutil.WriteFile(prefix+"-1.txt", bs1, 0644)
+ ioutil.WriteFile(prefix+"-2.txt", bs2, 0644)
return false
}

View File

@@ -67,7 +67,7 @@ func Intn(n int) int {
// suitable for use a predictable random seed.
func SeedFromBytes(bs []byte) int64 {
h := md5.New()
- _, _ = h.Write(bs)
+ h.Write(bs)
s := h.Sum(nil)
// The MD5 hash of the byte slice is 16 bytes long. We interpret it as two
// uint64s and XOR them together.
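The comment above describes the seeding scheme, but the hunk ends before the code that implements it. A standalone sketch of one way to do it (illustrative only; the actual SeedFromBytes body and byte order are not shown in this diff):

package main

import (
	"crypto/md5"
	"encoding/binary"
	"fmt"
)

// seedFromBytes hashes the input with MD5, splits the 16-byte sum into two
// uint64s and XORs them into a single int64 seed.
func seedFromBytes(bs []byte) int64 {
	h := md5.New()
	h.Write(bs)
	s := h.Sum(nil) // 16 bytes
	return int64(binary.BigEndian.Uint64(s[:8]) ^ binary.BigEndian.Uint64(s[8:]))
}

func main() {
	fmt.Println(seedFromBytes([]byte("syncthing")))
}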

View File

@@ -114,7 +114,7 @@ func (p *Process) Start(bin string, args ...string) error {
}
func (p *Process) wait() {
- _ = p.cmd.Wait()
+ p.cmd.Wait()
if p.logfd != nil {
p.stopErr = p.checkForProblems(p.logfd)

View File

@@ -27,7 +27,7 @@ func GetInvitationFromRelay(uri *url.URL, id syncthingprotocol.DeviceID, certs [
}
conn := tls.Client(rconn, configForCerts(certs))
- _ = conn.SetDeadline(time.Now().Add(timeout))
+ conn.SetDeadline(time.Now().Add(timeout))
if err := performHandshakeAndValidation(conn, uri); err != nil {
return protocol.SessionInvitation{}, err
@@ -75,7 +75,7 @@ func JoinSession(invitation protocol.SessionInvitation) (net.Conn, error) {
Key: invitation.Key,
}
- _ = conn.SetDeadline(time.Now().Add(10 * time.Second))
+ conn.SetDeadline(time.Now().Add(10 * time.Second))
err = protocol.WriteMessage(conn, request)
if err != nil {
return nil, err
@@ -86,7 +86,7 @@ func JoinSession(invitation protocol.SessionInvitation) (net.Conn, error) {
return nil, err
}
- _ = conn.SetDeadline(time.Time{})
+ conn.SetDeadline(time.Time{})
switch msg := message.(type) {
case protocol.Response:

View File

@@ -116,7 +116,7 @@ func Validate(buf, hash []byte, weakHash uint32) bool {
return true
}
// Copy error or mismatch, go to next algo.
- _, _ = rd.Seek(0, io.SeekStart)
+ rd.Seek(0, io.SeekStart)
}
if len(hash) > 0 {
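The hunk above rewinds the reader after a failed weak-hash check so the stronger hash can reuse the same data. A simplified standalone sketch of that flow (names and hash choices are illustrative, not the actual Validate implementation):

package main

import (
	"bytes"
	"crypto/sha256"
	"fmt"
	"hash/adler32"
	"io"
)

// validate tries the cheap weak hash first; on mismatch it seeks back to the
// start and falls back to the strong hash.
func validate(rd io.ReadSeeker, strong []byte, weak uint32) bool {
	if weak != 0 {
		wh := adler32.New()
		if _, err := io.Copy(wh, rd); err == nil && wh.Sum32() == weak {
			return true
		}
		// Copy error or mismatch: rewind and go to the next algorithm.
		rd.Seek(0, io.SeekStart)
	}
	if len(strong) > 0 {
		sh := sha256.New()
		if _, err := io.Copy(sh, rd); err == nil && bytes.Equal(sh.Sum(nil), strong) {
			return true
		}
	}
	return false
}

func main() {
	data := []byte("hello")
	sum := sha256.Sum256(data)
	fmt.Println(validate(bytes.NewReader(data), sum[:], 0))
}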

View File

@@ -112,10 +112,10 @@ func TestAdler32Variants(t *testing.T) {
hf2 := rollingAdler32.New()
checkFn := func(data []byte) bool {
- _, _ = hf1.Write(data)
+ hf1.Write(data)
sum1 := hf1.Sum32()
- _, _ = hf2.Write(data)
+ hf2.Write(data)
sum2 := hf2.Sum32()
hf1.Reset()
@@ -127,7 +127,7 @@ func TestAdler32Variants(t *testing.T) {
// protocol block sized data
data := make([]byte, protocol.MinBlockSize)
for i := 0; i < 5; i++ {
- _, _ = rand.Read(data)
+ rand.Read(data)
if !checkFn(data) {
t.Errorf("Hash mismatch on block sized data")
}
@@ -145,13 +145,13 @@ func TestAdler32Variants(t *testing.T) {
windowSize := 128
hf3 := rollingAdler32.New()
- _, _ = hf3.Write(data[:windowSize])
+ hf3.Write(data[:windowSize])
for i := windowSize; i < len(data); i++ {
if i%windowSize == 0 {
// let the reference function catch up
hf2.Reset()
- _, _ = hf2.Write(data[i-windowSize : i])
+ hf2.Write(data[i-windowSize : i])
// verify that they are in sync with the rolling function
sum2 := hf2.Sum32()

View File

@@ -108,10 +108,10 @@ func (w *walker) walk(ctx context.Context) chan ScanResult {
go func() {
hashFiles := w.walkAndHashFiles(ctx, toHashChan, finishedChan)
if len(w.Subs) == 0 {
- _ = w.Filesystem.Walk(".", hashFiles)
+ w.Filesystem.Walk(".", hashFiles)
} else {
for _, sub := range w.Subs {
- _ = w.Filesystem.Walk(sub, hashFiles)
+ w.Filesystem.Walk(sub, hashFiles)
}
}
close(toHashChan)
@@ -223,7 +223,7 @@ func (w *walker) walkAndHashFiles(ctx context.Context, toHashChan chan<- protoco
if fs.IsTemporary(path) {
l.Debugln("temporary:", path, "err:", err)
if err == nil && info.IsRegular() && info.ModTime().Add(w.TempLifetime).Before(now) {
- _ = w.Filesystem.Remove(path)
+ w.Filesystem.Remove(path)
l.Debugln("removing temporary:", path, info.ModTime())
}
return nil
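The temporary-file branch above removes leftovers once they outlive w.TempLifetime. The age check itself is simple; a standalone sketch (tempLifetime is an assumed parameter, read from the folder configuration in the real code):

package main

import (
	"fmt"
	"os"
	"time"
)

// isStaleTemp reports whether a temporary file is old enough to delete: it is
// a regular file and its mtime plus the allowed lifetime lies in the past.
func isStaleTemp(info os.FileInfo, tempLifetime time.Duration, now time.Time) bool {
	return info.Mode().IsRegular() && info.ModTime().Add(tempLifetime).Before(now)
}

func main() {
	f, _ := os.CreateTemp("", "example-")
	defer os.Remove(f.Name())
	info, _ := os.Stat(f.Name())
	fmt.Println(isStaleTemp(info, time.Hour, time.Now()))                  // false: just created
	fmt.Println(isStaleTemp(info, time.Hour, time.Now().Add(2*time.Hour))) // true: pretend hours have passed
}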

View File

@@ -216,7 +216,7 @@ func TestNormalization(t *testing.T) {
if fd, err := testFs.OpenFile(filepath.Join("normalization", s1, s2), os.O_CREATE|os.O_EXCL, 0644); err != nil {
t.Fatal(err)
} else {
- _, _ = fd.Write([]byte("test"))
+ fd.Write([]byte("test"))
fd.Close()
}
}
@@ -257,7 +257,7 @@ func TestIssue1507(t *testing.T) {
f := make(chan ScanResult, 100)
fn := w.walkAndHashFiles(context.TODO(), h, f)
- _ = fn("", nil, protocol.ErrClosed)
+ fn("", nil, protocol.ErrClosed)
}
func TestWalkSymlinkUnix(t *testing.T) {
@@ -268,9 +268,9 @@ func TestWalkSymlinkUnix(t *testing.T) {
// Create a folder with a symlink in it
os.RemoveAll("_symlinks")
- _ = os.Mkdir("_symlinks", 0755)
+ os.Mkdir("_symlinks", 0755)
defer os.RemoveAll("_symlinks")
- _ = os.Symlink("../testdata", "_symlinks/link")
+ os.Symlink("../testdata", "_symlinks/link")
fs := fs.NewFilesystem(fs.FilesystemTypeBasic, "_symlinks")
for _, path := range []string{".", "link"} {
@@ -298,7 +298,7 @@ func TestWalkSymlinkWindows(t *testing.T) {
// Create a folder with a symlink in it
name := "_symlinks-win"
os.RemoveAll(name)
- _ = os.Mkdir(name, 0755)
+ os.Mkdir(name, 0755)
defer os.RemoveAll(name)
fs := fs.NewFilesystem(fs.FilesystemTypeBasic, name)
if err := osutil.DebugSymlinkForTestsOnly("../testdata", "_symlinks/link"); err != nil {

View File

@@ -115,12 +115,12 @@ func cpuBenchOnce(duration time.Duration, newFn func() hash.Hash) float64 {
chunkSize := 100 * 1 << 10
h := newFn()
bs := make([]byte, chunkSize)
- _, _ = rand.Reader.Read(bs)
+ rand.Reader.Read(bs)
t0 := time.Now()
b := 0
for time.Since(t0) < duration {
- _, _ = h.Write(bs)
+ h.Write(bs)
b += chunkSize
}
h.Sum(nil)
@@ -146,7 +146,7 @@ func verifyCorrectness() {
input := "Syncthing Magic Testing Value\n"
h := New()
- _, _ = h.Write([]byte(input))
+ h.Write([]byte(input))
sum := hex.EncodeToString(h.Sum(nil))
if sum != correct {
panic("sha256 is broken")

View File

@@ -186,9 +186,9 @@ func (l *DowngradingListener) AcceptNoWrapTLS() (net.Conn, bool, error) {
}
var first [1]byte
- _ = conn.SetReadDeadline(time.Now().Add(1 * time.Second))
+ conn.SetReadDeadline(time.Now().Add(1 * time.Second))
n, err := conn.Read(first[:])
- _ = conn.SetReadDeadline(time.Time{})
+ conn.SetReadDeadline(time.Time{})
if err != nil || n == 0 {
// We hit a read error here, but the Accept() call succeeded so we must not return an error.
// We return the connection as is with a special error which handles this
@@ -308,7 +308,7 @@ JpJcUNtrf1XK49IlpWW1Ds8seQsSg7/9BQ==
c := tls.Client(c0, clientCfg)
go func() {
- _ = c.Handshake()
+ c.Handshake()
}()
s := tls.Server(c1, serverCfg)

View File

@@ -202,7 +202,7 @@ func upgradeToURL(archiveName, binary string, url string) error {
return err
}
if err := os.Rename(fname, binary); err != nil {
- _ = os.Rename(old, binary)
+ os.Rename(old, binary)
return err
}
return nil

View File

@@ -59,8 +59,8 @@ func (v Simple) Archive(filePath string) error {
if err != nil {
if fs.IsNotExist(err) {
l.Debugln("creating versions dir .stversions")
- _ = v.fs.Mkdir(versionsDir, 0755)
- _ = v.fs.Hide(versionsDir)
+ v.fs.Mkdir(versionsDir, 0755)
+ v.fs.Hide(versionsDir)
} else {
return err
}

View File

@@ -239,8 +239,8 @@ func (v *Staggered) Archive(filePath string) error {
if _, err := v.versionsFs.Stat("."); err != nil {
if fs.IsNotExist(err) {
l.Debugln("creating versions dir", v.versionsFs)
- _ = v.versionsFs.MkdirAll(".", 0755)
- _ = v.versionsFs.Hide(".")
+ v.versionsFs.MkdirAll(".", 0755)
+ v.versionsFs.Hide(".")
} else {
return err
}

View File

@@ -60,7 +60,7 @@ func TestStaggeredVersioningVersionCount(t *testing.T) {
}
sort.Strings(delete)
- _ = os.MkdirAll("testdata/.stversions", 0755)
+ os.MkdirAll("testdata/.stversions", 0755)
defer os.RemoveAll("testdata")
v := NewStaggered("", fs.NewFilesystem(fs.FilesystemTypeBasic, "testdata"), map[string]string{"maxAge": strconv.Itoa(365 * 86400)}).(*Staggered)

View File

@@ -65,7 +65,7 @@ func (t *Trashcan) Archive(filePath string) error {
if err := t.fs.MkdirAll(versionsDir, 0777); err != nil {
return err
}
- _ = t.fs.Hide(versionsDir)
+ t.fs.Hide(versionsDir)
}
l.Debugln("archiving", filePath)
@@ -84,7 +84,7 @@ func (t *Trashcan) Archive(filePath string) error {
// Set the mtime to the time the file was deleted. This is used by the
// cleanout routine. If this fails things won't work optimally but there's
// not much we can do about it so we ignore the error.
- _ = t.fs.Chtimes(archivedPath, time.Now(), time.Now())
+ t.fs.Chtimes(archivedPath, time.Now(), time.Now())
return nil
}
@@ -144,7 +144,7 @@ func (t *Trashcan) cleanoutArchive() error {
if info.ModTime().Before(cutoff) {
// The file is too old; remove it.
- _ = t.fs.Remove(path)
+ t.fs.Remove(path)
} else {
// Keep this file, and remember it so we don't unnecessarily try
// to remove this directory.
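The cleanout hunk above drops archived files once their mtime is older than a cutoff. A rough standalone sketch of that idea (illustrative only; the real cleanoutArchive runs over the versions fs.Filesystem and, as the surrounding comments indicate, also considers removing emptied directories):

package main

import (
	"fmt"
	"os"
	"path/filepath"
	"time"
)

// cleanout walks dir and removes regular files whose mtime is older than maxAge.
func cleanout(dir string, maxAge time.Duration) error {
	cutoff := time.Now().Add(-maxAge)
	return filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {
		if err != nil || info.IsDir() {
			return err
		}
		if info.ModTime().Before(cutoff) {
			os.Remove(path) // the file is too old; error deliberately ignored
		}
		return nil
	})
}

func main() {
	dir, _ := os.MkdirTemp("", "trashcan-example-")
	defer os.RemoveAll(dir)
	fmt.Println(cleanout(dir, 7*24*time.Hour)) // empty dir: nothing to remove, prints <nil>
}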

View File

@@ -42,7 +42,7 @@ func TestTrashcanCleanout(t *testing.T) {
oldTime := time.Now().Add(-8 * 24 * time.Hour)
for _, tc := range testcases {
- _ = os.MkdirAll(filepath.Dir(tc.file), 0777)
+ os.MkdirAll(filepath.Dir(tc.file), 0777)
if err := ioutil.WriteFile(tc.file, []byte("data"), 0644); err != nil {
t.Fatal(err)
}

View File

@@ -42,17 +42,17 @@ func BenchmarkWeakHashAdler32(b *testing.B) {
hf := adler32.New()
for i := 0; i < b.N; i++ {
- _, _ = hf.Write(data)
+ hf.Write(data)
}
- _ = hf.Sum32()
+ hf.Sum32()
b.SetBytes(size)
}
func BenchmarkWeakHashAdler32Roll(b *testing.B) {
data := make([]byte, size)
hf := adler32.New()
- _, _ = hf.Write(data)
+ hf.Write(data)
b.ResetTimer()
@@ -70,10 +70,10 @@ func BenchmarkWeakHashRabinKarp64(b *testing.B) {
hf := rabinkarp64.New()
for i := 0; i < b.N; i++ {
- _, _ = hf.Write(data)
+ hf.Write(data)
}
- _ = hf.Sum64()
+ hf.Sum64()
b.SetBytes(size)
}
@@ -101,7 +101,7 @@ func BenchmarkWeakHashBozo32(b *testing.B) {
hf.Write(data)
}
- _ = hf.Sum32()
+ hf.Sum32()
b.SetBytes(size)
}
@@ -129,7 +129,7 @@ func BenchmarkWeakHashBuzhash32(b *testing.B) {
hf.Write(data)
}
- _ = hf.Sum32()
+ hf.Sum32()
b.SetBytes(size)
}
@@ -157,7 +157,7 @@ func BenchmarkWeakHashBuzhash64(b *testing.B) {
hf.Write(data)
}
- _ = hf.Sum64()
+ hf.Sum64()
b.SetBytes(size)
}

View File

@@ -94,7 +94,7 @@ func generateOneFile(fd io.ReadSeeker, p1 string, s int64) error {
return err
}
- _ = os.Chmod(p1, os.FileMode(rand.Intn(0777)|0400))
+ os.Chmod(p1, os.FileMode(rand.Intn(0777)|0400))
t := time.Now().Add(-time.Duration(rand.Intn(30*86400)) * time.Second)
err = os.Chtimes(p1, t, t)