Start rewriting integration tests in Go instead of bash

Jakob Borg 2014-08-05 11:12:20 +02:00
parent f6c9642d72
commit 0cfac4e021
6 changed files with 408 additions and 91 deletions


@@ -3,5 +3,3 @@
./test-http.sh || exit
./test-merge.sh || exit
./test-delupd.sh || exit
# ./test-folders.sh || exit
./test-reconnect.sh || exit
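With this commit the restart/reconnect scenario moves from the shell runner above into the Go tests added below. As a rough sketch (not part of this commit; it assumes the standard Go toolchain and the integration/ directory and testing.Short() check shown further down), the new tests would be run along these lines:

	cd integration
	go test -v        # run the Go integration tests against ../bin/syncthing
	go test -short    # skip them, via the testing.Short() check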

integration/common_test.go (new file, 262 lines)

@@ -0,0 +1,262 @@
// Copyright (C) 2014 Jakob Borg and Contributors (see the CONTRIBUTORS file).
// All rights reserved. Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.

package integration_test

import (
	"crypto/md5"
	"crypto/rand"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"log"
	mr "math/rand"
	"net/http"
	"os"
	"os/exec"
	"path/filepath"
	"time"
)

type syncthingProcess struct {
	log   string
	argv  []string
	port  int
	cmd   *exec.Cmd
	logfd *os.File
}

func (p *syncthingProcess) start() error {
	if p.logfd == nil {
		logfd, err := os.Create(p.log)
		if err != nil {
			return err
		}
		p.logfd = logfd
	}

	cmd := exec.Command("../bin/syncthing", p.argv...)
	cmd.Stdout = p.logfd
	cmd.Stderr = p.logfd
	cmd.Env = append(env, fmt.Sprintf("STPROFILER=:%d", p.port+1000))

	err := cmd.Start()
	if err != nil {
		return err
	}
	p.cmd = cmd
	return nil
}

func (p *syncthingProcess) stop() {
	p.cmd.Process.Kill()
	p.cmd.Wait()
}

func (p *syncthingProcess) peerCompletion() (map[string]int, error) {
	resp, err := http.Get(fmt.Sprintf("http://localhost:%d/rest/debug/peerCompletion", p.port))
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()

	comp := map[string]int{}
	err = json.NewDecoder(resp.Body).Decode(&comp)
	return comp, err
}

type fileGenerator struct {
	files   int
	maxexp  int
	srcname string
}

func generateFiles(dir string, files, maxexp int, srcname string) error {
	fd, err := os.Open(srcname)
	if err != nil {
		return err
	}

	for i := 0; i < files; i++ {
		n := randomName()
		p0 := filepath.Join(dir, string(n[0]), n[0:2])
		err = os.MkdirAll(p0, 0755)
		if err != nil {
			log.Fatal(err)
		}

		s := 1 << uint(mr.Intn(maxexp))
		a := 128 * 1024
		if a > s {
			a = s
		}
		s += mr.Intn(a)

		src := io.LimitReader(&infiniteReader{fd}, int64(s))

		p1 := filepath.Join(p0, n)
		dst, err := os.Create(p1)
		if err != nil {
			return err
		}

		_, err = io.Copy(dst, src)
		if err != nil {
			return err
		}

		err = dst.Close()
		if err != nil {
			return err
		}

		err = os.Chmod(p1, os.FileMode(mr.Intn(0777)|0400))
		if err != nil {
			return err
		}

		t := time.Now().Add(-time.Duration(mr.Intn(30*86400)) * time.Second)
		err = os.Chtimes(p1, t, t)
		if err != nil {
			return err
		}
	}

	return nil
}

func randomName() string {
	var b [16]byte
	rand.Reader.Read(b[:])
	return fmt.Sprintf("%x", b[:])
}

type infiniteReader struct {
	rd io.ReadSeeker
}

func (i *infiniteReader) Read(bs []byte) (int, error) {
	n, err := i.rd.Read(bs)
	if err == io.EOF {
		err = nil
		i.rd.Seek(0, 0)
	}
	return n, err
}

// rm -rf
func removeAll(dirs ...string) error {
	for _, dir := range dirs {
		err := os.RemoveAll(dir)
		if err != nil {
			return err
		}
	}
	return nil
}

// Compare a number of directories. Returns nil if the contents are identical,
// otherwise an error describing the first found difference.
func compareDirectories(dirs ...string) error {
	chans := make([]chan fileInfo, len(dirs))
	for i := range chans {
		chans[i] = make(chan fileInfo)
	}
	abort := make(chan struct{})

	for i := range dirs {
		startWalker(dirs[i], chans[i], abort)
	}

	res := make([]fileInfo, len(dirs))
	for {
		numDone := 0
		for i := range chans {
			fi, ok := <-chans[i]
			if !ok {
				numDone++
			}
			res[i] = fi
		}

		for i := 1; i < len(res); i++ {
			if res[i] != res[0] {
				close(abort)
				return fmt.Errorf("Mismatch; %#v (%s) != %#v (%s)", res[i], dirs[i], res[0], dirs[0])
			}
		}

		if numDone == len(dirs) {
			return nil
		}
	}
}

type fileInfo struct {
	name string
	mode os.FileMode
	mod  time.Time
	hash [16]byte
}

func startWalker(dir string, res chan<- fileInfo, abort <-chan struct{}) {
	walker := func(path string, info os.FileInfo, err error) error {
		if err != nil {
			return err
		}

		rn, _ := filepath.Rel(dir, path)
		if rn == "." {
			return nil
		}

		var f fileInfo
		if info.IsDir() {
			f = fileInfo{
				name: rn,
				mode: info.Mode(),
				// hash and modtime zero for directories
			}
		} else {
			f = fileInfo{
				name: rn,
				mode: info.Mode(),
				mod:  info.ModTime(),
			}
			sum, err := md5file(path)
			if err != nil {
				return err
			}
			f.hash = sum
		}

		select {
		case res <- f:
			return nil
		case <-abort:
			return errors.New("abort")
		}
	}
	go func() {
		filepath.Walk(dir, walker)
		close(res)
	}()
}

func md5file(fname string) (hash [16]byte, err error) {
	f, err := os.Open(fname)
	if err != nil {
		return
	}
	defer f.Close()

	h := md5.New()
	io.Copy(h, f)
	hb := h.Sum(nil)
	copy(hash[:], hb)

	return
}


@@ -26,7 +26,7 @@
<reconnectionIntervalS>5</reconnectionIntervalS>
<maxChangeKbps>10000</maxChangeKbps>
<startBrowser>false</startBrowser>
-<upnpEnabled>true</upnpEnabled>
+<upnpEnabled>false</upnpEnabled>
<urAccepted>-1</urAccepted>
</options>
</configuration>


@@ -28,7 +28,7 @@
<reconnectionIntervalS>5</reconnectionIntervalS>
<maxChangeKbps>10000</maxChangeKbps>
<startBrowser>false</startBrowser>
-<upnpEnabled>true</upnpEnabled>
+<upnpEnabled>false</upnpEnabled>
<urAccepted>-1</urAccepted>
</options>
</configuration>


@@ -0,0 +1,144 @@
// Copyright (C) 2014 Jakob Borg and Contributors (see the CONTRIBUTORS file).
// All rights reserved. Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.

package integration_test

import (
	"sync"
	"testing"
	"time"
)

const (
	apiKey = "abc123" // Used when talking to the processes under test
	id1    = "I6KAH76-66SLLLB-5PFXSOA-UFJCDZC-YAOMLEK-CP2GB32-BV5RQST-3PSROAU"
	id2    = "JMFJCXB-GZDE4BN-OCJE3VF-65GYZNU-AIVJRET-3J6HMRQ-AUQIGJO-FKNHMQU"
)

var env = []string{
	"HOME=.",
	"STTRACE=model",
}

func TestRestartBothDuringTransfer(t *testing.T) {
	// Give the receiver some time to rot with needed files but
	// without any peer. This triggers
	// https://github.com/syncthing/syncthing/issues/463
	testRestartDuringTransfer(t, true, true, 10*time.Second, 0)
}

func TestRestartReceiverDuringTransfer(t *testing.T) {
	testRestartDuringTransfer(t, false, true, 0, 0)
}

func TestRestartSenderDuringTransfer(t *testing.T) {
	testRestartDuringTransfer(t, true, false, 0, 0)
}

func testRestartDuringTransfer(t *testing.T, restartSender, restartReceiver bool, senderDelay, receiverDelay time.Duration) {
	if testing.Short() {
		t.Skip("skipping integration test")
		return
	}

	t.Log("Cleaning...")
	err := removeAll("s1", "s2", "f1/index", "f2/index")
	if err != nil {
		t.Fatal(err)
	}

	t.Log("Generating files...")
	err = generateFiles("s1", 1000, 20, "../bin/syncthing")
	if err != nil {
		t.Fatal(err)
	}

	t.Log("Starting up...")
	sender := syncthingProcess{ // id1
		log:  "1.out",
		argv: []string{"-home", "f1"},
		port: 8081,
	}
	err = sender.start()
	if err != nil {
		t.Fatal(err)
	}

	receiver := syncthingProcess{ // id2
		log:  "2.out",
		argv: []string{"-home", "f2"},
		port: 8082,
	}
	err = receiver.start()
	if err != nil {
		t.Fatal(err)
	}

	// Give them time to start up
	time.Sleep(1 * time.Second)

	var prevComp int
	for {
		comp, err := sender.peerCompletion()
		if err != nil {
			sender.stop()
			receiver.stop()
			t.Fatal(err)
		}

		curComp := comp[id2]
		if curComp == 100 {
			sender.stop()
			receiver.stop()
			break
		}

		if curComp > prevComp {
			if restartReceiver {
				t.Logf("Stopping receiver...")
				receiver.stop()
			}
			if restartSender {
				t.Logf("Stopping sender...")
				sender.stop()
			}

			var wg sync.WaitGroup

			if restartReceiver {
				wg.Add(1)
				go func() {
					time.Sleep(receiverDelay)
					t.Logf("Starting receiver...")
					receiver.start()
					wg.Done()
				}()
			}

			if restartSender {
				wg.Add(1)
				go func() {
					time.Sleep(senderDelay)
					t.Logf("Starting sender...")
					sender.start()
					wg.Done()
				}()
			}

			wg.Wait()
			prevComp = curComp
		}

		time.Sleep(1 * time.Second)
	}

	t.Log("Comparing directories...")
	err = compareDirectories("s1", "s2")
	if err != nil {
		t.Fatal(err)
	}
}


@@ -1,87 +0,0 @@
#!/bin/bash
# Copyright (C) 2014 Jakob Borg and other contributors. All rights reserved.
# Use of this source code is governed by an MIT-style license that can be
# found in the LICENSE file.
id1=I6KAH76-66SLLLB-5PFXSOA-UFJCDZC-YAOMLEK-CP2GB32-BV5RQST-3PSROAU
id2=JMFJCXB-GZDE4BN-OCJE3VF-65GYZNU-AIVJRET-3J6HMRQ-AUQIGJO-FKNHMQU
go build json.go
go build md5r.go
go build genfiles.go

start() {
	echo "Starting..."
	STTRACE=model,scanner STPROFILER=":9091" syncthing -home "f1" > 1.out 2>&1 &
	STTRACE=model,scanner STPROFILER=":9092" syncthing -home "f2" > 2.out 2>&1 &
	sleep 1
}

stop() {
	echo "Stopping..."
	for i in 1 2 ; do
		curl -HX-API-Key:abc123 -X POST "http://localhost:808$i/rest/shutdown"
	done
	sleep 1
}

setup() {
	echo "Setting up..."
	rm -rf s? s??-?
	rm -rf f?/*.idx.gz f?/index
	mkdir -p s1
	pushd s1 >/dev/null
	../genfiles
	../md5r > ../md5-1
	popd >/dev/null
}

testConvergence() {
	torestart="$1"
	prevcomp=0
	while true ; do
		sleep 5
		comp=$(curl -HX-API-Key:abc123 -s "http://localhost:8081/rest/debug/peerCompletion" | ./json "$id2")
		comp=${comp:-0}
		echo $comp / 100
		if [[ $comp == 100 ]] ; then
			echo Done
			break
		fi

		# Restart if the destination has made some progress
		if [[ $comp -gt $prevcomp ]] ; then
			prevcomp=$comp
			curl -HX-API-Key:abc123 -X POST "http://localhost:$torestart/rest/restart"
		fi
	done

	echo "Verifying..."
	pushd s2 >/dev/null
	../md5r | grep -v .stversions > ../md5-2
	popd >/dev/null

	if ! cmp md5-1 md5-2 ; then
		echo Repos differ
		stop
		exit 1
	fi
}

echo Testing reconnects during pull where the source node restarts
setup
start
testConvergence 8081
stop
echo Testing reconnects during pull where the destination node restarts
setup
start
testConvergence 8082
stop
exit 0